author    trims <none@none>  2008-09-04 18:40:43 -0700
committer trims <none@none>  2008-09-04 18:40:43 -0700
commit    3463ceec5c4369a29f9438a75d82d4e845dda4d9 (patch)
tree      48b8bae3162b09a2db7d321fa47f3617d04a8198
parent    c19582cc2b45d48aa35112a2f9fe9d83ef31dd2f (diff)
parent    0b839c7101e948e14191fc940eb2cecd7a7833c0 (diff)
-rw-r--r-- agent/make/build-pkglist | 2
-rw-r--r-- make/linux/makefiles/sa.make | 18
-rw-r--r-- make/sa.files | 59
-rw-r--r-- make/solaris/makefiles/reorder_COMPILER1_amd64 | 5450
-rw-r--r-- make/solaris/makefiles/reorder_COMPILER1_i486 | 226
-rw-r--r-- make/solaris/makefiles/sa.make | 17
-rw-r--r-- make/windows/makefiles/sa.make | 11
-rw-r--r-- src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp | 3
-rw-r--r-- src/cpu/sparc/vm/relocInfo_sparc.cpp | 6
-rw-r--r-- src/cpu/sparc/vm/sharedRuntime_sparc.cpp | 14
-rw-r--r-- src/cpu/x86/vm/assembler_x86.cpp (renamed from src/cpu/x86/vm/assembler_x86_64.cpp) | 8396
-rw-r--r-- src/cpu/x86/vm/assembler_x86.hpp (renamed from src/cpu/x86/vm/assembler_x86_32.hpp) | 1480
-rw-r--r-- src/cpu/x86/vm/assembler_x86.inline.hpp (renamed from src/cpu/x86/vm/assembler_x86_64.inline.hpp) | 39
-rw-r--r-- src/cpu/x86/vm/assembler_x86_32.cpp | 5001
-rw-r--r-- src/cpu/x86/vm/assembler_x86_32.inline.hpp | 64
-rw-r--r-- src/cpu/x86/vm/assembler_x86_64.hpp | 1477
-rw-r--r-- src/cpu/x86/vm/c1_CodeStubs_x86.cpp | 19
-rw-r--r-- src/cpu/x86/vm/c1_Defs_x86.hpp | 31
-rw-r--r-- src/cpu/x86/vm/c1_FrameMap_x86.cpp | 125
-rw-r--r-- src/cpu/x86/vm/c1_FrameMap_x86.hpp | 41
-rw-r--r-- src/cpu/x86/vm/c1_LIRAssembler_x86.cpp | 752
-rw-r--r-- src/cpu/x86/vm/c1_LIRAssembler_x86.hpp | 11
-rw-r--r-- src/cpu/x86/vm/c1_LIRGenerator_x86.cpp | 60
-rw-r--r-- src/cpu/x86/vm/c1_LinearScan_x86.hpp | 13
-rw-r--r-- src/cpu/x86/vm/c1_MacroAssembler_x86.cpp | 111
-rw-r--r-- src/cpu/x86/vm/c1_MacroAssembler_x86.hpp | 11
-rw-r--r-- src/cpu/x86/vm/c1_Runtime1_x86.cpp | 674
-rw-r--r-- src/cpu/x86/vm/cppInterpreter_x86.cpp | 785
-rw-r--r-- src/cpu/x86/vm/dump_x86_32.cpp | 20
-rw-r--r-- src/cpu/x86/vm/dump_x86_64.cpp | 30
-rw-r--r-- src/cpu/x86/vm/frame_x86.cpp | 3
-rw-r--r-- src/cpu/x86/vm/frame_x86.inline.hpp | 4
-rw-r--r-- src/cpu/x86/vm/icache_x86.cpp | 4
-rw-r--r-- src/cpu/x86/vm/interp_masm_x86_32.cpp | 360
-rw-r--r-- src/cpu/x86/vm/interp_masm_x86_32.hpp | 31
-rw-r--r-- src/cpu/x86/vm/interp_masm_x86_64.cpp | 368
-rw-r--r-- src/cpu/x86/vm/interp_masm_x86_64.hpp | 73
-rw-r--r-- src/cpu/x86/vm/interpreterRT_x86_32.cpp | 8
-rw-r--r-- src/cpu/x86/vm/interpreterRT_x86_64.cpp | 88
-rw-r--r-- src/cpu/x86/vm/interpreter_x86_32.cpp | 20
-rw-r--r-- src/cpu/x86/vm/interpreter_x86_64.cpp | 236
-rw-r--r-- src/cpu/x86/vm/jniFastGetField_x86_32.cpp | 86
-rw-r--r-- src/cpu/x86/vm/jniFastGetField_x86_64.cpp | 32
-rw-r--r-- src/cpu/x86/vm/nativeInst_x86.cpp | 176
-rw-r--r-- src/cpu/x86/vm/nativeInst_x86.hpp | 128
-rw-r--r-- src/cpu/x86/vm/relocInfo_x86.cpp | 72
-rw-r--r-- src/cpu/x86/vm/runtime_x86_32.cpp | 32
-rw-r--r-- src/cpu/x86/vm/sharedRuntime_x86_32.cpp | 543
-rw-r--r-- src/cpu/x86/vm/sharedRuntime_x86_64.cpp | 488
-rw-r--r-- src/cpu/x86/vm/stubGenerator_x86_32.cpp | 597
-rw-r--r-- src/cpu/x86/vm/stubGenerator_x86_64.cpp | 687
-rw-r--r-- src/cpu/x86/vm/stubRoutines_x86_32.cpp | 6
-rw-r--r-- src/cpu/x86/vm/stubRoutines_x86_32.hpp | 4
-rw-r--r-- src/cpu/x86/vm/stubRoutines_x86_64.cpp | 22
-rw-r--r-- src/cpu/x86/vm/stubRoutines_x86_64.hpp | 4
-rw-r--r-- src/cpu/x86/vm/templateInterpreter_x86_32.cpp | 332
-rw-r--r-- src/cpu/x86/vm/templateInterpreter_x86_64.cpp | 459
-rw-r--r-- src/cpu/x86/vm/templateTable_x86_32.cpp | 727
-rw-r--r-- src/cpu/x86/vm/templateTable_x86_32.hpp | 2
-rw-r--r-- src/cpu/x86/vm/templateTable_x86_64.cpp | 403
-rw-r--r-- src/cpu/x86/vm/vm_version_x86_32.cpp | 66
-rw-r--r-- src/cpu/x86/vm/vm_version_x86_64.cpp | 30
-rw-r--r-- src/cpu/x86/vm/vtableStubs_x86_32.cpp | 38
-rw-r--r-- src/cpu/x86/vm/vtableStubs_x86_64.cpp | 34
-rw-r--r-- src/cpu/x86/vm/x86_32.ad | 309
-rw-r--r-- src/cpu/x86/vm/x86_64.ad | 207
-rw-r--r-- src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp (renamed from src/os_cpu/linux_x86/vm/assembler_linux_x86_32.cpp) | 45
-rw-r--r-- src/os_cpu/linux_x86/vm/assembler_linux_x86_64.cpp | 67
-rw-r--r-- src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp (renamed from src/os_cpu/solaris_x86/vm/assembler_solaris_x86_32.cpp) | 118
-rw-r--r-- src/os_cpu/solaris_x86/vm/assembler_solaris_x86_64.cpp | 87
-rw-r--r-- src/os_cpu/solaris_x86/vm/solaris_x86_32.ad | 12
-rw-r--r-- src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp (renamed from src/os_cpu/windows_x86/vm/assembler_windows_x86_32.cpp) | 40
-rw-r--r-- src/os_cpu/windows_x86/vm/os_windows_x86.cpp | 2
-rw-r--r-- src/share/vm/adlc/output_h.cpp | 13
-rw-r--r-- src/share/vm/c1/c1_FrameMap.cpp | 2
-rw-r--r-- src/share/vm/c1/c1_LIR.cpp | 11
-rw-r--r-- src/share/vm/c1/c1_LIR.hpp | 155
-rw-r--r-- src/share/vm/c1/c1_LIRAssembler.cpp | 8
-rw-r--r-- src/share/vm/c1/c1_LIRAssembler.hpp | 6
-rw-r--r-- src/share/vm/c1/c1_LIRGenerator.cpp | 2
-rw-r--r-- src/share/vm/c1/c1_LinearScan.cpp | 73
-rw-r--r-- src/share/vm/c1/c1_LinearScan.hpp | 2
-rw-r--r-- src/share/vm/c1/c1_Runtime1.cpp | 15
-rw-r--r-- src/share/vm/ci/ciTypeFlow.hpp | 2
-rw-r--r-- src/share/vm/code/relocInfo.hpp | 2
-rw-r--r-- src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp | 2
-rw-r--r-- src/share/vm/includeDB_compiler1 | 3
-rw-r--r-- src/share/vm/includeDB_compiler2 | 4
-rw-r--r-- src/share/vm/includeDB_core | 80
-rw-r--r-- src/share/vm/includeDB_features | 2
-rw-r--r-- src/share/vm/memory/blockOffsetTable.hpp | 6
-rw-r--r-- src/share/vm/opto/addnode.cpp | 8
-rw-r--r-- src/share/vm/opto/block.cpp | 6
-rw-r--r-- src/share/vm/opto/callGenerator.cpp | 6
-rw-r--r-- src/share/vm/opto/callnode.cpp | 4
-rw-r--r-- src/share/vm/opto/chaitin.cpp | 6
-rw-r--r-- src/share/vm/opto/chaitin.hpp | 5
-rw-r--r-- src/share/vm/opto/coalesce.cpp | 4
-rw-r--r-- src/share/vm/opto/compile.cpp | 1
-rw-r--r-- src/share/vm/opto/connode.cpp | 2
-rw-r--r-- src/share/vm/opto/divnode.cpp | 26
-rw-r--r-- src/share/vm/opto/escape.cpp | 41
-rw-r--r-- src/share/vm/opto/escape.hpp | 2
-rw-r--r-- src/share/vm/opto/gcm.cpp | 25
-rw-r--r-- src/share/vm/opto/ifg.cpp | 2
-rw-r--r-- src/share/vm/opto/loopnode.cpp | 2
-rw-r--r-- src/share/vm/opto/loopopts.cpp | 14
-rw-r--r-- src/share/vm/opto/macro.cpp | 4
-rw-r--r-- src/share/vm/opto/matcher.cpp | 8
-rw-r--r-- src/share/vm/opto/memnode.cpp | 14
-rw-r--r-- src/share/vm/opto/node.cpp | 11
-rw-r--r-- src/share/vm/opto/phaseX.cpp | 4
-rw-r--r-- src/share/vm/opto/reg_split.cpp | 14
-rw-r--r-- src/share/vm/opto/subnode.cpp | 58
-rw-r--r-- src/share/vm/opto/type.cpp | 71
-rw-r--r-- src/share/vm/opto/type.hpp | 14
-rw-r--r-- src/share/vm/runtime/globals.hpp | 2
-rw-r--r-- src/share/vm/runtime/vmStructs.cpp | 4
-rw-r--r-- src/share/vm/utilities/macros.hpp | 8
-rw-r--r-- test/compiler/6741738/Tester.java (renamed from src/os_cpu/windows_x86/vm/assembler_windows_x86_64.cpp) | 65
120 files changed, 17935 insertions, 14880 deletions
diff --git a/agent/make/build-pkglist b/agent/make/build-pkglist
index 64d9a96ca..c7cac3dfc 100644
--- a/agent/make/build-pkglist
+++ b/agent/make/build-pkglist
@@ -8,4 +8,4 @@ FIND=$MKS_HOME/find
SED=$MKS_HOME/sed
SORT=$MKS_HOME/sort
-$CD ../src/share/classes; $FIND sun/jvm/hotspot \( -name SCCS -prune \) -o -type d -print | $SED -e 's/\//./g' | $SORT > ../../../make/pkglist.txt
+$CD ../src/share/classes; $FIND sun/jvm/hotspot com/sun/java/swing -type d -print | $SED -e 's/\//./g' | $SORT > ../../../make/pkglist.txt
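The amended pipeline walks the agent class directories (now including com/sun/java/swing) and rewrites each on-disk path as a dotted package name before sorting the result into pkglist.txt. A minimal sketch of what the sed step does, using one directory from the agent tree as an example:

    $ echo "sun/jvm/hotspot/debugger" | sed -e 's/\//./g'
    sun.jvm.hotspot.debugger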
diff --git a/make/linux/makefiles/sa.make b/make/linux/makefiles/sa.make
index 94463d6a0..eca293bb8 100644
--- a/make/linux/makefiles/sa.make
+++ b/make/linux/makefiles/sa.make
@@ -41,8 +41,9 @@ GENERATED = $(TOPDIR)/../generated
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
# gnumake 3.78.1 does not accept the *s that
-# are in AGENT_ALLFILES, so use the shell to expand them
-AGENT_ALLFILES := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_ALLFILES))
+# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
+AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
+AGENT_FILES2 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES2))
SA_CLASSDIR = $(GENERATED)/saclasses
@@ -58,7 +59,7 @@ all:
$(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
fi
-$(GENERATED)/sa-jdi.jar: $(AGENT_ALLFILES)
+$(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
$(QUIETLY) echo "Making $@"
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@@ -72,9 +73,18 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_ALLFILES)
$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
mkdir -p $(SA_CLASSDIR); \
fi
- $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) $(AGENT_ALLFILES)
+
+ $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1)
+ $(QUIETLY) $(REMOTE) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2)
+
$(QUIETLY) $(REMOTE) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
+ $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
+ $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
+ $(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+ $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
+ $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
+ $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
$(QUIETLY) $(REMOTE) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ .
$(QUIETLY) $(REMOTE) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
$(QUIETLY) $(REMOTE) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
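The sa.make change keeps each javac command line short enough for the platform: the wildcards are expanded by the shell (gnumake 3.78.1 will not expand them itself), and the two resulting file sets are compiled in separate passes, with -sourcepath letting each pass resolve classes that live in the other set. A minimal sketch of the same pattern with hypothetical variable and directory names, not the exact rules above:

    SRC    := /path/to/agent/src/share/classes
    FILES1 := $(shell ls $(SRC)/pkg/one/*.java)
    FILES2 := $(shell ls $(SRC)/pkg/two/*.java)

    classes:
            javac -sourcepath $(SRC) -d build/classes $(FILES1)
            javac -sourcepath $(SRC) -d build/classes $(FILES2)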
diff --git a/make/sa.files b/make/sa.files
index 6f76f9d8f..7040cf517 100644
--- a/make/sa.files
+++ b/make/sa.files
@@ -33,40 +33,23 @@
AGENT_SRC_DIR = $(AGENT_DIR)/src/share/classes
-AGENT_ALLFILES = \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/DebugServer.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/HelloWorld.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotAgent.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotSolarisVtblAccess.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/HotSpotTypeDataBase.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/LinuxVtblAccess.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/ObjectHistogram.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/RMIHelper.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/StackTrace.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/TestDebugger.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/Win32VtblAccess.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Immediate.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ImmediateOrRegister.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Operand.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/Register.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/AMD64Register.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/AMD64Registers.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/IA64Register.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/IA64Registers.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCArgument.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegister.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegisterType.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/SPARCRegisters.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86Register.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86RegisterPart.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86Registers.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86SegmentRegister.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/X86SegmentRegisters.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/BugSpotAgent.java \
+# Split the set of files into two sets because on the Linux platform
+# listing or compiling all the files results in an 'Argument list too long' error.
+
+AGENT_FILES1 = \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/amd64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/ia64/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/sparc/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/asm/x86/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/bugspot/tree/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/c1/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/code/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/compiler/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/cdbg/basic/x86/*.java \
@@ -75,7 +58,6 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/sparc/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dbx/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/dummy/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/ia64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/debugger/linux/amd64/*.java \
@@ -107,7 +89,10 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/interpreter/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/jdi/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/livejvm/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/memory/*.java \
-$(AGENT_SRC_DIR)/sun/jvm/hotspot/oops/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/oops/*.java
+
+
+AGENT_FILES2 = \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/amd64/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/ia64/*.java \
@@ -127,7 +112,17 @@ $(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/win32_x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/runtime/x86/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/jcore/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/tools/soql/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/types/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/types/basic/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/*.java \
$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/memo/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/action/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/classbrowser/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/table/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/tree/*.java \
+$(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/treetable/*.java \
+$(AGENT_SRC_DIR)/com/sun/java/swing/action/*.java \
+$(AGENT_SRC_DIR)/com/sun/java/swing/ui/*.java
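The two new reorder_COMPILER1_* files below are Solaris link-editor mapfiles: each "text: .text%<mangled symbol>: <object>;" line places that function in the text segment in list order, so code exercised during VM startup ends up on adjacent pages. How the makefiles feed this list to the toolchain is not part of this diff; a hedged sketch of the usual Solaris hookup (compile with function splitting so the linker may reorder, then pass the list as a mapfile) might look like:

    # hypothetical fragment, not taken from this commit
    CFLAGS += -xF
    LFLAGS += -M reorder_COMPILER1_amd64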
diff --git a/make/solaris/makefiles/reorder_COMPILER1_amd64 b/make/solaris/makefiles/reorder_COMPILER1_amd64
new file mode 100644
index 000000000..fdc435bbf
--- /dev/null
+++ b/make/solaris/makefiles/reorder_COMPILER1_amd64
@@ -0,0 +1,5450 @@
+data = R0x2000;
+text = LOAD ?RXO;
+
+
+# Test Null
+text: .text%__cplus_fini_at_exit: CCrti.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o;
+text: .text%__1cQAgentLibraryList2t6M_v_: arguments.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable.o;
+text: .text%__1cFRInfo2t6M_v_: c1_AllocTable.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_AllocTable_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Canonicalizer.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Canonicalizer.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeStubs_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CodeStubs_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compilation.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Compilation.o;
+text: .text%__1cMelapsedTimer2t6M_v_: c1_Compilation.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compiler.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Compiler.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap.o;
+text: .text%__1cFRInfo2t6M_v_: c1_FrameMap.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_FrameMap_x86.o;
+text: .text%__1cKc1_RegMask2t6M_v_: c1_FrameMap_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_GraphBuilder.o;
+text: .text%__1cFRInfo2t6M_v_: c1_GraphBuilder.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_IR.o;
+text: .text%__1cFRInfo2t6M_v_: c1_IR.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Instruction.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Instruction.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_InstructionPrinter.o;
+text: .text%__1cFRInfo2t6M_v_: c1_InstructionPrinter.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Items.o;
+text: .text%__1cIHintItem2t6MpnJValueType_i_v_: c1_Items.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Items_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIR.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIR.o;
+text: .text%__1cLLIR_OprFactHillegal6F_pnLLIR_OprDesc__: c1_LIR.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Loops.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Loops.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_MacroAssembler_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_MacroAssembler_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Optimizer.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Optimizer.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo.o;
+text: .text%__1cFRInfo2t6M_v_: c1_RInfo.o;
+text: .text%__1cKc1_RegMask2t6M_v_: c1_RInfo.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_RInfo_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc.o;
+text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Runtime1.o;
+text: .text%__1cIiEntries2t6M_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Runtime1_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ScanBlocks.o;
+text: .text%__1cFRInfo2t6M_v_: c1_ScanBlocks.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueMap.o;
+text: .text%__1cFRInfo2t6M_v_: c1_ValueMap.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueSet.o;
+text: .text%__1cFRInfo2t6M_v_: c1_ValueSet.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueStack.o;
+text: .text%__1cFRInfo2t6M_v_: c1_ValueStack.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeBlob.o;
+text: .text%__1cFRInfo2t6M_v_: codeBlob.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: codeCache.o;
+text: .text%__1cICHeapObj2n6FI_pv_;
+text: .text%__1cCosGmalloc6FI_pv_;
+text: .text%__1cICodeHeap2t6M_v_;
+text: .text%__1cMVirtualSpace2t6M_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compilationPolicy.o;
+text: .text%__1cMelapsedTimer2t6M_v_: compilationPolicy.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compileBroker.o;
+text: .text%__1cMelapsedTimer2t6M_v_: compileBroker.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: compiledIC.o;
+text: .text%__1cFRInfo2t6M_v_: compiledIC.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: deoptimization.o;
+text: .text%__1cFRInfo2t6M_v_: deoptimization.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o;
+text: .text%__1cMelapsedTimer2t6M_v_: fprofiler.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame.o;
+text: .text%__1cFRInfo2t6M_v_: frame.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame_x86.o;
+text: .text%__1cFRInfo2t6M_v_: frame_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o;
+text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o;
+text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: generateOopMap.o;
+text: .text%__1cNCellTypeStateLmake_bottom6F_0_: generateOopMap.o;
+text: .text%__1cNCellTypeStateImake_any6Fi_0_: generateOopMap.o;
+text: .text%__1cNCellTypeStateImake_top6F_0_: generateOopMap.o;
+text: .text%__1cMelapsedTimer2t6M_v_: generateOopMap.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o;
+text: .text%__1cKEntryPoint2t6M_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter_x86.o;
+text: .text%__1cFRInfo2t6M_v_: interpreter_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: java.o;
+text: .text%__1cFRInfo2t6M_v_: java.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
+text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiEnvBase.o;
+text: .text%__1cLResourceObj2n6FIn0APallocation_type__pv_;
+text: .text%__1cNGrowableArray4CpnMJvmtiEnvBase__2t6Mii_v_: jvmtiEnvBase.o;
+text: .text%__1cUGenericGrowableArray2t6Mii_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEventController.o;
+text: .text%__1cRJvmtiEventEnabled2t6M_v_;
+text: .text%__1cRJvmtiEventEnabledFclear6M_v_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiImpl.o;
+text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: jvmtiImpl.o;
+text: .text%__1cNGrowableArray4CpnPJvmtiRawMonitor__2t6Mii_v_: jvmtiImpl.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiTagMap.o;
+text: .text%__1cJMemRegion2t6M_v_: jvmtiTagMap.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: klassVtable.o;
+text: .text%__1cFRInfo2t6M_v_: klassVtable.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: management.o;
+text: .text%__1cJTimeStamp2t6M_v_: management.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: markSweep.o;
+text: .text%__1cJMarkSweepSMarkAndPushClosure2t6M_v_: markSweep.o;
+text: .text%__1cJMarkSweepRFollowRootClosure2t6M_v_: markSweep.o;
+text: .text%__1cJMarkSweepSFollowStackClosure2t6M_v_: markSweep.o;
+text: .text%__1cJMarkSweepUAdjustPointerClosure2t6Mi_v_: markSweep.o;
+text: .text%__1cJMarkSweepOIsAliveClosure2t6M_v_: markSweep.o;
+text: .text%__1cJMarkSweepQKeepAliveClosure2t6M_v_: markSweep.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: memoryService.o;
+text: .text%__1cWNonPrintingResourceObj2n6FInLResourceObjPallocation_type__pv_: memoryService.o;
+text: .text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_: memoryService.o;
+text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_: memoryService.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodOop.o;
+text: .text%__1cFRInfo2t6M_v_: methodOop.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nativeInst_x86.o;
+text: .text%__1cFRInfo2t6M_v_: nativeInst_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nmethod.o;
+text: .text%__1cFRInfo2t6M_v_: nmethod.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o;
+text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris.o;
+text: .text%__1cFRInfo2t6M_v_: os_solaris.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris_x86.o;
+text: .text%__1cFRInfo2t6M_v_: os_solaris_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o;
+text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: parGCAllocBuffer.o;
+text: .text%__1cRalign_object_size6Fi_i_: parGCAllocBuffer.o;
+text: .text%__1cHoopDescLheader_size6F_i_: parGCAllocBuffer.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psAdaptiveSizePolicy.o;
+text: .text%__1cMelapsedTimer2t6M_v_: psAdaptiveSizePolicy.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psMarkSweep.o;
+text: .text%__1cMelapsedTimer2t6M_v_: psMarkSweep.o;
+text: .text%__1cTPSAlwaysTrueClosure2t6M_v_: psMarkSweep.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psPromotionLAB.o;
+text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: psPromotionLAB.o;
+text: .text%__1cRalign_object_size6Fi_i_: psPromotionLAB.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: psScavenge.o;
+text: .text%__1cMelapsedTimer2t6M_v_: psScavenge.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: relocInfo.o;
+text: .text%__1cQRelocationHolder2t6M_v_: relocInfo.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o;
+text: .text%__1cJTimeStamp2t6M_v_: runtimeService.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint.o;
+text: .text%__1cFRInfo2t6M_v_: safepoint.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint_solaris_x86.o;
+text: .text%__1cFRInfo2t6M_v_: safepoint_solaris_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o;
+text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedRuntime.o;
+text: .text%__1cFRInfo2t6M_v_: sharedRuntime.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: tenuredGeneration.o;
+text: .text%__1cRCardTableModRefBSbCpar_chunk_heapword_alignment6F_I_: tenuredGeneration.o;
+text: .text%__1cEMIN24CI_6FTA0_0_: tenuredGeneration.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vframeArray.o;
+text: .text%__1cFRInfo2t6M_v_: vframeArray.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vmStructs.o;
+text: .text%__1cFRInfo2t6M_v_: vmStructs.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o;
+text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_;
+text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vtableStubs_x86.o;
+text: .text%__1cFRInfo2t6M_v_: vtableStubs_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer_x86.o;
+text: .text%JNI_CreateJavaVM;
+text: .text%__1cCosVatomic_xchg_bootstrap6Fipoi_i_;
+text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_;
+text: .text%__1cHThreadsYis_supported_jni_version6Fi_C_;
+text: .text%__1cMostream_init6F_v_;
+text: .text%__1cMoutputStream2t6Mi_v_;
+text: .text%__1cCosEinit6F_v_;
+text: .text%__1cCosLinit_random6Fl_v_;
+text: .text%__1cCosHSolarisWinitialize_system_info6F_v_;
+text: .text%__1cOThreadCriticalKinitialize6F_v_;
+text: .text%__1cCosMvm_page_size6F_i_;
+text: .text%__1cJArgumentsWinit_system_properties6F_v_;
+text: .text%__1cJArgumentsQPropertyList_add6FppnOSystemProperty_2_v_;
+text: .text%__1cTAbstract_VM_VersionHvm_name6F_pkc_;
+text: .text%__1cTAbstract_VM_VersionJvm_vendor6F_pkc_;
+text: .text%__1cTAbstract_VM_VersionOvm_info_string6F_pkc_;
+text: .text%__1cCosbDinit_system_properties_values6F_v_;
+text: .text%__1cCosIjvm_path6Fpci_v_;
+text: .text%__1cCosNset_boot_path6Fcc_i_;
+text: .text%__1cJArgumentsFparse6FpknOJavaVMInitArgs__i_;
+text: .text%__1cMmatch_option6FpknMJavaVMOption_pkcp4_i_: arguments.o;
+text: .text%__1cJArgumentsVprocess_settings_file6Fpkcii_i_;
+text: .text%__1cJArgumentsSparse_vm_init_args6FpknOJavaVMInitArgs__i_;
+text: .text%__1cMSysClassPath2t6Mpkc_v_;
+text: .text%__1cJArgumentsbSparse_java_tool_options_environment_variable6FpnMSysClassPath_pi_i_;
+text: .text%__1cCosGgetenv6Fpkcpci_i_;
+text: .text%__1cJArgumentsWparse_each_vm_init_arg6FpknOJavaVMInitArgs_pnMSysClassPath_pi_i_;
+text: .text%__1cMmatch_option6FpknMJavaVMOption_ppkc5i_i_: arguments.o;
+text: .text%__1cJArgumentsMadd_property6Fpkc_i_;
+text: .text%__1cJArgumentsXPropertyList_unique_add6FppnOSystemProperty_pkcpc_v_;
+text: .text%__1cCosEfree6Fpv_v_;
+text: .text%__1cJArgumentsbNparse_java_options_environment_variable6FpnMSysClassPath_pi_i_;
+text: .text%__1cJArgumentsVfinalize_vm_init_args6FpnMSysClassPath_i_i_;
+text: .text%__1cMSysClassPathPexpand_endorsed6M_v_;
+text: .text%__1cJArgumentsMget_property6Fpkc_2_;
+text: .text%__1cJArgumentsWPropertyList_get_value6FpnOSystemProperty_pkc_4_;
+text: .text%__1cMSysClassPathQadd_jars_to_path6Fpcpkc_1_;
+text: .text%__1cJArgumentsZcheck_vm_args_consistency6F_i_;
+text: .text%__1cJArgumentsRverify_percentage6FIpkc_i_;
+text: .text%__1cMSysClassPath2T6M_v_;
+text: .text%__1cMSysClassPathNreset_item_at6Mi_v_: arguments.o;
+text: .text%__1cJArgumentsbOparse_java_compiler_environment_variable6F_v_;
+text: .text%__1cJArgumentsVset_parallel_gc_flags6F_v_;
+text: .text%__1cJArgumentsbBset_cms_and_parnew_gc_flags6F_v_;
+text: .text%__1cJArgumentsTset_parnew_gc_flags6F_v_;
+text: .text%__1cSCommandLineFlagsExKis_default6FnPCommandLineFlag__i_;
+text: .text%__1cJArgumentsUset_ergonomics_flags6F_v_;
+text: .text%__1cCosXis_server_class_machine6F_i_;
+text: .text%__1cJTimeStampJupdate_to6Mx_v_;
+text: .text%__1cCosOjavaTimeMillis6F_x_;
+text: .text%__1cJTraceTime2t6MpkciipnMoutputStream__v_;
+text: .text%__1cCosGinit_26F_i_;
+text: .text%__1cCosHSolarisKmmap_chunk6FpcIii_2_;
+text: .text%__1cCosXnon_memory_address_word6F_pc_;
+text: .text%__1cCosHSolarisRmpss_sanity_check6F_v_;
+text: .text%__1cCosHSolarisOset_mpss_range6FpcII_i_;
+text: .text%__1cCosPuncommit_memory6FpcI_i_;
+text: .text%__1cCosHSolarisOlibthread_init6F_v_;
+text: .text%__1cOisT2_libthread6F_i_;
+text: .text%__1cRlwp_priocntl_init6F_i_: os_solaris.o;
+text: .text%__1cNpriocntl_stub6FinGidtype_lipc_l_: os_solaris.o;
+text: .text%__1cOresolve_symbol6Fpkc_pC_: os_solaris.o;
+text: .text%__1cCosHSolarisQsignal_sets_init6F_v_;
+text: .text%__1cCosHSolarisOis_sig_ignored6Fi_i_;
+text: .text%__1cCosHSolarisPinit_signal_mem6F_v_;
+text: .text%__1cCosHSolarisXinstall_signal_handlers6F_v_;
+text: .text%__1cCosHSolarisSset_signal_handler6Fiii_v_;
+text: .text%__1cCosHSolarisUsynchronization_init6F_v_;
+text: .text%__1cDhpiKinitialize6F_i_;
+text: .text%__1cDhpiYinitialize_get_interface6FpnIvm_calls__v_;
+text: .text%__1cQostream_init_log6F_v_;
+text: .text%__1cNdefaultStreamMhas_log_file6M_i_;
+text: .text%__1cRis_error_reported6F_i_;
+text: .text%__1cNdefaultStreamEinit6M_v_;
+text: .text%__1cSThreadLocalStorageEinit6F_v_;
+text: .text%__1cSThreadLocalStorageHpd_init6F_v_;
+text: .text%__1cCosbDallocate_thread_local_storage6F_i_;
+text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_;
+text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_x86.o;
+text: .text%__1cPvm_init_globals6F_v_;
+text: .text%__1cScheck_ThreadShadow6F_v_;
+text: .text%__1cRcheck_basic_types6F_v_;
+text: .text%__1cNeventlog_init6F_v_;
+text: .text%__1cKmutex_init6F_v_;
+text: .text%__1cFMutex2t6Mipkci_v_;
+text: .text%lwp_cond_init: os_solaris.o;
+text: .text%lwp_mutex_init: os_solaris.o;
+text: .text%__1cHMonitor2t6Mipkci_v_;
+text: .text%__1cOchunkpool_init6F_v_;
+text: .text%__1cPperfMemory_init6F_v_;
+text: .text%__1cKPerfMemoryKinitialize6F_v_;
+text: .text%__1cCosZvm_allocation_granularity6F_i_;
+text: .text%__1cKPerfMemoryUcreate_memory_region6FI_v_;
+text: .text%__1cUcreate_shared_memory6FI_pc_: perfMemory_solaris.o;
+text: .text%__1cSmmap_create_shared6FI_pc_: perfMemory_solaris.o;
+text: .text%__1cCosScurrent_process_id6F_i_;
+text: .text%__1cNget_user_name6Fl_pc_: perfMemory_solaris.o;
+text: .text%__1cQget_user_tmp_dir6Fpkc_pc_: perfMemory_solaris.o;
+text: .text%__1cCosSget_temp_directory6F_pkc_;
+text: .text%__1cWget_sharedmem_filename6Fpkci_pc_: perfMemory_solaris.o;
+text: .text%__1cbBcleanup_sharedmem_resources6Fpkc_v_: perfMemory_solaris.o;
+text: .text%__1cTis_directory_secure6Fpkc_i_: perfMemory_solaris.o;
+text: .text%lstat: perfMemory_solaris.o;
+text: .text%__1cPfilename_to_pid6Fpkc_l_: perfMemory_solaris.o;
+text: .text%__1cbAcreate_sharedmem_resources6Fpkc1I_i_: perfMemory_solaris.o;
+text: .text%__1cRmake_user_tmp_dir6Fpkc_i_: perfMemory_solaris.o;
+text: .text%__1cKJavaThread2t6M_v_;
+text: .text%__1cGThread2t6M_v_;
+text: .text%__1cFArena2t6M_v_;
+text: .text%__1cFChunk2n6FII_pv_;
+text: .text%__1cOThreadCritical2t6M_v_;
+text: .text%__1cOThreadCritical2T6M_v_;
+text: .text%__1cFChunk2t6MI_v_;
+text: .text%__1cKHandleMarkKinitialize6MpnGThread__v_;
+text: .text%__1cKJavaThreadKinitialize6M_v_;
+text: .text%__1cNjni_functions6F_pknTJNINativeInterface___;
+text: .text%__1cQThreadStatistics2t6M_v_;
+text: .text%__1cGParker2t6M_v_;
+text: .text%__1cWThreadLocalAllocBufferKinitialize6M_v_;
+text: .text%__1cWThreadLocalAllocBufferKinitialize6MpnIHeapWord_22_v_;
+text: .text%__1cWThreadLocalAllocBufferMinitial_size6F_I_;
+text: .text%__1cWThreadLocalAllocBufferVinitialize_statistics6M_v_;
+text: .text%__1cMFlatProfilerJis_active6F_i_;
+text: .text%__1cUThreadSafepointStateGcreate6FpnKJavaThread__v_;
+text: .text%__1cUThreadSafepointState2t6MpnKJavaThread__v_;
+text: .text%__1cGThreadbArecord_stack_base_and_size6M_v_;
+text: .text%__1cCosScurrent_stack_base6F_pC_;
+text: .text%__1cCosScurrent_stack_size6F_I_;
+text: .text%__1cGThreadbFinitialize_thread_local_storage6M_v_;
+text: .text%__1cSThreadLocalStorageKset_thread6FpnGThread__v_;
+text: .text%__1cSThreadLocalStorageNpd_set_thread6FpnGThread__v_;
+text: .text%__1cCosbBthread_local_storage_at_put6Fipv_v_;
+text: .text%__1cSThreadLocalStorageSset_thread_in_slot6FpnGThread__v_;
+text: .text%get_thread;
+text: .text%__1cSThreadLocalStoragebBget_thread_via_cache_slowly6FIi_pnGThread__;
+text: .text%__1cSThreadLocalStoragePget_thread_slow6F_pnGThread__;
+text: .text%__1cCosXthread_local_storage_at6Fi_pv_;
+text: .text%__1cCosVcurrent_stack_pointer6F_pC_;
+text: .text%__1cCosRinitialize_thread6F_v_;
+text: .text%__1cNReservedSpaceUpage_align_size_down6FI_I_;
+text: .text%__1cCosHSolarisVinit_thread_fpu_state6F_v_;
+text: .text%__1cOJNIHandleBlockOallocate_block6FpnGThread__p0_;
+text: .text%__1cFMutexbClock_without_safepoint_check6M_v_;
+text: .text%__1cFMutexGunlock6M_v_;
+text: .text%__1cGThreadWset_as_starting_thread6M_i_;
+text: .text%__1cCosScreate_main_thread6FpnGThread__i_;
+text: .text%__1cQcreate_os_thread6FpnGThread_I_pnIOSThread__: os_solaris.o;
+text: .text%__1cIOSThread2t6MpFpv_i1_v_;
+text: .text%__1cIOSThreadNpd_initialize6M_v_;
+text: .text%__1cCosHSolarisPhotspot_sigmask6FpnGThread__v_;
+text: .text%__1cCosHSolarisRunblocked_signals6F_pnIsigset_t__;
+text: .text%__1cGThreadMis_VM_thread6kM_i_: thread.o;
+text: .text%__1cCosHSolarisKvm_signals6F_pnIsigset_t__;
+text: .text%__1cKJavaThreadYcreate_stack_guard_pages6M_v_;
+text: .text%__1cCosNcommit_memory6FpcI_i_;
+text: .text%__1cCosMguard_memory6FpcI_i_;
+text: .text%__1cMinit_globals6F_i_;
+text: .text%__1cPmanagement_init6F_v_;
+text: .text%__1cKManagementEinit6F_v_;
+text: .text%__1cNExceptionMark2t6MrpnGThread__v_;
+text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongVariable__;
+text: .text%__1cIPerfLong2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability__v_;
+text: .text%__1cIPerfData2t6MnJCounterNS_pkcn0AFUnits_n0ALVariability__v_;
+text: .text%__1cIPerfDataMcreate_entry6MnJBasicType_II_v_;
+text: .text%__1cKPerfMemoryFalloc6FI_pc_;
+text: .text%__1cFMutexElock6M_v_;
+text: .text%__1cFMutexElock6MpnGThread__v_;
+text: .text%__1cKPerfMemoryMmark_updated6F_v_;
+text: .text%__1cCosLelapsedTime6F_d_;
+text: .text%__1cMgetTimeNanos6F_x_: os_solaris.o;
+text: .text%__1cPoldgetTimeNanos6F_x_: os_solaris.o;
+text: .text%__1cPPerfDataManagerIadd_item6FpnIPerfData_i_v_;
+text: .text%__1cMPerfDataList2t6Mi_v_;
+text: .text%__1cCosbCis_thread_cpu_time_supported6F_i_;
+text: .text%__1cNExceptionMark2T6M_v_;
+text: .text%__1cNThreadServiceEinit6F_v_;
+text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnPPerfLongCounter__;
+text: .text%__1cORuntimeServiceEinit6F_v_;
+text: .text%__1cTClassLoadingServiceEinit6F_v_;
+text: .text%__1cKvtune_init6F_v_;
+text: .text%__1cObytecodes_init6F_v_;
+text: .text%__1cJBytecodesKinitialize6F_v_;
+text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_iii_v_;
+text: .text%__1cJBytecodesDdef6Fn0AECode_pkc33nJBasicType_ii1i_v_;
+text: .text%__1cJBytecodesNpd_initialize6F_v_;
+text: .text%__1cQclassLoader_init6F_v_;
+text: .text%__1cLClassLoaderKinitialize6F_v_;
+text: .text%__1cLClassLoaderQload_zip_library6F_v_;
+text: .text%__1cCosTnative_java_library6F_pv_;
+text: .text%JVM_GetInterfaceVersion;
+text: .text%__1cHThreadsbMis_supported_jni_version_including_1_16Fi_C_;
+text: .text%__1cKHandleMark2T6M_v_;
+text: .text%__1cLClassLoaderbBsetup_bootstrap_search_path6F_v_;
+text: .text%__1cCosGstrdup6Fpkc_pc_;
+text: .text%__1cLClassLoaderbCupdate_class_path_entry_list6Fpkc_v_;
+text: .text%__1cCosEstat6FpkcpnEstat__i_;
+text: .text%stat: os_solaris.o;
+text: .text%__1cLClassLoaderXcreate_class_path_entry6FpcnEstat_ppnOClassPathEntry__v_;
+text: .text%__1cLClassLoaderSget_canonical_path6Fpc1i_i_;
+text: .text%JVM_RawMonitorCreate;
+text: .text%JVM_NativePath;
+text: .text%JVM_RawMonitorEnter;
+text: .text%__1cFMutexMjvm_raw_lock6M_v_;
+text: .text%JVM_RawMonitorExit;
+text: .text%__1cFMutexOjvm_raw_unlock6M_v_;
+text: .text%JVM_Open;
+text: .text%JVM_Lseek;
+text: .text%JVM_Close;
+text: .text%__1cDhpiFclose6Fi_i_: jvm.o;
+text: .text%__1cRClassPathZipEntry2t6Mppvpc_v_;
+text: .text%__1cOClassPathEntry2t6M_v_;
+text: .text%__1cLClassLoaderLadd_to_list6FpnOClassPathEntry__v_;
+text: .text%__1cOcodeCache_init6F_v_;
+text: .text%__1cJCodeCacheKinitialize6F_v_;
+text: .text%__1cICodeHeapHreserve6MIII_i_;
+text: .text%__1cLlog2_intptr6Fi_i_: heap.o;
+text: .text%__1cYalign_to_allocation_size6FI_I_: heap.o;
+text: .text%__1cNReservedSpace2t6MI_v_;
+text: .text%__1cNReservedSpaceKinitialize6MIIipc_v_;
+text: .text%__1cCosOreserve_memory6FIpc_1_;
+text: .text%__1cMVirtualSpaceKinitialize6MnNReservedSpace_I_i_;
+text: .text%__1cMVirtualSpaceJexpand_by6MI_i_;
+text: .text%__1cMVirtualSpaceQuncommitted_size6kM_I_;
+text: .text%__1cMVirtualSpaceNreserved_size6kM_I_;
+text: .text%__1cMVirtualSpaceOcommitted_size6kM_I_;
+text: .text%__1cCosNcommit_memory6FpcII_i_;
+text: .text%__1cSalign_to_page_size6FI_I_: heap.o;
+text: .text%__1cICodeHeapFclear6M_v_;
+text: .text%__1cICodeHeapTmark_segmap_as_free6MII_v_;
+text: .text%__1cNMemoryServiceZadd_code_heap_memory_pool6FpnICodeHeap__v_;
+text: .text%__1cMCodeHeapPool2t6MpnICodeHeap_pkci_v_;
+text: .text%__1cICodeHeapIcapacity6kM_I_;
+text: .text%__1cICodeHeapMmax_capacity6kM_I_;
+text: .text%__1cKMemoryPool2t6Mpkcn0AIPoolType_IIii_v_;
+text: .text%__1cNMemoryManagerbDget_code_cache_memory_manager6F_p0_;
+text: .text%__1cNMemoryManager2t6M_v_;
+text: .text%__1cNMemoryManagerIadd_pool6MpnKMemoryPool__v_;
+text: .text%__1cKMemoryPoolLadd_manager6MpnNMemoryManager__v_;
+text: .text%__1cLicache_init6F_v_;
+text: .text%__1cPVM_Version_init6F_v_;
+text: .text%__1cKVM_VersionKinitialize6F_v_;
+text: .text%__1cKBufferBlobGcreate6Fpkci_p0_;
+text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: thread.o;
+text: .text%__1cRalign_code_offset6Fi_I_;
+text: .text%__1cICodeHeapLheader_size6F_I_;
+text: .text%__1cKBufferBlob2n6FII_pv_;
+text: .text%__1cJCodeCacheIallocate6Fi_pnICodeBlob__;
+text: .text%__1cICodeHeapIallocate6MI_pv_;
+text: .text%__1cICodeHeapPsearch_freelist6MI_pnJFreeBlock__;
+text: .text%__1cICodeHeapTmark_segmap_as_used6MII_v_;
+text: .text%__1cKBufferBlob2t6Mpkci_v_;
+text: .text%__1cICodeBlob2t6Mpkcii_v_;
+text: .text%__1cICodeBlobMset_oop_maps6MpnJOopMapSet__v_;
+text: .text%__1cNMemoryServiceXtrack_memory_pool_usage6FpnKMemoryPool__v_;
+text: .text%__1cKMemoryPoolYrecord_peak_memory_usage6M_v_;
+text: .text%__1cMCodeHeapPoolQget_memory_usage6M_nLMemoryUsage__;
+text: .text%__1cMCodeHeapPoolNused_in_bytes6M_I_: memoryPool.o;
+text: .text%__1cICodeHeapSallocated_capacity6kM_I_;
+text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o;
+text: .text%__1cXresource_allocate_bytes6FI_pc_;
+text: .text%__1cKCodeBuffer2t6MpCi_v_;
+text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_;
+text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_x86.o;
+text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_;
+text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_;
+text: .text%__1cJAssemblerFpushl6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerGpushfd6M_v_;
+text: .text%__1cJAssemblerEpopl6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerExorl6MpnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerFpopfd6M_v_;
+text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerDjcc6Mn0AJCondition_rnFLabel_nJrelocInfoJrelocType__v_;
+text: .text%__1cJAssemblerFcpuid6M_v_;
+text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerDjmp6MrnFLabel_nJrelocInfoJrelocType__v_;
+text: .text%__1cRAbstractAssemblerEbind6MrnFLabel__v_;
+text: .text%__1cRAbstractAssemblerHbind_to6MrnFLabel_i_v_;
+text: .text%__1cMDisplacementEbind6MrnFLabel_ipnRAbstractAssembler__v_;
+text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cJAssemblerMemit_operand6MpnMRegisterImpl_22nHAddressLScaleFactor_irknQRelocationHolder__v_;
+text: .text%__1cKRelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cJAssemblerEmovl6MnHAddress_pnMRegisterImpl__v_;
+text: .text%__1cJAssemblerDret6Mi_v_;
+text: .text%__1cMStubCodeMark2T6M_v_;
+text: .text%__1cRAbstractAssemblerFflush6M_v_;
+text: .text%__1cRStubCodeGeneratorLstub_epilog6MpnMStubCodeDesc__v_;
+text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_;
+text: .text%__1cFForteNregister_stub6FpkcpC3_v_;
+text: .text%__1cKVM_VersionWget_processor_features6F_v_;
+text: .text%__1cCosMsupports_sse6F_i_;
+text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_x86.o;
+text: .text%jio_snprintf;
+text: .text%jio_vsnprintf;
+text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_;
+text: .text%__1cSstubRoutines_init16F_v_;
+text: .text%__1cMStubRoutinesLinitialize16F_v_;
+text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_;
+text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_x86.o;
+text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_;
+text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_;
+text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_;
+text: .text%__1cJAssemblerEcall6MpCnJrelocInfoJrelocType__v_;
+text: .text%__1cJAssemblerJemit_data6MinJrelocInfoJrelocType_i_v_;
+text: .text%__1cKRelocationLspec_simple6FnJrelocInfoJrelocType__nQRelocationHolder__;
+text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: relocInfo.o;
+text: .text%__1cKRelocationLunpack_data6M_v_: codeBlob.o;
+text: .text%__1cJAssemblerJemit_data6MirknQRelocationHolder_i_v_;
+text: .text%__1cKCodeBufferIrelocate6MpCrknQRelocationHolder_i_v_;
+text: .text%__1cXruntime_call_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o;
+text: .text%__1cOMacroAssemblerJincrement6MpnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_i_v_;
+text: .text%__1cOMacroAssemblerKget_thread6MpnMRegisterImpl__v_;
+text: .text%__1cSThreadLocalStorageTpd_getTlsAccessMode6F_n0AQpd_tlsAccessMode__;
+text: .text%__1cJAssemblerFpushl6Mi_v_;
+text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_;
+text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_;
+text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_;
+text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_x86.o;
+text: .text%__1cOMacroAssemblerFenter6M_v_;
+text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerEdecl6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_;
+text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerGfstp_s6MnHAddress__v_;
+text: .text%__1cJAssemblerGfstp_d6MnHAddress__v_;
+text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_x86.o;
+text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_;
+text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_x86.o;
+text: .text%__1cJAssemblerExchg6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cJAssemblerGpushad6M_v_;
+text: .text%__1cJAssemblerFpopad6M_v_;
+text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_x86.o;
+text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_x86.o;
+text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_;
+text: .text%__1cJAssemblerGfnsave6MnHAddress__v_;
+text: .text%__1cJAssemblerFfwait6M_v_;
+text: .text%__1cJAssemblerFfld_d6MnHAddress__v_;
+text: .text%__1cJAssemblerFfst_d6MnHAddress__v_;
+text: .text%__1cOMacroAssemblerPempty_FPU_stack6M_v_;
+text: .text%__1cJAssemblerFffree6Mi_v_;
+text: .text%__1cJAssemblerLemit_farith6Miii_v_;
+text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_;
+text: .text%__1cJAssemblerGfrstor6MnHAddress__v_;
+text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_x86.o;
+text: .text%__1cJTraceTime2T6M_v_;
+text: .text%__1cNcarSpace_init6F_v_;
+text: .text%__1cICarSpaceEinit6F_v_;
+text: .text%__1cNuniverse_init6F_i_;
+text: .text%__1cLJavaClassesbAcompute_hard_coded_offsets6F_v_;
+text: .text%__1cLFileMapInfoKinitialize6M_i_;
+text: .text%__1cLFileMapInfoNfail_continue6MpkcE_v_;
+text: .text%__1cLFileMapInfoFclose6M_v_;
+text: .text%__1cIUniversePinitialize_heap6F_i_;
+text: .text%__1cPMarkSweepPolicy2t6M_v_;
+text: .text%__1cbCTwoGenerationCollectorPolicyQinitialize_flags6M_v_;
+text: .text%__1cbCTwoGenerationCollectorPolicyMrem_set_name6M_nJGenRemSetEName__: collectorPolicy.o;
+text: .text%__1cJGenRemSetYmax_alignment_constraint6Fn0AEName__I_;
+text: .text%__1cRCardTableModRefBSbBct_max_alignment_constraint6F_I_;
+text: .text%__1cbCTwoGenerationCollectorPolicyUinitialize_size_info6M_v_;
+text: .text%__1cPMarkSweepPolicyWinitialize_generations6M_v_;
+text: .text%__1cbCTwoGenerationCollectorPolicyVnumber_of_generations6M_i_: collectorPolicy.o;
+text: .text%__1cXPermanentGenerationSpec2t6MnHPermGenEName_IIIIII_v_;
+text: .text%__1cQGCPolicyCounters2t6Mpkcii_v_;
+text: .text%__1cPPerfDataManagerMcounter_name6Fpkc2_pc_;
+text: .text%__1cPPerfDataManagerWcreate_string_constant6FnJCounterNS_pkc3pnGThread__pnSPerfStringConstant__;
+text: .text%__1cSPerfStringConstant2t6MnJCounterNS_pkc3_v_;
+text: .text%__1cNPerfByteArray2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_i_v_;
+text: .text%__1cKPerfStringKset_string6Mpkc_v_;
+text: .text%__1cPPerfDataManagerUcreate_long_constant6FnJCounterNS_pkcnIPerfDataFUnits_xpnGThread__pnQPerfLongConstant__;
+text: .text%__1cQGenCollectedHeap2t6MpnPCollectorPolicy__v_;
+text: .text%__1cKSharedHeap2t6MpnPCollectorPolicy__v_;
+text: .text%__1cNCollectedHeap2t6M_v_;
+text: .text%__1cHGCCauseJto_string6Fn0AFCause__pkc_;
+text: .text%__1cPPerfDataManagerWcreate_string_variable6FnJCounterNS_pkci3pnGThread__pnSPerfStringVariable__;
+text: .text%__1cMSubTasksDone2t6Mi_v_;
+text: .text%__1cMSubTasksDoneFclear6M_v_;
+text: .text%__1cMSubTasksDoneFvalid6M_i_;
+text: .text%__1cQGenCollectedHeapKinitialize6M_i_;
+text: .text%__1cPCollectorPolicyLgenerations6M_ppnOGenerationSpec__: collectorPolicy.o;
+text: .text%__1cPCollectorPolicyUpermanent_generation6M_pnXPermanentGenerationSpec__: collectorPolicy.o;
+text: .text%__1cXPermanentGenerationSpecFalign6MI_v_;
+text: .text%__1cQGenCollectedHeapIallocate6MIpnXPermanentGenerationSpec_pIpipnNReservedSpace__pc_;
+text: .text%__1cOGenerationSpecRn_covered_regions6kM_i_: collectorPolicy.o;
+text: .text%__1cNReservedSpace2t6MIIipc_v_;
+text: .text%__1cPCollectorPolicyOcreate_rem_set6MnJMemRegion_i_pnJGenRemSet__;
+text: .text%__1cbCTwoGenerationCollectorPolicyQbarrier_set_name6M_nKBarrierSetEName__: collectorPolicy.o;
+text: .text%__1cLCardTableRS2t6MnJMemRegion_i_v_;
+text: .text%__1cRCardTableModRefBS2t6MnJMemRegion_i_v_;
+text: .text%__1cNReservedSpaceYallocation_align_size_up6FI_I_;
+text: .text%__1cJMemRegion2t6M_v_: cardTableModRefBS.o;
+text: .text%__1cKSharedHeapPset_barrier_set6MpnKBarrierSet__v_;
+text: .text%__1cNReservedSpaceKfirst_part6MIii_0_;
+text: .text%__1cNReservedSpace2t6MpcI_v_;
+text: .text%__1cOGenerationSpecEinit6MnNReservedSpace_ipnJGenRemSet__pnKGeneration__;
+text: .text%__1cQDefNewGeneration2t6MnNReservedSpace_Iipkc_v_;
+text: .text%__1cKGeneration2t6MnNReservedSpace_Ii_v_;
+text: .text%__1cIageTable2t6Mi_v_;
+text: .text%__1cIageTableFclear6M_v_;
+text: .text%__1cFArenaEgrow6MI_pv_;
+text: .text%__1cFChunkJnext_chop6M_v_;
+text: .text%__1cFChunkEchop6M_v_;
+text: .text%__1cFChunk2k6Fpv_v_;
+text: .text%__1cRCardTableModRefBSVresize_covered_region6MnJMemRegion__v_;
+text: .text%__1cRCardTableModRefBSbCfind_covering_region_by_base6MpnIHeapWord__i_;
+text: .text%__1cRCardTableModRefBSbAlargest_prev_committed_end6kMi_pnIHeapWord__;
+text: .text%__1cWSequentialSubTasksDoneFclear6M_v_;
+text: .text%__1cSGenerationCounters2t6MpkciipnMVirtualSpace__v_;
+text: .text%__1cPPerfDataManagerKname_space6Fpkci_pc_;
+text: .text%__1cRCollectorCounters2t6Mpkci_v_;
+text: .text%__1cOCSpaceCounters2t6MpkciIpnPContiguousSpace_pnSGenerationCounters__v_;
+text: .text%__1cPPerfDataManagerKname_space6Fpkc2i_pc_;
+text: .text%__1cPPerfDataManagerUcreate_long_variable6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnQPerfLongVariable__;
+text: .text%__1cPPerfLongVariant2t6MnJCounterNS_pkcnIPerfDataFUnits_n0CLVariability_pnUPerfLongSampleHelper__v_;
+text: .text%__1cPPerfLongVariantGsample6M_v_;
+text: .text%__1cZContiguousSpaceUsedHelperLtake_sample6M_x_: cSpaceCounters.o;
+text: .text%__1cPContiguousSpaceEused6kM_I_: space.o;
+text: .text%__1cQDefNewGenerationYcompute_space_boundaries6MI_v_;
+text: .text%__1cQCompactibleSpaceKinitialize6MnJMemRegion_i_v_;
+text: .text%__1cFSpaceKinitialize6MnJMemRegion_i_v_;
+text: .text%__1cFSpaceKset_bottom6MpnIHeapWord__v_: space.o;
+text: .text%__1cJEdenSpaceHset_end6MpnIHeapWord__v_: space.o;
+text: .text%__1cJEdenSpaceFclear6M_v_;
+text: .text%__1cPContiguousSpaceFclear6M_v_;
+text: .text%__1cFSpaceFclear6M_v_;
+text: .text%__1cFSpaceHset_end6MpnIHeapWord__v_: space.o;
+text: .text%__1cQDefNewGenerationPupdate_counters6M_v_;
+text: .text%__1cSGenerationCountersKupdate_all6M_v_: generationCounters.o;
+text: .text%__1cNReservedSpaceJlast_part6MI_0_;
+text: .text%__1cRTenuredGeneration2t6MnNReservedSpace_IipnJGenRemSet__v_;
+text: .text%__1cOCardGeneration2t6MnNReservedSpace_IipnJGenRemSet__v_;
+text: .text%__1cWBlockOffsetSharedArray2t6MnJMemRegion_I_v_;
+text: .text%__1cWBlockOffsetSharedArrayGresize6MI_v_;
+text: .text%__1cNReservedSpaceSpage_align_size_up6FI_I_;
+text: .text%__1cLCardTableRSVresize_covered_region6MnJMemRegion__v_;
+text: .text%__1cLCardTableRSKis_aligned6MpnIHeapWord__i_: cardTableRS.o;
+text: .text%__1cHGCStats2t6M_v_;
+text: .text%__1cWOffsetTableContigSpace2t6MpnWBlockOffsetSharedArray_nJMemRegion__v_;
+text: .text%__1cQBlockOffsetArray2t6MpnWBlockOffsetSharedArray_nJMemRegion_i_v_;
+text: .text%__1cWOffsetTableContigSpaceKset_bottom6MpnIHeapWord__v_;
+text: .text%__1cQBlockOffsetArrayGresize6MI_v_: blockOffsetTable.o;
+text: .text%__1cWOffsetTableContigSpaceHset_end6MpnIHeapWord__v_;
+text: .text%__1cWOffsetTableContigSpaceFclear6M_v_;
+text: .text%__1cbBBlockOffsetArrayContigSpaceUinitialize_threshold6M_pnIHeapWord__;
+text: .text%__1cUGenericGrowableArrayEgrow6Mi_v_;
+text: .text%__1cXPermanentGenerationSpecEinit6MnNReservedSpace_IpnJGenRemSet__pnHPermGen__;
+text: .text%__1cRCompactingPermGen2t6MnNReservedSpace_1IpnJGenRemSet_pnXPermanentGenerationSpec__v_;
+text: .text%__1cUCompactingPermGenGen2t6MnNReservedSpace_1IipnJGenRemSet_pnPContiguousSpace_pnXPermanentGenerationSpec__v_;
+text: .text%__1cUCompactingPermGenGenbFinitialize_performance_counters6M_v_;
+text: .text%__1cbCOneContigSpaceCardGenerationIcapacity6kM_I_;
+text: .text%__1cPCollectorPolicybFis_concurrent_mark_sweep_policy6M_i_: collectorPolicy.o;
+text: .text%__1cWThreadLocalAllocBufferWstartup_initialization6F_v_;
+text: .text%__1cPGlobalTLABStats2t6M_v_;
+text: .text%__1cPGlobalTLABStatsKinitialize6M_v_;
+text: .text%__1cXAdaptiveWeightedAverageGsample6Mf_v_;
+text: .text%__1cXAdaptiveWeightedAverageYcompute_adaptive_average6Mff_f_;
+text: .text%__1cQGenCollectedHeapNtlab_capacity6kM_I_;
+text: .text%__1cQDefNewGenerationYsupports_tlab_allocation6kM_i_: defNewGeneration.o;
+text: .text%__1cQDefNewGenerationNtlab_capacity6kM_I_: defNewGeneration.o;
+text: .text%__1cKGenerationYsupports_tlab_allocation6kM_i_: tenuredGeneration.o;
+text: .text%__1cWThreadLocalAllocBufferImax_size6F_I_;
+text: .text%__1cOBasicHashtable2t6Mii_v_: universe.o;
+text: .text%__1cLClassLoaderZcreate_package_info_table6F_v_;
+text: .text%__1cOBasicHashtable2t6Mii_v_: classLoader.o;
+text: .text%__1cQinterpreter_init6F_v_;
+text: .text%__1cTAbstractInterpreterKinitialize6F_v_;
+text: .text%__1cNTemplateTableKinitialize6F_v_;
+text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF_vc_v_;
+text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFi_vi_v_;
+text: .text%__1cITemplateKinitialize6MinITosState_1pFi_vi_v_;
+text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJOperation__v4_v_;
+text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pFn0AJCondition__v4_v_;
+text: .text%__1cNTemplateTableDdef6FnJBytecodesECode_inITosState_3pF3_v3_v_;
+text: .text%__1cNTemplateTableNpd_initialize6F_v_;
+text: .text%__1cRInvocationCounterMreinitialize6Fi_v_;
+text: .text%__1cRInvocationCounterDdef6Fn0AFState_ipFnMmethodHandle_pnGThread__pC_v_;
+text: .text%__1cJStubQdDueue2t6MpnNStubInterface_ipnFMutex_pkc_v_;
+text: .text%__1cICodeHeapJexpand_by6MI_i_;
+text: .text%__1cJStubQdDueueOregister_queue6Fp0_v_;
+text: .text%__1cUInterpreterGenerator2t6MpnJStubQdDueue__v_;
+text: .text%__1cbCAbstractInterpreterGenerator2t6MpnJStubQdDueue__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorMgenerate_all6M_v_;
+text: .text%__1cLCodeletMark2t6MrpnZInterpreterMacroAssembler_pkcinJBytecodesECode__v_: interpreter.o;
+text: .text%__1cJStubQdDueueHrequest6Mi_pnEStub__;
+text: .text%__1cbBInterpreterCodeletInterfaceRcode_size_to_size6kMi_i_: interpreter.o;
+text: .text%__1cbBInterpreterCodeletInterfaceKinitialize6MpnEStub_i_v_: interpreter.o;
+text: .text%__1cSInterpreterCodeletKinitialize6MpkcinJBytecodesECode__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorTgenerate_error_exit6Mpkc_pC_;
+text: .text%__1cOMacroAssemblerEstop6Mpkc_v_;
+text: .text%__1cJAssemblerEcall6MrnFLabel_nJrelocInfoJrelocType__v_;
+text: .text%__1cJAssemblerDhlt6M_v_;
+text: .text%__1cOMacroAssemblerFalign6Mi_v_;
+text: .text%__1cJAssemblerDnop6M_v_;
+text: .text%__1cJStubQdDueueGcommit6Mi_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_return_entry_for6MnITosState_i_pC_;
+text: .text%__1cZInterpreterMacroAssemblerbAget_cache_and_index_at_bcp6MpnMRegisterImpl_2i_v_;
+text: .text%__1cOMacroAssemblerSload_unsigned_word6MpnMRegisterImpl_nHAddress__i_;
+text: .text%__1cJAssemblerGmovzxw6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cJAssemblerEshll6MpnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_i_v_;
+text: .text%__1cZInterpreterMacroAssemblerNdispatch_next6MnITosState_i_v_;
+text: .text%__1cOMacroAssemblerSload_unsigned_byte6MpnMRegisterImpl_nHAddress__i_;
+text: .text%__1cJAssemblerGmovzxb6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cZInterpreterMacroAssemblerNdispatch_base6MnITosState_ppCi_v_;
+text: .text%__1cZInterpreterMacroAssemblerKverify_FPU6MinITosState__v_;
+text: .text%__1cZInterpreterMacroAssemblerKverify_oop6MpnMRegisterImpl_nITosState__v_;
+text: .text%__1cJAssemblerDjmp6MnHAddress__v_;
+text: .text%__1cOMacroAssemblerKverify_FPU6Mipkc_v_;
+text: .text%__1cKEntryPoint2t6MpC11111111_v_;
+text: .text%__1cJAssemblerEincl6MpnMRegisterImpl__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_deopt_entry_for6MnITosState_i_pC_;
+text: .text%__1cJAssemblerEcmpl6MnHAddress_i_v_;
+text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pCi_v_;
+text: .text%__1cOMacroAssemblerOcall_VM_helper6MpnMRegisterImpl_pCii_v_;
+text: .text%__1cZInterpreterMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_;
+text: .text%__1cOMacroAssemblerMcall_VM_base6MpnMRegisterImpl_22pCii_v_;
+text: .text%__1cZInterpreterMacroAssemblerZcheck_and_handle_popframe6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerDjcc6Mn0AJCondition_pCnJrelocInfoJrelocType__v_;
+text: .text%__1cTAbstractInterpreterSBasicType_as_index6FnJBasicType__i_;
+text: .text%__1cbCAbstractInterpreterGeneratorbBgenerate_result_handler_for6MnJBasicType__pC_;
+text: .text%__1cOMacroAssemblerGc2bool6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEsetb6Mn0AJCondition_pnMRegisterImpl__v_;
+text: .text%__1cOMacroAssemblerQsign_extend_byte6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_2_v_;
+text: .text%__1cOMacroAssemblerRsign_extend_short6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_2_v_;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEtemp6F_pnMRegisterImpl__;
+text: .text%__1cZInterpreterMacroAssemblerDpop6MnITosState__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorbFgenerate_slow_signature_handler6M_pC_;
+text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC222i_v_;
+text: .text%__1cTAbstractInterpreterRTosState_as_index6FnITosState__i_;
+text: .text%__1cTAbstractInterpreterMreturn_entry6FnITosState_i_pC_;
+text: .text%__1cKEntryPointFentry6kMnITosState__pC_;
+text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_continuation_for6MnITosState__pC_;
+text: .text%__1cbCAbstractInterpreterGeneratorZgenerate_safept_entry_for6MnITosState_pC_2_;
+text: .text%__1cZInterpreterMacroAssemblerEpush6MnITosState__v_;
+text: .text%__1cZInterpreterMacroAssemblerMdispatch_via6MnITosState_ppC_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorYgenerate_throw_exception6M_v_;
+text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC2i_v_;
+text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_i_v_;
+text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerSsuper_call_VM_leaf6MpCpnMRegisterImpl_33_v_;
+text: .text%__1cZInterpreterMacroAssemblerRremove_activation6MnITosState_pnMRegisterImpl_iii_v_;
+text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_i_v_;
+text: .text%__1cZInterpreterMacroAssemblerNunlock_object6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerElock6M_v_;
+text: .text%__1cJAssemblerHcmpxchg6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cHAddress2t6MinJrelocInfoJrelocType__v_;
+text: .text%__1cOMacroAssemblerFleave6M_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorbMgenerate_ArrayIndexOutOfBounds_handler6Mpkc_pC_;
+text: .text%__1cOMacroAssemblerHcall_VM6MpnMRegisterImpl_pC22i_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorbHgenerate_exception_handler_common6Mpkc2i_pC_;
+text: .text%__1cbCAbstractInterpreterGeneratorbJgenerate_StackOverflowError_handler6M_pC_;
+text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_method_entry6MnTAbstractInterpreterKMethodKind__pC_;
+text: .text%__1cUInterpreterGeneratorbEgenerate_asm_interpreter_entry6Mi_pC_;
+text: .text%__1cUInterpreterGeneratorXcheck_for_compiled_code6MrnFLabel__v_;
+text: .text%__1cUInterpreterGeneratorbDgenerate_stack_overflow_check6M_v_;
+text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cUInterpreterGeneratorUgenerate_fixed_frame6Mi_v_;
+text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_;
+text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_;
+text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_x86.o;
+text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_;
+text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_;
+text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_;
+text: .text%__1cUInterpreterGeneratorbAgenerate_run_compiled_code6M_v_;
+text: .text%__1cZInterpreterMacroAssemblerNsuper_call_VM6MpnMRegisterImpl_2pC22_v_;
+text: .text%__1cUInterpreterGeneratorLlock_method6M_v_;
+text: .text%__1cZInterpreterMacroAssemblerLlock_object6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_;
+text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_;
+text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_;
+text: .text%__1cLlog2_intptr6Fi_i_: interpreter_x86.o;
+text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_;
+text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_;
+text: .text%__1cJAssemblerGmovsxw6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cUInterpreterGeneratorXgenerate_abstract_entry6M_pC_;
+text: .text%__1cUInterpreterGeneratorTgenerate_math_entry6MnTAbstractInterpreterKMethodKind__pC_;
+text: .text%__1cOMacroAssemblerGsincos6Miii_v_;
+text: .text%__1cJAssemblerFfld_s6Mi_v_;
+text: .text%__1cJAssemblerEfabs6M_v_;
+text: .text%__1cOMacroAssemblerEfcmp6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerHfucomip6Mi_v_;
+text: .text%__1cOMacroAssemblerEfpop6M_v_;
+text: .text%__1cJAssemblerHfincstp6M_v_;
+text: .text%__1cJAssemblerEfsin6M_v_;
+text: .text%__1cJAssemblerEfcos6M_v_;
+text: .text%__1cJAssemblerFfsqrt6M_v_;
+text: .text%__1cUInterpreterGeneratorVgenerate_native_entry6Mi_pC_;
+text: .text%__1cNSharedRuntimebWnative_method_throw_unsatisfied_link_error_entry6F_pC_;
+text: .text%__1cJAssemblerGmembar6M_v_;
+text: .text%__1cJAssemblerEaddl6MnHAddress_i_v_;
+text: .text%__1cJAssemblerSemit_arith_operand6MipnMRegisterImpl_nHAddress_i_v_;
+text: .text%__1cZInterpreterMacroAssemblerSnotify_method_exit6MnITosState__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorbEset_entry_points_for_all_bytes6M_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorQset_entry_points6MnJBytecodesECode__v_;
+text: .text%__1cbCAbstractInterpreterGeneratorWset_short_entry_points6MpnITemplate_rpC44444444_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorVset_vtos_entry_points6MpnITemplate_rpC44444444_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorVgenerate_and_dispatch6MpnITemplate_nITosState__v_;
+text: .text%__1cITemplateIbytecode6kM_nJBytecodesECode__;
+text: .text%__1cZInterpreterMacroAssemblerPdispatch_prolog6MnITosState_i_v_;
+text: .text%__1cITemplateIgenerate6MpnZInterpreterMacroAssembler__v_;
+text: .text%__1cNTemplateTableDnop6F_v_;
+text: .text%__1cNTemplateTableKtransition6FnITosState_1_v_;
+text: .text%__1cZInterpreterMacroAssemblerPdispatch_epilog6MnITosState_i_v_;
+text: .text%__1cNDispatchTableJset_entry6MirnKEntryPoint__v_;
+text: .text%__1cNTemplateTableLaconst_null6F_v_;
+text: .text%__1cNTemplateTableGiconst6Fi_v_;
+text: .text%__1cNTemplateTableGlconst6Fi_v_;
+text: .text%__1cNTemplateTableGfconst6Fi_v_;
+text: .text%__1cJAssemblerEfldz6M_v_;
+text: .text%__1cJAssemblerEfld16M_v_;
+text: .text%__1cJAssemblerFfaddp6Mi_v_;
+text: .text%__1cNTemplateTableGdconst6Fi_v_;
+text: .text%__1cNTemplateTableGbipush6F_v_;
+text: .text%__1cNTemplateTableGsipush6F_v_;
+text: .text%__1cJAssemblerFbswap6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl_i_v_;
+text: .text%__1cNTemplateTableDldc6Fi_v_;
+text: .text%__1cJAssemblerEmovb6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC2_v_;
+text: .text%__1cJAssemblerFfld_s6MnHAddress__v_;
+text: .text%__1cZInterpreterMacroAssemblerbGget_unsigned_2_byte_index_at_bcp6MpnMRegisterImpl_i_v_;
+text: .text%__1cNTemplateTableGldc2_w6F_v_;
+text: .text%__1cJAssemblerEcmpb6MnHAddress_i_v_;
+text: .text%__1cNTemplateTableFiload6F_v_;
+text: .text%__1cNTemplateTableMlocals_index6FpnMRegisterImpl_i_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorUset_wide_entry_point6MpnITemplate_rpC_v_;
+text: .text%__1cNTemplateTableKwide_iload6F_v_;
+text: .text%__1cNTemplateTableRlocals_index_wide6FpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableFlload6F_v_;
+text: .text%__1cNTemplateTableKwide_lload6F_v_;
+text: .text%__1cNTemplateTableFfload6F_v_;
+text: .text%__1cNTemplateTableKwide_fload6F_v_;
+text: .text%__1cNTemplateTableFdload6F_v_;
+text: .text%__1cNTemplateTableKwide_dload6F_v_;
+text: .text%__1cNTemplateTableFaload6F_v_;
+text: .text%__1cNTemplateTableKwide_aload6F_v_;
+text: .text%__1cNTemplateTableFiload6Fi_v_;
+text: .text%__1cNTemplateTableFlload6Fi_v_;
+text: .text%__1cNTemplateTableFfload6Fi_v_;
+text: .text%__1cNTemplateTableFdload6Fi_v_;
+text: .text%__1cNTemplateTableHaload_06F_v_;
+text: .text%__1cNTemplateTableFaload6Fi_v_;
+text: .text%__1cNTemplateTableGiaload6F_v_;
+text: .text%__1cNTemplateTableLindex_check6FpnMRegisterImpl_2_v_;
+text: .text%__1cOMacroAssemblerKnull_check6MpnMRegisterImpl_i_v_;
+text: .text%__1cOMacroAssemblerZneeds_explicit_null_check6Fi_i_;
+text: .text%__1cNTemplateTableGlaload6F_v_;
+text: .text%__1cNTemplateTableGfaload6F_v_;
+text: .text%__1cNTemplateTableGdaload6F_v_;
+text: .text%__1cNTemplateTableGaaload6F_v_;
+text: .text%__1cNTemplateTableGbaload6F_v_;
+text: .text%__1cNTemplateTableGcaload6F_v_;
+text: .text%__1cNTemplateTableGsaload6F_v_;
+text: .text%__1cNTemplateTableGistore6F_v_;
+text: .text%__1cNTemplateTableLwide_istore6F_v_;
+text: .text%__1cNTemplateTableGlstore6F_v_;
+text: .text%__1cNTemplateTableLwide_lstore6F_v_;
+text: .text%__1cNTemplateTableGfstore6F_v_;
+text: .text%__1cNTemplateTableLwide_fstore6F_v_;
+text: .text%__1cNTemplateTableGdstore6F_v_;
+text: .text%__1cNTemplateTableLwide_dstore6F_v_;
+text: .text%__1cNTemplateTableGastore6F_v_;
+text: .text%__1cNTemplateTableLwide_astore6F_v_;
+text: .text%__1cNTemplateTableGistore6Fi_v_;
+text: .text%__1cNTemplateTableGlstore6Fi_v_;
+text: .text%__1cNTemplateTableGfstore6Fi_v_;
+text: .text%__1cNTemplateTableGdstore6Fi_v_;
+text: .text%__1cNTemplateTableGastore6Fi_v_;
+text: .text%__1cNTemplateTableHiastore6F_v_;
+text: .text%__1cNTemplateTableHlastore6F_v_;
+text: .text%__1cNTemplateTableHfastore6F_v_;
+text: .text%__1cNTemplateTableHdastore6F_v_;
+text: .text%__1cNTemplateTableHaastore6F_v_;
+text: .text%__1cZInterpreterMacroAssemblerRprofile_checkcast6MipnMRegisterImpl__v_;
+text: .text%__1cZInterpreterMacroAssemblerRgen_subtype_check6MpnMRegisterImpl_rnFLabel__v_;
+text: .text%__1cJAssemblerKrepne_scan6M_v_;
+text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl__v_;
+text: .text%__1cOMacroAssemblerSstore_check_part_16MpnMRegisterImpl__v_;
+text: .text%__1cOMacroAssemblerSstore_check_part_26MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEmovb6MnHAddress_i_v_;
+text: .text%__1cNTemplateTableHbastore6F_v_;
+text: .text%__1cJAssemblerEmovb6MnHAddress_pnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableHcastore6F_v_;
+text: .text%__1cJAssemblerEmovw6MnHAddress_pnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableHsastore6F_v_;
+text: .text%__1cNTemplateTableDpop6F_v_;
+text: .text%__1cNTemplateTableEpop26F_v_;
+text: .text%__1cNTemplateTableDdup6F_v_;
+text: .text%__1cJAssemblerFpushl6MnHAddress__v_;
+text: .text%__1cNTemplateTableGdup_x16F_v_;
+text: .text%__1cNTemplateTableGdup_x26F_v_;
+text: .text%__1cNTemplateTableEdup26F_v_;
+text: .text%__1cNTemplateTableHdup2_x16F_v_;
+text: .text%__1cNTemplateTableHdup2_x26F_v_;
+text: .text%__1cNTemplateTableEswap6F_v_;
+text: .text%__1cNTemplateTableEiop26Fn0AJOperation__v_;
+text: .text%__1cNTemplateTableElop26Fn0AJOperation__v_;
+text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_2_v_;
+text: .text%__1cNTemplateTableEfop26Fn0AJOperation__v_;
+text: .text%__1cJAssemblerGfadd_s6MnHAddress__v_;
+text: .text%__1cZInterpreterMacroAssemblerGf2ieee6M_v_;
+text: .text%__1cNTemplateTableEdop26Fn0AJOperation__v_;
+text: .text%__1cJAssemblerGfadd_d6MnHAddress__v_;
+text: .text%__1cZInterpreterMacroAssemblerGd2ieee6M_v_;
+text: .text%__1cJAssemblerEsbbl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerHfsubr_s6MnHAddress__v_;
+text: .text%__1cJAssemblerHfsubr_d6MnHAddress__v_;
+text: .text%__1cJAssemblerFimull6MpnMRegisterImpl_2_v_;
+text: .text%__1cNTemplateTableElmul6F_v_;
+text: .text%__1cOMacroAssemblerElmul6Mii_v_;
+text: .text%__1cJAssemblerEmull6MnHAddress__v_;
+text: .text%__1cJAssemblerEmull6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerGfmul_s6MnHAddress__v_;
+text: .text%__1cJAssemblerGfmul_d6MnHAddress__v_;
+text: .text%__1cJAssemblerFfld_x6MnHAddress__v_;
+text: .text%__1cJAssemblerFfmulp6Mi_v_;
+text: .text%__1cNTemplateTableEidiv6F_v_;
+text: .text%__1cOMacroAssemblerPcorrected_idivl6MpnMRegisterImpl__i_;
+text: .text%__1cJAssemblerEcdql6M_v_;
+text: .text%__1cJAssemblerFidivl6MpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableEldiv6F_v_;
+text: .text%__1cZInterpreterMacroAssemblerRcall_VM_leaf_base6MpCi_v_;
+text: .text%__1cJAssemblerHfdivr_s6MnHAddress__v_;
+text: .text%__1cJAssemblerHfdivr_d6MnHAddress__v_;
+text: .text%__1cJAssemblerGfdivrp6Mi_v_;
+text: .text%__1cNTemplateTableEirem6F_v_;
+text: .text%__1cNTemplateTableElrem6F_v_;
+text: .text%__1cOMacroAssemblerFfremr6MpnMRegisterImpl__v_;
+text: .text%__1cOMacroAssemblerIsave_eax6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerFfprem6M_v_;
+text: .text%__1cJAssemblerJfnstsw_ax6M_v_;
+text: .text%__1cJAssemblerEsahf6M_v_;
+text: .text%__1cOMacroAssemblerLrestore_eax6MpnMRegisterImpl__v_;
+text: .text%__1cJAssemblerEfxch6Mi_v_;
+text: .text%__1cNTemplateTableEineg6F_v_;
+text: .text%__1cNTemplateTableElneg6F_v_;
+text: .text%__1cOMacroAssemblerElneg6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerEadcl6MpnMRegisterImpl_i_v_;
+text: .text%__1cNTemplateTableEfneg6F_v_;
+text: .text%__1cJAssemblerEfchs6M_v_;
+text: .text%__1cNTemplateTableEdneg6F_v_;
+text: .text%__1cJAssemblerEshll6MpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableElshl6F_v_;
+text: .text%__1cOMacroAssemblerElshl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerFshldl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerEsarl6MpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableElshr6F_v_;
+text: .text%__1cOMacroAssemblerElshr6MpnMRegisterImpl_2i_v_;
+text: .text%__1cJAssemblerFshrdl6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableFlushr6F_v_;
+text: .text%__1cJAssemblerEandl6MpnMRegisterImpl_2_v_;
+text: .text%__1cNTemplateTableEiinc6F_v_;
+text: .text%__1cJAssemblerEaddl6MnHAddress_pnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableJwide_iinc6F_v_;
+text: .text%__1cNTemplateTableHconvert6F_v_;
+text: .text%__1cOMacroAssemblerLextend_sign6MpnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerGfild_s6MnHAddress__v_;
+text: .text%__1cJAssemblerGfild_d6MnHAddress__v_;
+text: .text%__1cNTemplateTableElcmp6F_v_;
+text: .text%__1cOMacroAssemblerIlcmp2int6MpnMRegisterImpl_222_v_;
+text: .text%__1cNTemplateTableJfloat_cmp6Fi_v_;
+text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_;
+text: .text%__1cOMacroAssemblerIfcmp2int6MpnMRegisterImpl_i_v_;
+text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_;
+text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_;
+text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_x86.o;
+text: .text%__1cNTemplateTableGbranch6Fii_v_;
+text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_;
+text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_;
+text: .text%__1cZInterpreterMacroAssemblerYprofile_not_taken_branch6MpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableHif_icmp6Fn0AJCondition__v_;
+text: .text%__1cNTemplateTableHif_acmp6Fn0AJCondition__v_;
+text: .text%__1cNTemplateTableF_goto6F_v_;
+text: .text%__1cNTemplateTableDjsr6F_v_;
+text: .text%__1cZInterpreterMacroAssemblerWdispatch_only_noverify6MnITosState__v_;
+text: .text%__1cNTemplateTableDret6F_v_;
+text: .text%__1cZInterpreterMacroAssemblerLprofile_ret6MpnMRegisterImpl_2_v_;
+text: .text%__1cNTemplateTableIwide_ret6F_v_;
+text: .text%__1cNTemplateTableLtableswitch6F_v_;
+text: .text%__1cZInterpreterMacroAssemblerTprofile_switch_case6MpnMRegisterImpl_22_v_;
+text: .text%__1cZInterpreterMacroAssemblerWprofile_switch_default6MpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableMlookupswitch6F_v_;
+text: .text%__1cNTemplateTableH_return6FnITosState__v_;
+text: .text%__1cNTemplateTableJgetstatic6Fi_v_;
+text: .text%__1cNTemplateTableSgetfield_or_static6Fii_v_;
+text: .text%__1cNTemplateTableZload_field_cp_cache_entry6FipnMRegisterImpl_22i_v_;
+text: .text%__1cNTemplateTableXresolve_cache_and_index6FipnMRegisterImpl_2_v_;
+text: .text%__1cJAssemblerHfistp_d6MnHAddress__v_;
+text: .text%__1cNTemplateTableJputstatic6Fi_v_;
+text: .text%__1cNTemplateTableSputfield_or_static6Fii_v_;
+text: .text%__1cNTemplateTableUjvmti_post_field_mod6Fii_v_;
+text: .text%__1cOMacroAssemblerLstore_check6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cNTemplateTableQvolatile_barrier6F_v_;
+text: .text%__1cNTemplateTableIgetfield6Fi_v_;
+text: .text%__1cNTemplateTableOpatch_bytecode6FnJBytecodesECode_pnMRegisterImpl_4i_v_;
+text: .text%__1cNTemplateTableIputfield6Fi_v_;
+text: .text%__1cNTemplateTableNinvokevirtual6Fi_v_;
+text: .text%__1cNTemplateTableOprepare_invoke6FpnMRegisterImpl_2inJBytecodesECode__v_;
+text: .text%__1cNTemplateTablebAload_invoke_cp_cache_entry6FipnMRegisterImpl_22ii_v_;
+text: .text%__1cNTemplateTableUinvokevirtual_helper6FpnMRegisterImpl_22_v_;
+text: .text%__1cZInterpreterMacroAssemblerSprofile_final_call6MpnMRegisterImpl__v_;
+text: .text%__1cZInterpreterMacroAssemblerUprofile_virtual_call6MpnMRegisterImpl_22_v_;
+text: .text%__1cNTemplateTableNinvokespecial6Fi_v_;
+text: .text%__1cZInterpreterMacroAssemblerMprofile_call6MpnMRegisterImpl__v_;
+text: .text%__1cNTemplateTableMinvokestatic6Fi_v_;
+text: .text%__1cNTemplateTablePinvokeinterface6Fi_v_;
+text: .text%__1cOMacroAssemblerIround_to6MpnMRegisterImpl_i_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorRset_unimplemented6Mi_v_;
+text: .text%__1cNTemplateTableE_new6F_v_;
+text: .text%__1cQGenCollectedHeapItop_addr6kM_ppnIHeapWord__;
+text: .text%__1cQDefNewGenerationItop_addr6kM_ppnIHeapWord__;
+text: .text%__1cQGenCollectedHeapIend_addr6kM_ppnIHeapWord__;
+text: .text%__1cQDefNewGenerationIend_addr6kM_ppnIHeapWord__;
+text: .text%__1cOMacroAssemblerJdecrement6MpnMRegisterImpl_i_v_;
+text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC22_v_;
+text: .text%__1cNTemplateTableInewarray6F_v_;
+text: .text%__1cNTemplateTableJanewarray6F_v_;
+text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC222_v_;
+text: .text%__1cNTemplateTableLarraylength6F_v_;
+text: .text%__1cNTemplateTableGathrow6F_v_;
+text: .text%__1cNTemplateTableJcheckcast6F_v_;
+text: .text%__1cNTemplateTableHcall_VM6FpnMRegisterImpl_pC_v_;
+text: .text%__1cNTemplateTableKinstanceof6F_v_;
+text: .text%__1cNTemplateTableMmonitorenter6F_v_;
+text: .text%__1cJAssemblerFcmovl6Mn0AJCondition_pnMRegisterImpl_3_v_;
+text: .text%__1cNTemplateTableLmonitorexit6F_v_;
+text: .text%__1cNTemplateTableEwide6F_v_;
+text: .text%__1cNTemplateTableOmultianewarray6F_v_;
+text: .text%__1cNTemplateTableKif_nullcmp6Fn0AJCondition__v_;
+text: .text%__1cNTemplateTableGgoto_w6F_v_;
+text: .text%__1cNTemplateTableFjsr_w6F_v_;
+text: .text%__1cNTemplateTableL_breakpoint6F_v_;
+text: .text%__1cZInterpreterMacroAssemblerUdispatch_only_normal6MnITosState__v_;
+text: .text%__1cNTemplateTableQfast_accessfield6FnITosState__v_;
+text: .text%__1cNTemplateTablePfast_storefield6FnITosState__v_;
+text: .text%__1cNTemplateTableZjvmti_post_fast_field_mod6F_v_;
+text: .text%__1cNTemplateTableMfast_xaccess6FnITosState__v_;
+text: .text%__1cNTemplateTableKfast_iload6F_v_;
+text: .text%__1cNTemplateTableLfast_iload26F_v_;
+text: .text%__1cNTemplateTableMfast_icaload6F_v_;
+text: .text%__1cNTemplateTableRfast_invokevfinal6Fi_v_;
+text: .text%__1cNTemplateTableRfast_linearswitch6F_v_;
+text: .text%__1cNTemplateTableRfast_binaryswitch6F_v_;
+text: .text%__1cNTemplateTableSshouldnotreachhere6F_v_;
+text: .text%__1cbCAbstractInterpreterGeneratorbCset_safepoints_for_all_bytes6M_v_;
+text: .text%__1cWinvocationCounter_init6F_v_;
+text: .text%__1cOmarksweep_init6F_v_;
+text: .text%__1cQaccessFlags_init6F_v_;
+text: .text%__1cStemplateTable_init6F_v_;
+text: .text%__1cVInterfaceSupport_init6F_v_;
+text: .text%__1cOuniverse2_init6F_v_;
+text: .text%__1cIUniverseHgenesis6FpnGThread__v_;
+text: .text%__1cIUniverseYcompute_base_vtable_size6F_v_;
+text: .text%__1cLClassLoaderVcompute_Object_vtable6F_i_;
+text: .text%__1cKklassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cFKlassVbase_create_klass_oop6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__pnMklassOopDesc__;
+text: .text%__1cKklassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: klassKlass.o;
+text: .text%__1cKKlass_vtbl2n6FIrnLKlassHandle_ipnGThread__pv_;
+text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: klass.o;
+text: .text%__1cKSharedHeapWpermanent_mem_allocate6MI_pnIHeapWord__: genCollectedHeap.o;
+text: .text%__1cRCompactingPermGenMmem_allocate6MI_pnIHeapWord__;
+text: .text%__1cbCOneContigSpaceCardGenerationIallocate6MIii_pnIHeapWord__: compactingPermGenGen.o;
+text: .text%__1cWOffsetTableContigSpaceIallocate6MI_pnIHeapWord__: space.o;
+text: .text%__1cPContiguousSpaceIallocate6MI_pnIHeapWord__;
+text: .text%__1cbBBlockOffsetArrayContigSpaceLalloc_block6MpnIHeapWord_2_v_: blockOffsetTable.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: klass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: klass.o;
+text: .text%__1cRCardTableModRefBSEkind6M_nKBarrierSetEName__: cardTableModRefBS.o;
+text: .text%__1cFKlassMset_subklass6MpnMklassOopDesc__v_;
+text: .text%__1cFKlassQset_next_sibling6MpnMklassOopDesc__v_;
+text: .text%__1cKklassKlassOset_alloc_size6MI_v_: klassKlass.o;
+text: .text%__1cParrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cFKlassRbase_create_klass6FrnLKlassHandle_irknKKlass_vtbl_pnGThread__1_;
+text: .text%__1cParrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: arrayKlassKlass.o;
+text: .text%__1cKklassKlassOset_alloc_size6MI_v_: arrayKlassKlass.o;
+text: .text%__1cPjava_lang_ClassNcreate_mirror6FnLKlassHandle_pnGThread__pnHoopDesc__;
+text: .text%__1cFKlassWcompute_modifier_flags6kMpnGThread__i_;
+text: .text%__1cSobjArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cSobjArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlassKlass.o;
+text: .text%__1cKklassKlassOset_alloc_size6MI_v_: objArrayKlassKlass.o;
+text: .text%__1cSinstanceKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cSinstanceKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlassKlass.o;
+text: .text%__1cKklassKlassOset_alloc_size6MI_v_: instanceKlassKlass.o;
+text: .text%__1cTtypeArrayKlassKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cTtypeArrayKlassKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlassKlass.o;
+text: .text%__1cbBBlockOffsetArrayContigSpaceQalloc_block_work6MpnIHeapWord_2_v_;
+text: .text%__1cKklassKlassOset_alloc_size6MI_v_: typeArrayKlassKlass.o;
+text: .text%__1cLsymbolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cLsymbolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: symbolKlass.o;
+text: .text%__1cLsymbolKlassOset_alloc_size6MI_v_: symbolKlass.o;
+text: .text%__1cKoopFactoryKnew_symbol6FpkcipnGThread__pnNsymbolOopDesc__;
+text: .text%__1cLSymbolTableGlookup6FpkcipnGThread__pnNsymbolOopDesc__;
+text: .text%__1cJHashtableLhash_symbol6Fpkci_I_: symbolTable.o;
+text: .text%__1cLSymbolTableGlookup6MipkciI_pnNsymbolOopDesc__;
+text: .text%__1cLSymbolTableJbasic_add6MipCiIpnGThread__pnNsymbolOopDesc__;
+text: .text%__1cLsymbolKlassPallocate_symbol6MpCipnGThread__pnNsymbolOopDesc__;
+text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: symbolKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: symbolKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: symbolKlass.o;
+text: .text%__1cJHashtableJnew_entry6MIpnHoopDesc__pnOHashtableEntry__;
+text: .text%__1cOBasicHashtableJnew_entry6MI_pnTBasicHashtableEntry__;
+text: .text%__1cOtypeArrayKlassMcreate_klass6FnJBasicType_ipnGThread__pnMklassOopDesc__;
+text: .text%__1cOtypeArrayKlassNexternal_name6FnJBasicType__pkc_;
+text: .text%__1cKarrayKlassXbase_create_array_klass6FrknKKlass_vtbl_inLKlassHandle_pnGThread__nQarrayKlassHandle__;
+text: .text%__1cOtypeArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: typeArrayKlass.o;
+text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: typeArrayKlass.o;
+text: .text%__1cLAccessFlagsPatomic_set_bits6Mi_v_;
+text: .text%__1cLlog2_intptr6Fi_i_: typeArrayKlass.o;
+text: .text%__1cKarrayKlassbBcomplete_create_array_klass6FnQarrayKlassHandle_nLKlassHandle_pnGThread__v_;
+text: .text%__1cFKlassRinitialize_supers6MpnMklassOopDesc_pnGThread__v_;
+text: .text%__1cKarrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cKarrayKlassGvtable6kM_pnLklassVtable__;
+text: .text%__1cLklassVtableRinitialize_vtable6MpnGThread__v_;
+text: .text%__1cKarrayKlassKjava_super6kM_pnMklassOopDesc__;
+text: .text%__1cKarrayKlassWcompute_modifier_flags6kMpnGThread__i_;
+text: .text%__1cLmethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cLmethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodKlass.o;
+text: .text%__1cLmethodKlassOset_alloc_size6MI_v_: methodKlass.o;
+text: .text%__1cQconstMethodKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cQconstMethodKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constMethodKlass.o;
+text: .text%__1cQconstMethodKlassOset_alloc_size6MI_v_: constMethodKlass.o;
+text: .text%__1cPmethodDataKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cPmethodDataKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: methodDataKlass.o;
+text: .text%__1cPmethodDataKlassOset_alloc_size6MI_v_: methodDataKlass.o;
+text: .text%__1cRconstantPoolKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cRconstantPoolKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: constantPoolKlass.o;
+text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: constantPoolKlass.o;
+text: .text%__1cWconstantPoolCacheKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cWconstantPoolCacheKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: cpCacheKlass.o;
+text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: cpCacheKlass.o;
+text: .text%__1cVcompiledICHolderKlassMcreate_klass6FpnGThread__pnMklassOopDesc__;
+text: .text%__1cVcompiledICHolderKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: compiledICHolderKlass.o;
+text: .text%__1cVcompiledICHolderKlassOset_alloc_size6MI_v_: compiledICHolderKlass.o;
+text: .text%__1cSobjArrayKlassKlassbEallocate_system_objArray_klass6MpnGThread__pnMklassOopDesc__;
+text: .text%__1cSobjArrayKlassKlassXallocate_objArray_klass6MinLKlassHandle_pnGThread__pnMklassOopDesc__;
+text: .text%__1cSobjArrayKlassKlassbCallocate_objArray_klass_impl6FnYobjArrayKlassKlassHandle_inLKlassHandle_pnGThread__pnMklassOopDesc__;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlass.o;
+text: .text%__1cNsymbolOopDescLas_C_string6kM_pc_;
+text: .text%__1cNsymbolOopDescLas_C_string6kMpci_1_;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlass.o;
+text: .text%__1cNobjArrayKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: objArrayKlass.o;
+text: .text%__1cKarrayKlassOset_alloc_size6MI_v_: objArrayKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlass.o;
+text: .text%__1cLlog2_intptr6Fi_i_: objArrayKlassKlass.o;
+text: .text%__1cNobjArrayKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cNobjArrayKlassWcompute_modifier_flags6kMpnGThread__i_;
+text: .text%__1cKoopFactoryXnew_permanent_byteArray6FipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cOtypeArrayKlassSallocate_permanent6MipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: typeArrayKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: typeArrayKlass.o;
+text: .text%__1cKoopFactoryYnew_permanent_shortArray6FipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cKoopFactoryWnew_permanent_intArray6FipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cKoopFactoryTnew_system_objArray6FipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: oopFactory.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: oopFactory.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: oopFactory.o;
+text: .text%__1cJvmSymbolsKinitialize6FpnGThread__v_;
+text: .text%__1cNsymbolOopDescGequals6kMpkci_i_;
+text: .text%__1cQSystemDictionaryKinitialize6FpnGThread__v_;
+text: .text%__1cKDictionary2t6Mi_v_;
+text: .text%__1cOBasicHashtable2t6Mii_v_: dictionary.o;
+text: .text%__1cQPlaceholderTable2t6Mi_v_;
+text: .text%__1cOBasicHashtable2t6Mii_v_: placeholders.o;
+text: .text%__1cVLoaderConstraintTable2t6Mi_v_;
+text: .text%__1cOBasicHashtable2t6Mii_v_: loaderConstraints.o;
+text: .text%__1cQSystemDictionarybCinitialize_preloaded_classes6FpnGThread__v_;
+text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_ipnGThread__pnMklassOopDesc__;
+text: .text%__1cQSystemDictionaryPresolve_or_fail6FnMsymbolHandle_nGHandle_2ipnGThread__pnMklassOopDesc__;
+text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cQSystemDictionarybEresolve_instance_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cVjava_lang_ClassLoaderbBnon_reflection_class_loader6FpnHoopDesc__2_;
+text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: systemDictionary.o;
+text: .text%__1cHoopDescSslow_identity_hash6M_i_;
+text: .text%__1cSObjectSynchronizerXidentity_hash_value_for6FnGHandle__i_;
+text: .text%__1cNget_next_hash6F_i_: synchronizer.o;
+text: .text%__1cCosGrandom6F_l_;
+text: .text%__1cKDictionaryEfind6MiInMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cKDictionaryJget_entry6MiInMsymbolHandle_nGHandle__pnPDictionaryEntry__;
+text: .text%__1cQSystemDictionarybAcompute_loader_lock_object6FnGHandle_pnGThread__1_;
+text: .text%__1cMObjectLocker2t6MnGHandle_pnGThread__v_;
+text: .text%__1cSObjectSynchronizerKfast_enter6FnGHandle_pnJBasicLock_pnGThread__v_;
+text: .text%__1cQSystemDictionaryKfind_class6FiInMsymbolHandle_nGHandle__pnMklassOopDesc__;
+text: .text%__1cKDictionaryKfind_class6MiInMsymbolHandle_nGHandle__pnMklassOopDesc__;
+text: .text%__1cQPlaceholderTableKfind_entry6MiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__;
+text: .text%__1cQPlaceholderTableJadd_entry6MiInMsymbolHandle_nGHandle__v_;
+text: .text%__1cQPlaceholderTableJnew_entry6MipnNsymbolOopDesc_pnHoopDesc__pnQPlaceholderEntry__;
+text: .text%__1cQSystemDictionaryTload_instance_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__;
+text: .text%__1cQSystemDictionaryRload_shared_class6FnMsymbolHandle_nGHandle_pnGThread__nTinstanceKlassHandle__;
+text: .text%__1cQSystemDictionaryRfind_shared_class6FnMsymbolHandle__pnMklassOopDesc__;
+text: .text%__1cQSystemDictionaryRload_shared_class6FnTinstanceKlassHandle_nGHandle_pnGThread__1_;
+text: .text%__1cLClassLoaderOload_classfile6FnMsymbolHandle_pnGThread__nTinstanceKlassHandle__;
+text: .text%__1cFVTuneQstart_class_load6F_v_;
+text: .text%__1cJEventMark2t6MpkcE_v_: classLoader.o;
+text: .text%__1cSThreadProfilerMark2t6Mn0AGRegion__v_;
+text: .text%__1cMstringStream2t6MI_v_;
+text: .text%__1cMstringStreamFwrite6MpkcI_v_;
+text: .text%__1cMoutputStreamPupdate_position6MpkcI_v_;
+text: .text%__1cMstringStreamJas_string6M_pc_;
+text: .text%__1cMelapsedTimerFstart6M_v_;
+text: .text%__1cCosPelapsed_counter6F_x_;
+text: .text%__1cRClassPathZipEntryLopen_stream6Mpkc_pnPClassFileStream__;
+text: .text%__1cPClassFileStream2t6MpCipc_v_;
+text: .text%__1cMelapsedTimerEstop6M_v_;
+text: .text%__1cPClassFileParserOparseClassFile6MnMsymbolHandle_nGHandle_2r1pnGThread__nTinstanceKlassHandle__;
+text: .text%__1cIVerifierRshould_verify_for6FpnHoopDesc__i_;
+text: .text%__1cPClassFileStreamGget_u46MpnGThread__I_;
+text: .text%__1cPClassFileStreamGget_u26MpnGThread__H_;
+text: .text%__1cIVerifierQrelax_verify_for6FpnHoopDesc__i_;
+text: .text%__1cVjava_lang_ClassLoaderRis_trusted_loader6FpnHoopDesc__i_;
+text: .text%__1cQSystemDictionarySjava_system_loader6F_pnHoopDesc__;
+text: .text%__1cPClassFileParserTparse_constant_pool6MpnGThread__nSconstantPoolHandle__;
+text: .text%__1cPClassFileParserOcheck_property6MipkcipnGThread__v_;
+text: .text%__1cKoopFactoryQnew_constantPool6FipnGThread__pnTconstantPoolOopDesc__;
+text: .text%__1cRconstantPoolKlassIallocate6MipnGThread__pnTconstantPoolOopDesc__;
+text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: constantPoolKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constantPoolKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constantPoolKlass.o;
+text: .text%__1cPClassFileParserbBparse_constant_pool_entries6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileStreamGget_u16MpnGThread__C_;
+text: .text%__1cPClassFileParserbFparse_constant_pool_class_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileParserbJparse_constant_pool_methodref_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileParserbGparse_constant_pool_string_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileParserbHparse_constant_pool_integer_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileParserbEparse_constant_pool_utf8_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileParserRverify_legal_utf86MpkCipnGThread__v_;
+text: .text%__1cPClassFileStreamHskip_u16MipnGThread__v_;
+text: .text%__1cPClassFileParserbLparse_constant_pool_nameandtype_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cTconstantPoolOopDescSklass_ref_index_at6Mi_i_;
+text: .text%__1cTconstantPoolOopDescbAname_and_type_ref_index_at6Mi_i_;
+text: .text%__1cTconstantPoolOopDescRname_ref_index_at6Mi_i_;
+text: .text%__1cTconstantPoolOopDescWsignature_ref_index_at6Mi_i_;
+text: .text%__1cPClassFileParserbCverify_legal_class_modifiers6MipnGThread__v_;
+text: .text%__1cPClassFileParserQparse_interfaces6MnSconstantPoolHandle_nGHandle_2pnGThread__nOobjArrayHandle__;
+text: .text%__1cPClassFileParserMparse_fields6MnSconstantPoolHandle_ipnUFieldAllocationCount_pnOobjArrayHandle_pnGThread__nPtypeArrayHandle__;
+text: .text%__1cUinitialize_hashtable6FppnLNameSigHash__v_;
+text: .text%__1cPclear_hashtable6FppnLNameSigHash__v_;
+text: .text%__1cPClassFileParserNparse_methods6MnSconstantPoolHandle_ipnLAccessFlags_ppnPobjArrayOopDesc_66pnGThread__nOobjArrayHandle__;
+text: .text%__1cPClassFileParserMparse_method6MnSconstantPoolHandle_ipnLAccessFlags_pnPtypeArrayHandle_55pnGThread__nMmethodHandle__;
+text: .text%__1cPClassFileParserYverify_legal_method_name6MnMsymbolHandle_pnGThread__v_;
+text: .text%__1cPClassFileParserbDverify_legal_method_modifiers6MiinMsymbolHandle_pnGThread__v_;
+text: .text%__1cPClassFileParserWparse_linenumber_table6MIIpipnGThread__pC_;
+text: .text%__1cbFCompressedLineNumberWriteStream2t6Mi_v_;
+text: .text%__1cVCompressedWriteStream2t6Mi_v_;
+text: .text%__1cQCompressedStream2t6MpCi_v_;
+text: .text%__1cbFCompressedLineNumberWriteStreamKwrite_pair6Mii_v_;
+text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: methodOop.o;
+text: .text%__1cKoopFactoryKnew_method6FinLAccessFlags_iiipnGThread__pnNmethodOopDesc__;
+text: .text%__1cKoopFactoryPnew_constMethod6FiiiipnGThread__pnSconstMethodOopDesc__;
+text: .text%__1cQconstMethodKlassIallocate6MiiiipnGThread__pnSconstMethodOopDesc__;
+text: .text%__1cSconstMethodOopDescLobject_size6Fiiii_i_;
+text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: constMethodKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: constMethodKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: constMethodKlass.o;
+text: .text%__1cSconstMethodOopDescZset_inlined_tables_length6Miii_v_;
+text: .text%__1cLmethodKlassIallocate6MnRconstMethodHandle_nLAccessFlags_pnGThread__pnNmethodOopDesc__;
+text: .text%__1cNmethodOopDescLobject_size6Fi_i_;
+text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: methodKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: methodKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: methodKlass.o;
+text: .text%__1cNmethodOopDescJinit_code6M_v_;
+text: .text%__1cRInvocationCounterEinit6M_v_;
+text: .text%__1cRInvocationCounterFreset6M_v_;
+text: .text%__1cRInvocationCounterJset_state6Mn0AFState__v_;
+text: .text%__1cLmethodKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cNmethodOopDescbAcompute_size_of_parameters6MpnGThread__v_;
+text: .text%__1cRSignatureIterator2t6MnMsymbolHandle__v_;
+text: .text%__1cRSignatureIteratorSiterate_parameters6M_v_;
+text: .text%__1cRSignatureIteratorGexpect6Mc_v_;
+text: .text%__1cSconstMethodOopDescbBcompressed_linenumber_table6kM_pC_;
+text: .text%__1cPClassFileParserUassemble_annotations6MpCi1ipnGThread__nPtypeArrayHandle__;
+text: .text%__1cNmethodOopDescVclear_native_function6M_v_;
+text: .text%__1cNmethodOopDescTset_native_function6MpC_v_;
+text: .text%__1cNmethodOopDescVset_signature_handler6MpC_v_;
+text: .text%__1cVCompressedWriteStreamEgrow6M_v_;
+text: .text%__1cRSignatureIteratorKparse_type6M_i_;
+text: .text%__1cNSignatureInfoJdo_object6Mii_v_: frame.o;
+text: .text%__1cUArgumentSizeComputerDset6MinJBasicType__v_: frame.o;
+text: .text%__1cPClassFileParserYparse_checked_exceptions6MpHInSconstantPoolHandle_pnGThread__1_;
+text: .text%__1cPClassFileStreamHskip_u26MipnGThread__v_;
+text: .text%__1cSconstMethodOopDescbEchecked_exceptions_length_addr6kM_pH_;
+text: .text%__1cSconstMethodOopDescYchecked_exceptions_start6kM_pnXCheckedExceptionElement__;
+text: .text%__1cXcopy_u2_with_conversion6FpH0i_v_: classFileParser.o;
+text: .text%__1cNSignatureInfoHdo_long6M_v_: frame.o;
+text: .text%__1cNSignatureInfoGdo_int6M_v_: frame.o;
+text: .text%__1cPClassFileParserbDcompute_transitive_interfaces6MnTinstanceKlassHandle_nOobjArrayHandle_pnGThread__2_;
+text: .text%__1cPClassFileParserMsort_methods6MnOobjArrayHandle_111pnGThread__nPtypeArrayHandle__;
+text: .text%__1cNmethodOopDescMsort_methods6FpnPobjArrayOopDesc_222_v_;
+text: .text%method_compare: methodOop.o;
+text: .text%__1cLklassVtablebKcompute_vtable_size_and_num_mirandas6Fri1pnMklassOopDesc_pnPobjArrayOopDesc_nLAccessFlags_pnHoopDesc_pnNsymbolOopDesc_5_v_;
+text: .text%__1cLklassVtableWneeds_new_vtable_entry6FpnNmethodOopDesc_pnMklassOopDesc_pnHoopDesc_pnNsymbolOopDesc_nLAccessFlags__i_;
+text: .text%__1cLklassVtableQget_num_mirandas6FpnMklassOopDesc_pnPobjArrayOopDesc_4_i_;
+text: .text%__1cLklassVtableMget_mirandas6FpnNGrowableArray4CpnNmethodOopDesc___pnMklassOopDesc_pnPobjArrayOopDesc_8_v_;
+text: .text%__1cLklassItableTcompute_itable_size6FnOobjArrayHandle__i_;
+text: .text%__1cUvisit_all_interfaces6FpnPobjArrayOopDesc_pnXInterfaceVisiterClosure__v_;
+text: .text%__1cPClassFileParserUcompute_oop_map_size6MnTinstanceKlassHandle_ii_i_;
+text: .text%__1cKoopFactoryRnew_instanceKlass6FiiiinNReferenceType_pnGThread__pnMklassOopDesc__;
+text: .text%__1cSinstanceKlassKlassXallocate_instance_klass6MiiiinNReferenceType_pnGThread__pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceKlass.o;
+text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceKlass.o;
+text: .text%__1cNinstanceKlassQinit_implementor6M_v_;
+text: .text%__1cNinstanceKlassWcompute_modifier_flags6kMpnGThread__i_;
+text: .text%__1cTconstantPoolOopDescNklass_name_at6Mi_pnNsymbolOopDesc__;
+text: .text%__1cFKlassMoop_is_klass6kM_i_: symbolKlass.o;
+text: .text%__1cPClassFileParserbAparse_classfile_attributes6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cPClassFileParserbKparse_classfile_sourcefile_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNinstanceKlassWdo_local_static_fields6MpFpnPfieldDescriptor_pnGThread__v4_v_;
+text: .text%__1cNinstanceKlassbBdo_local_static_fields_impl6FnTinstanceKlassHandle_pFpnPfieldDescriptor_pnGThread__v5_v_;
+text: .text%__1cNinstanceKlassYcompute_secondary_supers6MipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cLklassItableZsetup_itable_offset_table6FnTinstanceKlassHandle__v_;
+text: .text%__1cPClassFileParserNfill_oop_maps6MnTinstanceKlassHandle_ii_v_;
+text: .text%__1cFKlassKsuperklass6kM_pnNinstanceKlass__;
+text: .text%__1cPClassFileParserVset_precomputed_flags6MnTinstanceKlassHandle__v_;
+text: .text%__1cFKlassNlookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%__1cNinstanceKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%__1cNinstanceKlassLfind_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%__1cNinstanceKlassLfind_method6FpnPobjArrayOopDesc_pnNsymbolOopDesc_4_pnNmethodOopDesc__;
+text: .text%__1cNmethodOopDescPis_empty_method6kM_i_;
+text: .text%__1cPClassFileParserYcheck_super_class_access6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cPClassFileParserbCcheck_super_interface_access6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cPClassFileParserbBcheck_final_method_override6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cTClassLoadingServiceTnotify_class_loaded6FpnNinstanceKlass_i_v_;
+text: .text%__1cTClassLoadingServiceScompute_class_size6FpnNinstanceKlass__I_;
+text: .text%__1cSinstanceKlassKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceKlass.o;
+text: .text%__1cRconstantPoolKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cLClassLoaderLadd_package6Fpkci_i_;
+text: .text%__1cLClassLoaderOlookup_package6Fpkc_pnLPackageInfo__;
+text: .text%__1cQPackageHashtableMcompute_hash6Mpkci_I_: classLoader.o;
+text: .text%__1cQPackageHashtableJget_entry6MiIpkcI_pnLPackageInfo__: classLoader.o;
+text: .text%__1cMstringStream2T6M_v_;
+text: .text%__1cSThreadProfilerMark2T6M_v_;
+text: .text%__1cFVTuneOend_class_load6F_v_;
+text: .text%__1cQSystemDictionaryVdefine_instance_class6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cQSystemDictionaryRcheck_constraints6FiInTinstanceKlassHandle_nGHandle_pnGThread__v_;
+text: .text%__1cVLoaderConstraintTablePcheck_or_update6MnTinstanceKlassHandle_nGHandle_nMsymbolHandle__pkc_;
+text: .text%__1cVLoaderConstraintTableWfind_loader_constraint6MnMsymbolHandle_nGHandle__ppnVLoaderConstraintEntry__;
+text: .text%__1cQSystemDictionaryQadd_to_hierarchy6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cFKlassWappend_to_sibling_list6M_v_;
+text: .text%__1cNinstanceKlassSprocess_interfaces6MpnGThread__v_;
+text: .text%__1cIUniverseTflush_dependents_on6FnTinstanceKlassHandle__v_;
+text: .text%__1cJCodeCachebKnumber_of_nmethods_with_dependencies6F_i_;
+text: .text%__1cQSystemDictionaryRupdate_dictionary6FiIiInTinstanceKlassHandle_nGHandle_pnGThread__v_;
+text: .text%__1cQSystemDictionaryQfind_placeholder6FiInMsymbolHandle_nGHandle__pnNsymbolOopDesc__;
+text: .text%__1cQPlaceholderTableMremove_entry6MiInMsymbolHandle_nGHandle__v_;
+text: .text%__1cKDictionaryJadd_klass6MnMsymbolHandle_nGHandle_nLKlassHandle__v_;
+text: .text%__1cPTwoOopHashtableMcompute_hash6MnMsymbolHandle_nGHandle__I_: dictionary.o;
+text: .text%__1cKDictionaryJnew_entry6MIpnMklassOopDesc_pnHoopDesc__pnPDictionaryEntry__;
+text: .text%__1cNinstanceKlassQeager_initialize6MpnGThread__v_;
+text: .text%__1cMObjectLocker2T6M_v_;
+text: .text%__1cSObjectSynchronizerJfast_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_;
+text: .text%__1cPClassFileParserbIparse_constant_pool_fieldref_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileParserbSparse_constant_pool_interfacemethodref_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileParserbEparse_constant_pool_long_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPClassFileStreamGget_u86MpnGThread__X_;
+text: .text%__1cSObjectSynchronizerKslow_enter6FnGHandle_pnJBasicLock_pnGThread__v_;
+text: .text%__1cKJavaThreadNis_lock_owned6kMpC_i_;
+text: .text%__1cGThreadLis_in_stack6kMpC_i_;
+text: .text%__1cQSystemDictionaryVresolve_super_or_fail6FnMsymbolHandle_1nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassZcan_be_primary_super_slow6kM_i_;
+text: .text%__1cKReflectionTverify_class_access6FpnMklassOopDesc_2i_i_;
+text: .text%__1cPClassFileParserbBcheck_illegal_static_method6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cPClassFileParserbJparse_classfile_signature_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cPClassFileParserbCverify_legal_field_modifiers6MiipnGThread__v_;
+text: .text%__1cPClassFileParserXverify_legal_field_name6MnMsymbolHandle_pnGThread__v_;
+text: .text%__1cPClassFileParserbCverify_legal_field_signature6MnMsymbolHandle_1pnGThread__v_;
+text: .text%__1cPClassFileParserWparse_field_attributes6MnSconstantPoolHandle_iHpHpi2pnPtypeArrayHandle_pnGThread__v_;
+text: .text%__1cTconstantPoolOopDescbBbasic_type_for_signature_at6Mi_nJBasicType__;
+text: .text%__1cJFieldTypeKbasic_type6FpnNsymbolOopDesc__nJBasicType__;
+text: .text%__1cJchar2type6Fc_nJBasicType__: fieldType.o;
+text: .text%__1cRSignatureIteratorSskip_optional_size6M_v_;
+text: .text%__1cNSignatureInfoIdo_array6Mii_v_: frame.o;
+text: .text%__1cPClassFileParserVparse_exception_table6MIInSconstantPoolHandle_pnGThread__nPtypeArrayHandle__;
+text: .text%__1cNSignatureInfoHdo_bool6M_v_: frame.o;
+text: .text%__1cNSignatureInfoHdo_char6M_v_: frame.o;
+text: .text%__1cNSignatureInfoIdo_float6M_v_: frame.o;
+text: .text%__1cNSignatureInfoJdo_double6M_v_: frame.o;
+text: .text%__1cbDreorder_based_on_method_index6FpnPobjArrayOopDesc_1ppnHoopDesc__v_: methodOop.o;
+text: .text%__1cLklassVtableYadd_new_mirandas_to_list6FpnNGrowableArray4CpnNmethodOopDesc___pnPobjArrayOopDesc_6pnMklassOopDesc__v_;
+text: .text%__1cLklassVtableKis_miranda6FpnNmethodOopDesc_pnPobjArrayOopDesc_pnMklassOopDesc__i_;
+text: .text%__1cWCountInterfacesClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o;
+text: .text%__1cPClassFileParserbNparse_classfile_inner_classes_attribute6MnSconstantPoolHandle_nTinstanceKlassHandle_pnGThread__H_;
+text: .text%__1cPfieldDescriptorKinitialize6MpnMklassOopDesc_i_v_;
+text: .text%__1cXinitialize_static_field6FpnPfieldDescriptor_pnGThread__v_: classFileParser.o;
+text: .text%__1cPfieldDescriptorSlong_initial_value6kM_x_;
+text: .text%__1cFKlassZcan_be_primary_super_slow6kM_i_;
+text: .text%__1cSSetupItableClosureEdoit6MpnMklassOopDesc_i_v_: klassVtable.o;
+text: .text%__1cNmethodOopDescWis_vanilla_constructor6kM_i_;
+text: .text%__1cNinstanceKlassPadd_implementor6MpnMklassOopDesc__v_;
+text: .text%__1cPClassFileParserXjava_lang_Class_fix_pre6MpnOobjArrayHandle_pnUFieldAllocationCount_pnGThread__v_;
+text: .text%__1cPClassFileParserYjava_lang_Class_fix_post6Mpi_v_;
+text: .text%__1cPfieldDescriptorRint_initial_value6kM_i_;
+text: .text%__1cIUniverseNfixup_mirrors6FpnGThread__v_;
+text: .text%__1cKSharedHeapYpermanent_object_iterate6MpnNObjectClosure__v_: genCollectedHeap.o;
+text: .text%__1cHPermGenOobject_iterate6MpnNObjectClosure__v_: permGen.o;
+text: .text%__1cRCompactingPermGenGas_gen6kM_pnKGeneration__: permGen.o;
+text: .text%__1cbCOneContigSpaceCardGenerationOobject_iterate6MpnNObjectClosure__v_;
+text: .text%__1cPContiguousSpaceOobject_iterate6MpnNObjectClosure__v_;
+text: .text%__1cPContiguousSpaceTobject_iterate_from6MnJWaterMark_pnNObjectClosure__v_;
+text: .text%__1cSFixupMirrorClosureJdo_object6MpnHoopDesc__v_: universe.o;
+text: .text%__1cKklassKlassMoop_is_klass6kM_i_: klassKlass.o;
+text: .text%__1cNinstanceKlassbBallocate_permanent_instance6MpnGThread__pnPinstanceOopDesc__;
+text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: instanceKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: instanceKlass.o;
+text: .text%__1cKklassKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cKklassKlassOklass_oop_size6kM_i_: klassKlass.o;
+text: .text%__1cKklassKlassOklass_oop_size6kM_i_: arrayKlassKlass.o;
+text: .text%__1cSobjArrayKlassKlassOklass_oop_size6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cSinstanceKlassKlassOklass_oop_size6kM_i_: instanceKlassKlass.o;
+text: .text%__1cTtypeArrayKlassKlassOklass_oop_size6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cLsymbolKlassOklass_oop_size6kM_i_: symbolKlass.o;
+text: .text%__1cLsymbolKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cKklassKlassMoop_is_klass6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cTtypeArrayKlassKlassIoop_size6kMpnHoopDesc__i_: typeArrayKlassKlass.o;
+text: .text%__1cKarrayKlassLobject_size6kMi_i_;
+text: .text%__1cLmethodKlassOklass_oop_size6kM_i_: methodKlass.o;
+text: .text%__1cQconstMethodKlassOklass_oop_size6kM_i_: constMethodKlass.o;
+text: .text%__1cPmethodDataKlassOklass_oop_size6kM_i_: methodDataKlass.o;
+text: .text%__1cKklassKlassMoop_is_klass6kM_i_: arrayKlassKlass.o;
+text: .text%__1cRconstantPoolKlassOklass_oop_size6kM_i_: constantPoolKlass.o;
+text: .text%__1cWconstantPoolCacheKlassOklass_oop_size6kM_i_: cpCacheKlass.o;
+text: .text%__1cVcompiledICHolderKlassOklass_oop_size6kM_i_: compiledICHolderKlass.o;
+text: .text%__1cKklassKlassMoop_is_klass6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cSobjArrayKlassKlassIoop_size6kMpnHoopDesc__i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassMoop_is_klass6kM_i_: typeArrayKlass.o;
+text: .text%__1cFKlassMoop_is_klass6kM_i_: objArrayKlass.o;
+text: .text%__1cFKlassMoop_is_klass6kM_i_: constantPoolKlass.o;
+text: .text%__1cFKlassMoop_is_klass6kM_i_: constMethodKlass.o;
+text: .text%__1cQconstMethodKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cFKlassMoop_is_klass6kM_i_: methodKlass.o;
+text: .text%__1cSinstanceKlassKlassMoop_is_klass6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassMoop_is_klass6kM_i_: instanceKlass.o;
+text: .text%__1cFKlassXsearch_secondary_supers6kMpnMklassOopDesc__i_;
+text: .text%__1cPClassFileParserbFjava_lang_ref_Reference_fix_pre6MpnPtypeArrayHandle_nSconstantPoolHandle_pnUFieldAllocationCount_pnGThread__v_;
+text: .text%__1cQinstanceRefKlassZupdate_nonstatic_oop_maps6FpnMklassOopDesc__v_;
+text: .text%__1cQinstanceRefKlassSallocate_permanent6kMrnLKlassHandle_ipnGThread__pv_: instanceRefKlass.o;
+text: .text%__1cNinstanceKlassOset_alloc_size6MI_v_: instanceRefKlass.o;
+text: .text%__1cNinstanceKlassPoop_is_instance6kM_i_: instanceRefKlass.o;
+text: .text%__1cKReflectionVis_same_class_package6FpnMklassOopDesc_2_i_;
+text: .text%__1cNinstanceKlassVis_same_class_package6MpnMklassOopDesc__i_;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceRefKlass.o;
+text: .text%__1cNinstanceKlassVis_same_class_package6FpnHoopDesc_pnNsymbolOopDesc_24_i_;
+text: .text%__1cEUTF8Hstrrchr6FpWiW_1_;
+text: .text%__1cEUTF8Fequal6FpWi1i_i_;
+text: .text%__1cPClassFileParserbFparse_constant_pool_float_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cNSignatureInfoHdo_byte6M_v_: frame.o;
+text: .text%__1cNSignatureInfoIdo_short6M_v_: frame.o;
+text: .text%__1cRappend_interfaces6FnOobjArrayHandle_ripnPobjArrayOopDesc__v_;
+text: .text%__1cQSystemDictionaryPresolve_or_null6FnMsymbolHandle_pnGThread__pnMklassOopDesc__;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlass.o;
+text: .text%__1cPfieldDescriptorTfloat_initial_value6kM_f_;
+text: .text%__1cPClassFileParserbGparse_constant_pool_double_entry6MnSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cPfieldDescriptorUdouble_initial_value6kM_d_;
+text: .text%__1cQSystemDictionarybDinitialize_basic_type_mirrors6FpnGThread__v_;
+text: .text%__1cPjava_lang_ClassYcreate_basic_type_mirror6FpkcpnGThread__pnHoopDesc__;
+text: .text%__1cNobjArrayKlassZcan_be_primary_super_slow6kM_i_;
+text: .text%__1cXreferenceProcessor_init6F_v_;
+text: .text%__1cSReferenceProcessorMinit_statics6F_v_;
+text: .text%__1cbBjava_lang_ref_SoftReferenceJset_clock6Fx_v_;
+text: .text%__1cQjni_handles_init6F_v_;
+text: .text%__1cKJNIHandlesKinitialize6F_v_;
+text: .text%__1cOvmStructs_init6F_v_;
+text: .text%__1cVverificationType_init6F_v_;
+text: .text%__1cQVerificationTypeKinitialize6F_v_;
+text: .text%__1cOcompiler1_init6F_v_;
+text: .text%__1cKSharedInfoKset_stack06Fi_v_;
+text: .text%__1cKSharedInfoLset_regName6F_v_;
+text: .text%__1cIRegAllocYinit_register_allocation6F_v_;
+text: .text%__1cIFrameMapEinit6F_v_;
+text: .text%__1cKc1_RegMaskKinit_masks6Fi_v_;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_FrameMap_x86.o;
+text: .text%__1cNc1_AllocTableLinit_tables6F_v_;
+text: .text%__1cIFrameMapOfirst_register6F_pnMRegisterImpl__;
+text: .text%__1cIFrameMapLcpu_reg2rnr6FpnMRegisterImpl__i_;
+text: .text%__1cIFrameMapLcpu_rnr2reg6Fi_pnMRegisterImpl__;
+text: .text%__1cIRuntime1Kinitialize6F_v_;
+text: .text%__1cKCodeBufferRinsts_memory_size6Fi_i_;
+text: .text%__1cKCodeBufferQlocs_memory_size6Fi_i_;
+text: .text%__1cIRuntime1Ninitialize_pd6F_v_;
+text: .text%__1cNSharedRuntimeTgenerate_deopt_blob6F_v_;
+text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffer_ipnLOopRecorder_pkcii_v_;
+text: .text%__1cKCodeBufferQalloc_relocation6MI_v_;
+text: .text%__1cJOopMapSet2t6M_v_;
+text: .text%__1cJAssemblerEsubl6MnHAddress_i_v_;
+text: .text%__1cTsave_live_registers6FpnOMacroAssembler_i_pnGOopMap__: c1_Runtime1_x86.o;
+text: .text%__1cJAssemblerGfldenv6MnHAddress__v_;
+text: .text%__1cGOopMap2t6Mii_v_;
+text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_;
+text: .text%__1cGOopMapHset_xxx6MnHOptoRegEName_nLOopMapValueJoop_types_ii2_v_;
+text: .text%__1cGOopMapbEmap_compiler_reg_to_oopmap_reg6MnHOptoRegEName_ii_nFVMRegEName__;
+text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: oopMap.o;
+text: .text%__1cIFrameMapRfpu_stack_regname6Fi_nHOptoRegEName__;
+text: .text%__1cKRelocationJpack_data6M_i_: codeBlob.o;
+text: .text%__1cJrelocInfoNfinish_prefix6Mph_p0_;
+text: .text%__1cJrelocInfo2t6Mn0AJrelocType_ii_v_;
+text: .text%__1cOMacroAssemblerTset_last_Java_frame6MpnMRegisterImpl_22pC_v_;
+text: .text%__1cJOopMapSetKadd_gc_map6MiipnGOopMap__v_;
+text: .text%__1cOMacroAssemblerVreset_last_Java_frame6MpnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerEdecl6MnHAddress__v_;
+text: .text%__1cSDeoptimizationBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_iiii_p0_;
+text: .text%__1cICodeBlobPallocation_size6FpnKCodeBuffer_ii_I_;
+text: .text%__1cNRelocIteratorTlocs_and_index_size6Fii_i_;
+text: .text%__1cSDeoptimizationBlob2n6FII_pv_;
+text: .text%__1cSDeoptimizationBlob2t6MpnKCodeBuffer_ipnJOopMapSet_iiii_v_;
+text: .text%__1cICodeBlob2t6MpkcpnKCodeBuffer_iiipnJOopMapSet_i_v_;
+text: .text%__1cKCodeBufferPcopy_relocation6MpnICodeBlob__v_;
+text: .text%__1cNRelocIteratorMcreate_index6FpnKCodeBuffer_pnJrelocInfo_4_4_;
+text: .text%__1cKCodeBufferJcopy_code6MpnICodeBlob__v_;
+text: .text%__1cRAbstractAssemblerOcode_fill_byte6F_i_;
+text: .text%__1cICodeBlobWfix_relocation_at_move6Mi_v_;
+text: .text%__1cNRelocIteratorKinitialize6MipnICodeBlob_pC3_v_;
+text: .text%__1cNRelocIteratorKset_limits6MpC1_v_;
+text: .text%__1cVPatchingRelocIteratorHprepass6M_v_;
+text: .text%__1cNRelocIteratorEnext6M_i_: relocInfo.o;
+text: .text%__1cNRelocIteratorEnext6M_i_: codeBlob.o;
+text: .text%__1cNRelocIteratorFreloc6M_pnKRelocation__;
+text: .text%__1cOCallRelocationWfix_relocation_at_move6Mi_v_;
+text: .text%__1cOCallRelocationFvalue6M_pC_: codeBlob.o;
+text: .text%__1cKRelocationTpd_call_destination6M_pC_;
+text: .text%__1cKNativeCallLdestination6kM_pC_;
+text: .text%__1cOCallRelocationPset_destination6MpCi_v_;
+text: .text%__1cKRelocationXpd_set_call_destination6MpCi_v_;
+text: .text%__1cRNativeInstructionFwrote6Mi_v_;
+text: .text%__1cKRelocationLunpack_data6M_v_: relocInfo.o;
+text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: relocInfo.o;
+text: .text%__1cVPatchingRelocIteratorIpostpass6M_v_;
+text: .text%__1cJOopMapSetJheap_size6kM_i_;
+text: .text%__1cGOopMapJheap_size6kM_i_;
+text: .text%__1cJOopMapSetHcopy_to6MpC_v_;
+text: .text%__1cGOopMapHcopy_to6MpC_v_;
+text: .text%__1cIRuntime1Rgenerate_blob_for6Fn0AGStubID__v_;
+text: .text%__1cIRuntime1Pnew_code_buffer6F_pnKCodeBuffer__;
+text: .text%__1cLOopRecorder2t6MpnFArena__v_;
+text: .text%__1cNStubAssembler2t6MpnKCodeBuffer__v_;
+text: .text%__1cIRuntime1Rgenerate_code_for6Fn0AGStubID_pnNStubAssembler_pi_pnJOopMapSet__;
+text: .text%__1cIRuntime1Iname_for6Fn0AGStubID__pkc_;
+text: .text%__1cLRuntimeStubQnew_runtime_stub6FpkcpnKCodeBuffer_ipnJOopMapSet_i_p0_;
+text: .text%__1cLRuntimeStub2n6FII_pv_;
+text: .text%__1cLRuntimeStub2t6MpkcpnKCodeBuffer_iipnJOopMapSet_i_v_;
+text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkci_v_;
+text: .text%__1cNStubAssemblerIset_info6Mpkci_v_;
+text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC2_i_;
+text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pCi_i_;
+text: .text%__1cJStubFrame2T6M_v_;
+text: .text%__1cIRuntime1Ygenerate_exception_throw6FpnNStubAssembler_pCpnMRegisterImpl__pnJOopMapSet__;
+text: .text%__1cOMacroAssemblerLtlab_refill6MrnFLabel_22_v_;
+text: .text%__1cLlog2_intptr6Fi_i_: assembler_x86.o;
+text: .text%__1cOMacroAssemblerNeden_allocate6MpnMRegisterImpl_2i2rnFLabel__v_;
+text: .text%__1cOMacroAssemblerLverify_tlab6M_v_;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_Runtime1_x86.o;
+text: .text%__1cOMacroAssemblerNtlab_allocate6MpnMRegisterImpl_2i22rnFLabel__v_;
+text: .text%__1cRC1_MacroAssemblerRinitialize_object6MpnMRegisterImpl_22i22_v_;
+text: .text%__1cRC1_MacroAssemblerRinitialize_header6MpnMRegisterImpl_22_v_;
+text: .text%__1cRC1_MacroAssemblerPinitialize_body6MpnMRegisterImpl_2i2_v_;
+text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC22_i_;
+text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pC222_i_;
+text: .text%__1cIRuntime1Iblob_for6Fn0AGStubID__pnICodeBlob__;
+text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkcipnMRegisterImpl_6_v_;
+text: .text%__1cJStubFrame2t6MpnNStubAssembler_pkcipnMRegisterImpl__v_;
+text: .text%__1cIiEntries2t6Miiii_v_;
+text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_;
+text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_;
+text: .text%__1cIRuntime1Rgenerate_patching6FpnNStubAssembler_pC_pnJOopMapSet__;
+text: .text%__1cWrestore_live_registers6FpnOMacroAssembler__v_: c1_Runtime1_x86.o;
+text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
+text: .text%__1cNSafepointBlob2n6FII_pv_;
+text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_;
+text: .text%__1cJAssemblerFfldcw6MnHAddress__v_;
+text: .text%__1cJAssemblerGfnstcw6MnHAddress__v_;
+text: .text%__1cJAssemblerHfcomp_d6MnHAddress__v_;
+text: .text%__1cIiEntriesIset_base6MpC_v_;
+text: .text%__1cQvtableStubs_init6F_v_;
+text: .text%__1cLVtableStubsKinitialize6F_v_;
+text: .text%__1cWInlineCacheBuffer_init6F_v_;
+text: .text%__1cRInlineCacheBufferKinitialize6F_v_;
+text: .text%__1cRInlineCacheBufferOinit_next_stub6F_v_;
+text: .text%__1cRInlineCacheBufferRic_stub_code_size6F_i_;
+text: .text%__1cJStubQdDueueRrequest_committed6Mi_pnEStub__;
+text: .text%__1cPICStubInterfaceRcode_size_to_size6kMi_i_: icBuffer.o;
+text: .text%__1cPICStubInterfaceKinitialize6MpnEStub_i_v_: icBuffer.o;
+text: .text%__1cTcompilerOracle_init6F_v_;
+text: .text%__1cOCompilerOracleRparse_from_string6Fpkc_v_;
+text: .text%__1cOCompilerOracleOread_from_line6Fpc_v_;
+text: .text%__1cOCompilerOraclePparse_from_file6F_v_;
+text: .text%__1cHcc_file6F_pkc_: compilerOracle.o;
+text: .text%__1cXonStackReplacement_init6F_v_;
+text: .text%__1cSOnStackReplacementKinitialize6F_v_;
+text: .text%__1cUGenericGrowableArrayPraw_at_put_grow6MipknEGrET_3_v_;
+text: .text%__1cWcompilationPolicy_init6F_v_;
+text: .text%__1cSuniverse_post_init6F_v_;
+text: .text%__1cIUniverseWreinitialize_vtable_of6FpnFKlass_pnGThread__v_;
+text: .text%__1cNinstanceKlassGvtable6kM_pnLklassVtable__;
+text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceKlass.o;
+text: .text%__1cLklassVtableVinitialize_from_super6MnLKlassHandle__i_;
+text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlass.o;
+text: .text%__1cLklassVtableTupdate_super_vtable6MpnNinstanceKlass_pnNmethodOopDesc_i_i_;
+text: .text%__1cLklassVtableNput_method_at6MpnNmethodOopDesc_i_v_;
+text: .text%__1cLklassVtableQfill_in_mirandas6Mri_v_;
+text: .text%__1cFKlassIsubklass6kM_p0_;
+text: .text%__1cLklassVtableOcopy_vtable_to6MpnLvtableEntry__v_;
+text: .text%__1cLklassVtableXvtable_accessibility_at6Mi_n0AKAccessType__;
+text: .text%__1cFKlassMnext_sibling6kM_p0_;
+text: .text%__1cKarrayKlassMoop_is_array6kM_i_: objArrayKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlass.o;
+text: .text%__1cKarrayKlassMoop_is_array6kM_i_: constantPoolKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: constantPoolKlass.o;
+text: .text%__1cKarrayKlassMoop_is_array6kM_i_: typeArrayKlass.o;
+text: .text%__1cNinstanceKlassKjava_super6kM_pnMklassOopDesc__: instanceRefKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: instanceRefKlass.o;
+text: .text%__1cIUniverseUreinitialize_itables6F_v_;
+text: .text%__1cQSystemDictionaryKclasses_do6FpFpnMklassOopDesc__v_v_;
+text: .text%__1cKDictionaryKclasses_do6MpFpnMklassOopDesc__v_v_;
+text: .text%__1cbBinitialize_itable_for_klass6FpnMklassOopDesc__v_;
+text: .text%__1cNinstanceKlassGitable6kM_pnLklassItable__;
+text: .text%__1cLklassItable2t6MnTinstanceKlassHandle__v_;
+text: .text%__1cLklassItableRinitialize_itable6M_v_;
+text: .text%__1cLklassItablebFinitialize_itable_for_interface6MpnMklassOopDesc_pnRitableMethodEntry__v_;
+text: .text%__1cRitableMethodEntryKinitialize6MpnNmethodOopDesc__v_;
+text: .text%__1cKoopFactoryMnew_objArray6FpnMklassOopDesc_ipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cNinstanceKlassRallocate_objArray6MiipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cNinstanceKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassQarray_klass_impl6FnTinstanceKlassHandle_iipnGThread__pnMklassOopDesc__;
+text: .text%__1cFKlassTarray_klass_or_null6M_pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlass.o;
+text: .text%__1cNobjArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__;
+text: .text%__1cNobjArrayKlassQarray_klass_impl6FnTobjArrayKlassHandle_iipnGThread__pnMklassOopDesc__;
+text: .text%__1cFKlassTarray_klass_or_null6Mi_pnMklassOopDesc__;
+text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: instanceKlass.o;
+text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: instanceKlass.o;
+text: .text%__1cNCollectedHeapXallocate_from_tlab_slow6FpnGThread_I_pnIHeapWord__;
+text: .text%__1cQGenCollectedHeapVunsafe_max_tlab_alloc6kM_I_;
+text: .text%__1cQDefNewGenerationVunsafe_max_tlab_alloc6kM_I_: defNewGeneration.o;
+text: .text%__1cQDefNewGenerationVunsafe_max_alloc_nogc6kM_I_;
+text: .text%__1cPContiguousSpaceEfree6kM_I_: space.o;
+text: .text%__1cWThreadLocalAllocBufferXclear_before_allocation6M_v_;
+text: .text%__1cWThreadLocalAllocBufferFreset6M_v_;
+text: .text%__1cQGenCollectedHeapRallocate_new_tlab6MI_pnIHeapWord__;
+text: .text%__1cQGenCollectedHeapMmem_allocate6MIii_pnIHeapWord__;
+text: .text%__1cbCTwoGenerationCollectorPolicyRmem_allocate_work6MIii_pnIHeapWord__;
+text: .text%__1cQGenCollectedHeapEheap6F_p0_;
+text: .text%__1cQDefNewGenerationPshould_allocate6MIii_i_: defNewGeneration.o;
+text: .text%__1cQDefNewGenerationMpar_allocate6MIii_pnIHeapWord__: defNewGeneration.o;
+text: .text%__1cJEdenSpaceMpar_allocate6MI_pnIHeapWord__;
+text: .text%__1cPContiguousSpaceRpar_allocate_impl6MIkpnIHeapWord__2_: space.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: collectedHeap.o;
+text: .text%__1cWThreadLocalAllocBufferEfill6MpnIHeapWord_2I_v_;
+text: .text%__1cQjava_lang_StringPcreate_from_str6FpkcpnGThread__nGHandle__;
+text: .text%__1cQjava_lang_StringTcreate_oop_from_str6FpkcpnGThread__pnHoopDesc__;
+text: .text%__1cKoopFactoryNnew_charArray6FpkcpnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cEUTF8Ounicode_length6Fpkc_i_;
+text: .text%__1cEUTF8Enext6FpkcpH_pc_;
+text: .text%__1cOtypeArrayKlassIallocate6MipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cQGenCollectedHeapVlarge_typearray_limit6M_I_;
+text: .text%__1cbCTwoGenerationCollectorPolicyYis_two_generation_policy6M_i_: collectorPolicy.o;
+text: .text%__1cbCTwoGenerationCollectorPolicyVlarge_typearray_limit6M_I_;
+text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o;
+text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: typeArrayKlass.o;
+text: .text%__1cEUTF8Sconvert_to_unicode6FpkcpHi_v_;
+text: .text%__1cQjava_lang_StringQbasic_create_oop6FpnQtypeArrayOopDesc_ipnGThread__pnHoopDesc__;
+text: .text%__1cNinstanceKlassRallocate_instance6MpnGThread__pnPinstanceOopDesc__;
+text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: instanceKlass.o;
+text: .text%__1cTjava_lang_ThrowableLset_message6FpnHoopDesc_2_v_;
+text: .text%__1cNinstanceKlassKlink_class6MpnGThread__v_;
+text: .text%__1cNinstanceKlassPlink_class_impl6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNinstanceKlassLverify_code6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cIVerifierRverify_byte_codes6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNinstanceKlassWadd_loader_constraints6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNinstanceKlassNrewrite_class6MpnGThread__v_;
+text: .text%__1cIRewriterHrewrite6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cIRewriterScompute_index_maps6FnSconstantPoolHandle_rpnIintArray_rpnIintStack__v_;
+text: .text%__1cIintArray2t6Mki1_v_: rewriter.o;
+text: .text%__1cIRewriterXnew_constant_pool_cache6FrnIintArray_pnGThread__nXconstantPoolCacheHandle__;
+text: .text%__1cKoopFactoryVnew_constantPoolCache6FipnGThread__pnYconstantPoolCacheOopDesc__;
+text: .text%__1cWconstantPoolCacheKlassIallocate6MipnGThread__pnYconstantPoolCacheOopDesc__;
+text: .text%__1cNCollectedHeapYpermanent_array_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: cpCacheKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: cpCacheKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: cpCacheKlass.o;
+text: .text%__1cYconstantPoolCacheOopDescKinitialize6MrnIintArray__v_;
+text: .text%__1cWConstantPoolCacheEntryRset_initial_state6Mi_v_;
+text: .text%__1cIRewriterOrewrite_method6FnMmethodHandle_rnIintArray_pnGThread__1_;
+text: .text%__1cNmethodOopDescLlink_method6FnMmethodHandle__v_;
+text: .text%__1cTAbstractInterpreterLmethod_kind6FnMmethodHandle__n0AKMethodKind__;
+text: .text%__1cNmethodOopDescbGupdate_compiled_code_entry_point6Mi_v_;
+text: .text%__1cIRuntime1Mientries_for6FnMmethodHandle__pnIiEntries__;
+text: .text%__1cNmethodOopDescLis_accessor6kM_i_;
+text: .text%__1cNmethodOopDescMintrinsic_id6kM_n0ALIntrinsicId__;
+text: .text%__1cQSystemDictionaryXcheck_signature_loaders6FnMsymbolHandle_nGHandle_2ipnGThread__v_;
+text: .text%__1cNSharedRuntimebIinitialize_StrictMath_entry_points6F_v_;
+text: .text%__1cNSharedRuntimeUlookup_function_DD_D6FrpFpnHJNIEnv__pnH_jclass_dd_dpkc_v_;
+text: .text%__1cMNativeLookupTbase_library_lookup6Fpkc22_pC_;
+text: .text%__1cMNativeLookupGlookup6FnMmethodHandle_ripnGThread__pC_;
+text: .text%__1cNmethodOopDescThas_native_function6kM_i_;
+text: .text%__1cMNativeLookupLlookup_base6FnMmethodHandle_ripnGThread__pC_;
+text: .text%__1cMNativeLookupNpure_jni_name6FnMmethodHandle__pc_;
+text: .text%__1cMoutputStreamFprint6MpkcE_v_;
+text: .text%__1cMoutputStreamMdo_vsnprintf6FpcIpkcpvirI_3_;
+text: .text%__1cNmethodOopDescKklass_name6kM_pnNsymbolOopDesc__;
+text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc__v_: nativeLookup.o;
+text: .text%__1cOmangle_name_on6FpnMoutputStream_pnNsymbolOopDesc_ii_v_: nativeLookup.o;
+text: .text%__1cMoutputStreamDput6Mc_v_;
+text: .text%__1cMNativeLookupMlookup_style6FnMmethodHandle_pcpkciiripnGThread__pC_;
+text: .text%__1cCosYprint_jni_name_prefix_on6FpnMoutputStream_i_v_;
+text: .text%__1cCosYprint_jni_name_suffix_on6FpnMoutputStream_i_v_;
+text: .text%__1cVlookup_special_native6Fpc_pC_: nativeLookup.o;
+text: .text%__1cbEinitialize_converter_functions6F_v_;
+text: .text%__1cIUniverseWupdate_heap_info_at_gc6F_v_;
+text: .text%__1cQGenCollectedHeapIcapacity6kM_I_;
+text: .text%__1cQDefNewGenerationIcapacity6kM_I_;
+text: .text%__1cQGenCollectedHeapEused6kM_I_;
+text: .text%__1cQDefNewGenerationEused6kM_I_;
+text: .text%__1cbCOneContigSpaceCardGenerationEused6kM_I_;
+text: .text%__1cQGenCollectedHeapPpost_initialize6M_v_;
+text: .text%__1cQGenCollectedHeapTref_processing_init6M_v_;
+text: .text%__1cKSharedHeapTref_processing_init6M_v_;
+text: .text%__1cKGenerationSref_processor_init6M_v_;
+text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: compactingPermGenGen.o;
+text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: compactingPermGenGen.o;
+text: .text%__1cSReferenceProcessor2t6MnJMemRegion_iii_v_;
+text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: defNewGeneration.o;
+text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: defNewGeneration.o;
+text: .text%__1cKGenerationYrefs_discovery_is_atomic6kM_i_: tenuredGeneration.o;
+text: .text%__1cKGenerationUrefs_discovery_is_mt6kM_i_: tenuredGeneration.o;
+text: .text%__1cNMemoryServiceRset_universe_heap6FpnNCollectedHeap__v_;
+text: .text%__1cQGenCollectedHeapEkind6M_nNCollectedHeapEName__: genCollectedHeap.o;
+text: .text%__1cNMemoryServicebBadd_gen_collected_heap_info6FpnQGenCollectedHeap__v_;
+text: .text%__1cPMarkSweepPolicyUis_mark_sweep_policy6M_i_: collectorPolicy.o;
+text: .text%__1cNMemoryManagerXget_copy_memory_manager6F_pnPGCMemoryManager__;
+text: .text%__1cPGCMemoryManager2t6M_v_;
+text: .text%__1cNMemoryManagerWget_msc_memory_manager6F_pnPGCMemoryManager__;
+text: .text%__1cNMemoryServicebAadd_generation_memory_pool6FpnKGeneration_pnNMemoryManager_4_v_;
+text: .text%__1cQDefNewGenerationEkind6M_nKGenerationEName__: defNewGeneration.o;
+text: .text%__1cNMemoryServiceJadd_space6FpnPContiguousSpace_pkciIi_pnKMemoryPool__;
+text: .text%__1cTContiguousSpacePool2t6MpnPContiguousSpace_pkcnKMemoryPoolIPoolType_Ii_v_;
+text: .text%__1cNMemoryServiceTadd_survivor_spaces6FpnQDefNewGeneration_pkciIi_pnKMemoryPool__;
+text: .text%__1cbBSurvivorContiguousSpacePool2t6MpnQDefNewGeneration_pkcnKMemoryPoolIPoolType_Ii_v_;
+text: .text%__1cRTenuredGenerationEkind6M_nKGenerationEName__: tenuredGeneration.o;
+text: .text%__1cNMemoryServiceHadd_gen6FpnKGeneration_pkcii_pnKMemoryPool__;
+text: .text%__1cOGenerationPool2t6MpnKGeneration_pkcnKMemoryPoolIPoolType_i_v_;
+text: .text%__1cKGenerationMmax_capacity6kM_I_;
+text: .text%__1cNMemoryServicebGadd_compact_perm_gen_memory_pool6FpnUCompactingPermGenGen_pnNMemoryManager__v_;
+text: .text%__1cQGenCollectedHeapNgc_threads_do6kMpnNThreadClosure__v_;
+text: .text%__1cPGCMemoryManagerXinitialize_gc_stat_info6M_v_;
+text: .text%__1cKGCStatInfo2t6Mi_v_;
+text: .text%__1cQjavaClasses_init6F_v_;
+text: .text%__1cLJavaClassesPcompute_offsets6F_v_;
+text: .text%__1cQjava_lang_SystemPcompute_offsets6F_v_;
+text: .text%__1cQjava_lang_ThreadPcompute_offsets6F_v_;
+text: .text%__1cNinstanceKlassQfind_local_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__i_;
+text: .text%__1cVjava_lang_ThreadGroupPcompute_offsets6F_v_;
+text: .text%__1cbIjava_security_AccessControlContextPcompute_offsets6F_v_;
+text: .text%__1cbIjava_lang_reflect_AccessibleObjectPcompute_offsets6F_v_;
+text: .text%__1cYjava_lang_reflect_MethodPcompute_offsets6F_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorPcompute_offsets6F_v_;
+text: .text%__1cXjava_lang_reflect_FieldPcompute_offsets6F_v_;
+text: .text%__1cPjava_nio_BufferPcompute_offsets6F_v_;
+text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_;
+text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_;
+text: .text%__1cSstubRoutines_init26F_v_;
+text: .text%__1cMStubRoutinesLinitialize26F_v_;
+text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_x86.o;
+text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_x86.o;
+text: .text%__1cJAssemblerEincl6MnHAddress__v_;
+text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_;
+text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_;
+text: .text%__1cGThreadbCis_hidden_from_external_view6kM_i_: thread.o;
+text: .text%__1cGThreadVis_jvmti_agent_thread6kM_i_: thread.o;
+text: .text%__1cGEventsDlog6FpkcE_v_: thread.o;
+text: .text%__1cLJvmtiExportbMtransition_pending_onload_raw_monitors6F_v_;
+text: .text%__1cUJvmtiPendingMonitorsXtransition_raw_monitors6F_v_;
+text: .text%__1cUGenericGrowableArrayUclear_and_deallocate6M_v_;
+text: .text%__1cIVMThreadGcreate6F_v_;
+text: .text%__1cIVMThread2t6M_v_;
+text: .text%__1cQVMOperationQdDueue2t6M_v_;
+text: .text%__1cCosNcreate_thread6FpnGThread_n0AKThreadType_I_i_;
+text: .text%__1cCosMstart_thread6FpnGThread__v_;
+text: .text%__1cCosPpd_start_thread6FpnGThread__v_;
+text: .text%__1cHMonitorEwait6Mil_i_;
+text: .text%_start: os_solaris.o;
+text: .text%__1cCosTset_native_priority6FpnGThread_i_nIOSReturn__;
+text: .text%__1cQset_lwp_priority6Fiii_i_;
+text: .text%__1cVscale_to_lwp_priority6Fiii_i_: os_solaris.o;
+text: .text%__1cIVMThreadMis_VM_thread6kM_i_: vmThread.o;
+text: .text%__1cIVMThreadDrun6M_v_;
+text: .text%__1cHMonitorGnotify6M_i_;
+text: .text%__1cIVMThreadEloop6M_v_;
+text: .text%__1cQVMOperationQdDueueLremove_next6M_pnMVM_Operation__;
+text: .text%__1cQVMOperationQdDueueLqueue_empty6Mi_i_;
+text: .text%__1cQVMOperationQdDueueSqueue_remove_front6Mi_pnMVM_Operation__;
+text: .text%__1cLJvmtiExportRenter_start_phase6F_v_;
+text: .text%__1cLJvmtiExportNpost_vm_start6F_v_;
+text: .text%__1cUJvmtiEventControllerIvm_start6F_v_;
+text: .text%__1cQinitialize_class6FnMsymbolHandle_pnGThread__v_: thread.o;
+text: .text%__1cNinstanceKlassKinitialize6MpnGThread__v_;
+text: .text%__1cNinstanceKlassVshould_be_initialized6kM_i_;
+text: .text%__1cNinstanceKlassPinitialize_impl6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNinstanceKlassWcall_class_initializer6MpnGThread__v_;
+text: .text%__1cNinstanceKlassbBcall_class_initializer_impl6FnTinstanceKlassHandle_pnGThread__v_;
+text: .text%__1cNinstanceKlassRclass_initializer6M_pnNmethodOopDesc__;
+text: .text%__1cJJavaCallsEcall6FpnJJavaValue_nMmethodHandle_pnRJavaCallArguments_pnGThread__v_;
+text: .text%__1cCosUos_exception_wrapper6FpFpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v2468_v_;
+text: .text%__1cJJavaCallsLcall_helper6FpnJJavaValue_pnMmethodHandle_pnRJavaCallArguments_pnGThread__v_;
+text: .text%__1cRCompilationPolicyOmustBeCompiled6FnMmethodHandle__i_;
+text: .text%__1cRCompilationPolicyNcanBeCompiled6FnMmethodHandle__i_;
+text: .text%__1cNmethodOopDescRis_not_compilable6kMi_i_;
+text: .text%__1cRruntime_type_from6FpnJJavaValue__nJBasicType__: javaCalls.o;
+text: .text%__1cCosbCstack_shadow_pages_available6FpnGThread_nMmethodHandle__i_;
+text: .text%__1cTAbstractInterpreterbFsize_top_interpreter_activation6FpnNmethodOopDesc__i_;
+text: .text%__1cPJavaCallWrapper2t6MnMmethodHandle_nGHandle_pnJJavaValue_pnGThread__v_;
+text: .text%__1cGThreadSis_Compiler_thread6kM_i_: thread.o;
+text: .text%__1cGThreadXclear_pending_exception6M_v_;
+text: .text%__1cRJavaCallArgumentsKparameters6M_pi_;
+text: .text%__1cSInterpreterRuntimeOresolve_invoke6FpnKJavaThread_nJBytecodesECode__v_;
+text: .text%__1cKJavaThreadPcook_last_frame6MnFframe__1_;
+text: .text%__1cFframeYinterpreter_frame_method6kM_pnNmethodOopDesc__;
+text: .text%__1cFframeVinterpreter_frame_bcp6kM_pC_;
+text: .text%__1cMLinkResolverOresolve_invoke6FrnICallInfo_nGHandle_nSconstantPoolHandle_inJBytecodesECode_pnGThread__v_;
+text: .text%__1cMLinkResolverUresolve_invokestatic6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cMLinkResolverMresolve_pool6FrnLKlassHandle_rnMsymbolHandle_42nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cMLinkResolverNresolve_klass6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cTconstantPoolOopDescMklass_ref_at6MipnGThread__pnMklassOopDesc__;
+text: .text%__1cTconstantPoolOopDescNklass_at_impl6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceKlass.o;
+text: .text%__1cTconstantPoolOopDescbCverify_constant_pool_resolve6FnSconstantPoolHandle_nLKlassHandle_pnGThread__v_;
+text: .text%__1cMLinkResolverZcheck_klass_accessability6FnLKlassHandle_1pnGThread__v_;
+text: .text%__1cTconstantPoolOopDescMklass_at_put6MipnMklassOopDesc__v_: constantPoolOop.o;
+text: .text%__1cTconstantPoolOopDescLname_ref_at6Mi_pnNsymbolOopDesc__;
+text: .text%__1cTconstantPoolOopDescQsignature_ref_at6Mi_pnNsymbolOopDesc__;
+text: .text%__1cMLinkResolverTresolve_static_call6FrnICallInfo_rnLKlassHandle_nMsymbolHandle_53iipnGThread__v_;
+text: .text%__1cMLinkResolverbElinktime_resolve_static_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_;
+text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_;
+text: .text%__1cMLinkResolverYlookup_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
+text: .text%__1cMLinkResolverbAcheck_method_accessability6FnLKlassHandle_11nMmethodHandle_pnGThread__v_;
+text: .text%__1cKReflectionTverify_field_access6FpnMklassOopDesc_22nLAccessFlags_ii_i_;
+text: .text%__1cICallInfoDset6MnLKlassHandle_nMmethodHandle_pnGThread__v_;
+text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2ipnGThread__v_;
+text: .text%__1cSInterpreterRuntimeLcache_entry6FpnKJavaThread__pnWConstantPoolCacheEntry__: interpreterRuntime.o;
+text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: interpreterRuntime.o;
+text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: interpreterRuntime.o;
+text: .text%__1cWConstantPoolCacheEntryKset_method6MnJBytecodesECode_nMmethodHandle_i_v_;
+text: .text%__1cNmethodOopDescLresult_type6kM_nJBasicType__;
+text: .text%__1cRSignatureIteratorSiterate_returntype6M_v_;
+text: .text%__1cNSignatureInfoHdo_void6M_v_: bytecode.o;
+text: .text%__1cQResultTypeFinderDset6MinJBasicType__v_: bytecode.o;
+text: .text%__1cRSignatureIteratorTcheck_signature_end6M_v_;
+text: .text%__1cLas_TosState6FnJBasicType__nITosState__: cpCacheOop.o;
+text: .text%__1cNmethodOopDescPis_final_method6kM_i_;
+text: .text%__1cWConstantPoolCacheEntryIas_flags6MnITosState_iiiii_i_;
+text: .text%__1cWConstantPoolCacheEntryOset_bytecode_16MnJBytecodesECode__v_;
+text: .text%__1cWConstantPoolCacheEntryGverify6kMpnMoutputStream__v_;
+text: .text%__1cSInterpreterRuntimeTprepare_native_call6FpnKJavaThread_pnNmethodOopDesc__v_;
+text: .text%__1cXSignatureHandlerLibraryDadd6FnMmethodHandle__v_;
+text: .text%__1cXSignatureHandlerLibraryKinitialize6F_v_;
+text: .text%__1cXSignatureHandlerLibraryQset_handler_blob6F_pC_;
+text: .text%__1cRSignatureIterator2t6MpnNsymbolOopDesc__v_;
+text: .text%__1cNFingerprinterLfingerprint6M_X_: interpreterRuntime.o;
+text: .text%__1cNGrowableArray4CX_Efind6kMkX_i_: interpreterRuntime.o;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIgenerate6MX_v_;
+text: .text%__1cRSignatureIteratorSiterate_parameters6MX_v_;
+text: .text%__1cXSignatureHandlerLibraryLset_handler6FpnKCodeBuffer__pC_;
+text: .text%__1cXSignatureHandlerLibraryOpd_set_handler6FpC_v_;
+text: .text%jni_RegisterNatives: jni.o;
+text: .text%__1cPjava_lang_ClassLas_klassOop6FpnHoopDesc__pnMklassOopDesc__;
+text: .text%__1cLSymbolTableFprobe6Fpkci_pnNsymbolOopDesc__;
+text: .text%__1cPregister_native6FnLKlassHandle_nMsymbolHandle_1pCpnGThread__i_: jni.o;
+text: .text%__1cPJavaCallWrapper2T6M_v_;
+text: .text%__1cOJNIHandleBlockNrelease_block6Fp0pnGThread__v_;
+text: .text%__1cNinstanceKlassbJset_initialization_state_and_notify6Mn0AKClassState_pnGThread__v_;
+text: .text%__1cNinstanceKlassbOset_initialization_state_and_notify_impl6FnTinstanceKlassHandle_n0AKClassState_pnGThread__v_;
+text: .text%__1cSObjectSynchronizerJnotifyall6FnGHandle_pnGThread__v_;
+text: .text%__1cSInterpreterRuntimeJanewarray6FpnKJavaThread_pnTconstantPoolOopDesc_ii_v_;
+text: .text%__1cSInterpreterRuntimePresolve_get_put6FpnKJavaThread_nJBytecodesECode__v_;
+text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_ipnGThread__v_;
+text: .text%__1cMLinkResolverNresolve_field6FrnPFieldAccessInfo_nSconstantPoolHandle_inJBytecodesECode_iipnGThread__v_;
+text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__;
+text: .text%__1cMLinkResolverZcheck_field_accessability6FnLKlassHandle_11rnPfieldDescriptor_pnGThread__v_;
+text: .text%__1cPFieldAccessInfoDset6MnLKlassHandle_nMsymbolHandle_iinJBasicType_nLAccessFlags__v_;
+text: .text%__1cLas_TosState6FnJBasicType__nITosState__: interpreterRuntime.o;
+text: .text%__1cWConstantPoolCacheEntryJset_field6MnJBytecodesECode_2nLKlassHandle_iinITosState_ii_v_;
+text: .text%__1cWConstantPoolCacheEntryOset_bytecode_26MnJBytecodesECode__v_;
+text: .text%__1cSInterpreterRuntimeE_new6FpnKJavaThread_pnTconstantPoolOopDesc_i_v_;
+text: .text%__1cNinstanceKlassbDcheck_valid_for_instantiation6MipnGThread__v_;
+text: .text%__1cMLinkResolverVresolve_invokespecial6FrnICallInfo_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cMLinkResolverUresolve_special_call6FrnICallInfo_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_;
+text: .text%__1cMLinkResolverbFlinktime_resolve_special_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_;
+text: .text%__1cMLinkResolverbEruntime_resolve_special_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_4ipnGThread__v_;
+text: .text%__1cWConstantPoolCacheEntryLis_resolved6kMnJBytecodesECode__i_: cpCacheOop.o;
+text: .text%__1cWConstantPoolCacheEntryPbytecode_number6FnJBytecodesECode__i_: cpCacheOop.o;
+text: .text%__1cNSignatureInfoJdo_object6Mii_v_: bytecode.o;
+text: .text%__1cNSignatureInfoHdo_long6M_v_: bytecode.o;
+text: .text%JVM_CurrentTimeMillis;
+text: .text%__1cbBcreate_initial_thread_group6FpnGThread__nGHandle__: thread.o;
+text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_;
+text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_;
+text: .text%__1cSInterpreterRuntimeDldc6FpnKJavaThread_i_v_;
+text: .text%__1cTconstantPoolOopDescOstring_at_impl6FnSconstantPoolHandle_ipnGThread__pnHoopDesc__;
+text: .text%__1cLsymbolKlassNoop_is_symbol6kM_i_: symbolKlass.o;
+text: .text%__1cLStringTableGintern6FpnNsymbolOopDesc_pnGThread__pnHoopDesc__;
+text: .text%__1cNsymbolOopDescKas_unicode6kMri_pH_;
+text: .text%__1cEUTF8Ounicode_length6Fpkci_i_;
+text: .text%__1cLStringTableGintern6FnGHandle_pHipnGThread__pnHoopDesc__;
+text: .text%__1cLStringTableLhash_string6FpHi_i_;
+text: .text%__1cLStringTableGlookup6MipHiI_pnHoopDesc__;
+text: .text%__1cLStringTableJbasic_add6MinGHandle_pHiIpnGThread__pnHoopDesc__;
+text: .text%__1cQjava_lang_StringbBcreate_tenured_from_unicode6FpHipnGThread__nGHandle__;
+text: .text%__1cKoopFactoryXnew_permanent_charArray6FipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cQjava_lang_StringMbasic_create6FpnQtypeArrayOopDesc_ipnGThread__nGHandle__;
+text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_;
+text: .text%__1cNmethodOopDescIbci_from6kMpC_i_;
+text: .text%__1cPBytecode_invokeJsignature6kM_pnNsymbolOopDesc__;
+text: .text%__1cPBytecode_invokeFindex6kM_i_;
+text: .text%__1cNmethodOopDescIbcp_from6kMi_pC_;
+text: .text%__1cFframebGinterpreter_callee_receiver_addr6MnMsymbolHandle__ppnHoopDesc__;
+text: .text%__1cMLinkResolverVresolve_invokevirtual6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cMLinkResolverUresolve_virtual_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_;
+text: .text%__1cMLinkResolverbFlinktime_resolve_virtual_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_;
+text: .text%__1cMLinkResolverbEruntime_resolve_virtual_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_;
+text: .text%__1cFKlassXcan_be_statically_bound6FpnNmethodOopDesc__i_;
+text: .text%__1cNSignatureInfoGdo_int6M_v_: bytecode.o;
+text: .text%__1cNSignatureInfoHdo_char6M_v_: bytecode.o;
+text: .text%__1cFKlassOis_subclass_of6kMpnMklassOopDesc__i_;
+text: .text%__1cNinstanceKlassVis_same_class_package6MpnHoopDesc_pnNsymbolOopDesc__i_;
+text: .text%__1cJBytecodesRspecial_length_at6FpC_i_;
+text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_5pnGThread__v_;
+text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_;
+text: .text%__1cLklassVtableIindex_of6kMpnNmethodOopDesc_i_i_;
+text: .text%__1cMNativeLookupNlong_jni_name6FnMmethodHandle__pc_;
+text: .text%__1cNFingerprinterJdo_object6Mii_v_: dump.o;
+text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRuntime.o;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorLpass_object6M_v_: interpreterRuntime.o;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorDbox6Mii_v_;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEfrom6F_pnMRegisterImpl__;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorCto6F_pnMRegisterImpl__;
+text: .text%JVM_DoPrivileged;
+text: .text%__1cLmethodKlassNoop_is_method6kM_i_: methodKlass.o;
+text: .text%__1cMvframeStream2t6MpnKJavaThread_i_v_;
+text: .text%__1cLRegisterMap2t6MpnKJavaThread_i_v_;
+text: .text%__1cLRegisterMapFclear6Mpi_v_;
+text: .text%__1cSvframeStreamCommonPfill_from_frame6M_i_;
+text: .text%__1cFframeUis_interpreted_frame6kM_i_;
+text: .text%__1cSvframeStreamCommonbBfill_from_interpreter_frame6M_v_;
+text: .text%__1cSvframeStreamCommonZsecurity_get_caller_frame6Mi_v_;
+text: .text%__1cSvframeStreamCommonbHskip_method_invoke_and_aux_frames6M_v_;
+text: .text%__1cSvframeStreamCommonEnext6M_v_;
+text: .text%__1cFframeGsender6kMpnLRegisterMap_pnICodeBlob__0_;
+text: .text%__1cFframeOis_entry_frame6kM_i_;
+text: .text%__1cRPrivilegedElementKinitialize6MpnMvframeStream_pnHoopDesc_p0pnGThread__v_;
+text: .text%__1cKJNIHandlesKmake_local6FpnHJNIEnv__pnHoopDesc__pnI_jobject__;
+text: .text%__1cOJNIHandleBlockPallocate_handle6MpnHoopDesc__pnI_jobject__;
+text: .text%__1cSInterpreterRuntimeNquicken_io_cc6FpnKJavaThread__v_;
+text: .text%__1cNSignatureInfoHdo_bool6M_v_: bytecode.o;
+text: .text%__1cNSharedRuntimeDf2i6Ff_i_;
+text: .text%jni_FindClass: jni.o;
+text: .text%__1cKJavaThreadZsecurity_get_caller_class6Mi_pnMklassOopDesc__;
+text: .text%__1cbCfind_class_from_class_loader6FpnHJNIEnv__nMsymbolHandle_CnGHandle_3CpnGThread__pnH_jclass__;
+text: .text%__1cRCompilationPolicyUcompleted_vm_startup6F_v_;
+text: .text%jni_NewGlobalRef: jni.o;
+text: .text%__1cKJNIHandlesLmake_global6FnGHandle_i_pnI_jobject__;
+text: .text%jni_GetStringUTFChars: jni.o;
+text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc__pc_;
+text: .text%__1cHUNICODEHas_utf86FpHi_pc_;
+text: .text%__1cHUNICODELutf8_length6FpHi_i_;
+text: .text%__1cKutf8_write6FpCH_0_: utf8.o;
+text: .text%JVM_FindPrimitiveClass;
+text: .text%__1cJname2type6Fpkc_nJBasicType__;
+text: .text%jni_ReleaseStringUTFChars;
+text: .text%__1cVcreate_initial_thread6FnGHandle_pnKJavaThread_pnGThread__pnHoopDesc__: thread.o;
+text: .text%__1cQjava_lang_ThreadKset_thread6FpnHoopDesc_pnKJavaThread__v_;
+text: .text%__1cQjava_lang_ThreadMset_priority6FpnHoopDesc_nOThreadPriority__v_;
+text: .text%JVM_CurrentThread;
+text: .text%__1cNSignatureInfoIdo_array6Mii_v_: bytecode.o;
+text: .text%__1cSInterpreterRuntimeInewarray6FpnKJavaThread_nJBasicType_i_v_;
+text: .text%__1cKoopFactoryNnew_typeArray6FnJBasicType_ipnGThread__pnQtypeArrayOopDesc__;
+text: .text%__1cNFingerprinterGdo_int6M_v_: dump.o;
+text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRuntime.o;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorIpass_int6M_v_: interpreterRuntime.o;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorEmove6Mii_v_;
+text: .text%JVM_ArrayCopy;
+text: .text%__1cOtypeArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_;
+text: .text%__1cOtypeArrayKlassQoop_is_typeArray6kM_i_: typeArrayKlass.o;
+text: .text%JVM_GetStackAccessControlContext;
+text: .text%__1cbGJvmtiVMObjectAllocEventCollector2t6M_v_;
+text: .text%__1cJCodeCacheJfind_blob6Fpv_pnICodeBlob__;
+text: .text%__1cJCodeCacheQfind_blob_unsafe6Fpv_pnICodeBlob__;
+text: .text%__1cICodeHeapKfind_start6kMpv_1_;
+text: .text%__1cICodeBlobJis_zombie6kM_i_: codeBlob.o;
+text: .text%__1cICodeBlobKis_nmethod6kM_i_: codeBlob.o;
+text: .text%__1cFframeOis_first_frame6kM_i_;
+text: .text%__1cFframeUentry_frame_is_first6kM_i_;
+text: .text%__1cbIjava_security_AccessControlContextGcreate6FnOobjArrayHandle_inGHandle_pnGThread__pnHoopDesc__;
+text: .text%__1cbGJvmtiVMObjectAllocEventCollector2T6M_v_;
+text: .text%__1cTJvmtiEventCollectorYunset_jvmti_thread_state6M_v_;
+text: .text%JVM_GetInheritedAccessControlContext;
+text: .text%__1cQjava_lang_ThreadbGinherited_access_control_context6FpnHoopDesc__2_;
+text: .text%JVM_SetThreadPriority;
+text: .text%__1cQjava_lang_ThreadGthread6FpnHoopDesc__pnKJavaThread__;
+text: .text%__1cGThreadMset_priority6Fp0nOThreadPriority__v_;
+text: .text%__1cCosMset_priority6FpnGThread_nOThreadPriority__nIOSReturn__;
+text: .text%__1cQjava_lang_ThreadRset_thread_status6FpnHoopDesc_n0AMThreadStatus__v_;
+text: .text%JVM_IsThreadAlive;
+text: .text%__1cQjava_lang_ThreadIis_alive6FpnHoopDesc__i_;
+text: .text%JVM_StartThread;
+text: .text%__1cQjava_lang_ThreadMis_stillborn6FpnHoopDesc__i_;
+text: .text%__1cQjava_lang_ThreadJstackSize6FpnHoopDesc__x_;
+text: .text%__1cKJavaThread2t6MpFp0pnGThread__vI_v_;
+text: .text%__1cKJavaThreadHprepare6MpnI_jobject_nOThreadPriority__v_;
+text: .text%__1cQjava_lang_ThreadIpriority6FpnHoopDesc__nOThreadPriority__;
+text: .text%__1cQjava_lang_ThreadJis_daemon6FpnHoopDesc__i_;
+text: .text%__1cGThreadFstart6Fp0_v_;
+text: .text%__1cNinstanceKlassRprotection_domain6M_pnHoopDesc__: instanceRefKlass.o;
+text: .text%__1cbAcall_initializeSystemClass6FpnGThread__v_: thread.o;
+text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
+text: .text%__1cKJavaThreadDrun6M_v_;
+text: .text%__1cKJavaThreadRthread_main_inner6M_v_;
+text: .text%__1cMthread_entry6FpnKJavaThread_pnGThread__v_: jvm.o;
+text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_5pnGThread__v_;
+text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4pnRJavaCallArguments_pnGThread__v_;
+text: .text%__1cIBytecodeIset_code6MnJBytecodesECode__v_;
+text: .text%__1cNFingerprinterHdo_long6M_v_: dump.o;
+text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: interpreterRuntime.o;
+text: .text%__1cSInterpreterRuntimeZSignatureHandlerGeneratorJpass_long6M_v_: interpreterRuntime.o;
+text: .text%JVM_MonitorWait;
+text: .text%__1cQjava_lang_ThreadRget_thread_status6FpnHoopDesc__n0AMThreadStatus__;
+text: .text%__1cSObjectSynchronizerEwait6FnGHandle_xpnGThread__v_;
+text: .text%__1cSObjectSynchronizerHinflate6FpnHoopDesc__pnNObjectMonitor__;
+text: .text%__1cNObjectMonitor2t6M_v_;
+text: .text%__1cNObjectMonitorHRecycle6M_v_;
+text: .text%__1cNObjectMonitorEwait6MxipnGThread__v_;
+text: .text%__1cGThreadOis_interrupted6Fp0i_i_;
+text: .text%__1cCosOis_interrupted6FpnGThread_i_i_;
+text: .text%__1cNObjectMonitorEexit6MpnGThread__v_;
+text: .text%__1cCosHSolarisFEventEpark6M_v_: objectMonitor_solaris.o;
+text: .text%jni_GetObjectClass: jni.o;
+text: .text%jni_GetMethodID: jni.o;
+text: .text%__1cNget_method_id6FpnHJNIEnv__pnH_jclass_pkc5ipnGThread__pnK_jmethodID__: jni.o;
+text: .text%__1cPjava_lang_ClassMis_primitive6FpnHoopDesc__i_;
+text: .text%__1cNmethodOopDescKjmethod_id6M_pnK_jmethodID__;
+text: .text%__1cMjniIdSupportNto_jmethod_id6FpnNmethodOopDesc__pnK_jmethodID__;
+text: .text%__1cMjniIdPrivateGid_for6FnTinstanceKlassHandle_i_i_: jniId.o;
+text: .text%__1cIjniIdMapGcreate6FnTinstanceKlassHandle__p0_;
+text: .text%__1cIjniIdMapRcompute_index_cnt6FnTinstanceKlassHandle__i_;
+text: .text%__1cIjniIdMap2t6MpnMklassOopDesc_i_v_;
+text: .text%__1cLjniIdBucket2t6MpnIjniIdMap_p0_v_;
+text: .text%jni_NewStringUTF: jni.o;
+text: .text%jni_CallObjectMethod: jni.o;
+text: .text%__1cMjniIdSupportNto_method_oop6FpnK_jmethodID__pnNmethodOopDesc__;
+text: .text%__1cRSignatureIterator2t6MpnGThread_pnNsymbolOopDesc__v_;
+text: .text%__1cUjni_invoke_nonstatic6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o;
+text: .text%__1cNFingerprinterLfingerprint6M_X_: jni.o;
+text: .text%__1cXJNI_ArgumentPusherVaArgHiterate6MX_v_: jni.o;
+text: .text%__1cXJNI_ArgumentPusherVaArgKget_object6M_v_: jni.o;
+text: .text%jni_ExceptionOccurred: jni.o;
+text: .text%__1cbAjni_check_async_exceptions6FpnKJavaThread__v_: jni.o;
+text: .text%__1cKJavaThreadbHcheck_and_handle_async_exceptions6Mi_v_;
+text: .text%jni_DeleteLocalRef: jni.o;
+text: .text%__1cOJNIHandleBlockRrebuild_free_list6M_v_;
+text: .text%jni_EnsureLocalCapacity;
+text: .text%jni_GetStaticMethodID: jni.o;
+text: .text%jni_CallStaticObjectMethodV: jni.o;
+text: .text%__1cRjni_invoke_static6FpnHJNIEnv__pnJJavaValue_pnI_jobject_nLJNICallType_pnK_jmethodID_pnSJNI_ArgumentPusher_pnGThread__v_: jni.o;
+text: .text%__1cMLinkResolverbHlookup_instance_method_in_klasses6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
+text: .text%jni_ExceptionCheck: jni.o;
+text: .text%jni_NewString: jni.o;
+text: .text%__1cQjava_lang_StringXcreate_oop_from_unicode6FpHipnGThread__pnHoopDesc__;
+text: .text%JVM_InitProperties;
+text: .text%__1cMset_property6FnGHandle_pkc2pnGThread__v_: jvm.o;
+text: .text%__1cQjava_lang_StringbHcreate_from_platform_depended_str6FpkcpnGThread__nGHandle__;
+text: .text%__1cJJavaCallsMcall_virtual6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_533pnGThread__v_;
+text: .text%__1cKSharedHeapXfill_region_with_object6FnJMemRegion__v_;
+text: .text%__1cYNoJvmtiVMObjectAllocMark2t6M_v_;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: sharedHeap.o;
+text: .text%__1cYNoJvmtiVMObjectAllocMark2T6M_v_;
+text: .text%__1cPfieldDescriptorUstring_initial_value6kMpnGThread__pnHoopDesc__;
+text: .text%jni_GetFieldID: jni.o;
+text: .text%__1cNinstanceKlassKfind_field6kMpnNsymbolOopDesc_2ipnPfieldDescriptor__pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassSregister_finalizer6FpnPinstanceOopDesc_pnGThread__2_;
+text: .text%__1cJFieldTypeYis_valid_array_signature6FpnNsymbolOopDesc__i_;
+text: .text%__1cQSystemDictionarybBresolve_array_class_or_null6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cJFieldTypeOget_array_info6FpnNsymbolOopDesc_pip2pnGThread__nJBasicType__;
+text: .text%__1cJFieldTypeSskip_optional_size6FpnNsymbolOopDesc_pi_v_;
+text: .text%__1cOtypeArrayKlassQarray_klass_impl6MiipnGThread__pnMklassOopDesc__;
+text: .text%__1cOtypeArrayKlassQarray_klass_impl6FnUtypeArrayKlassHandle_iipnGThread__pnMklassOopDesc__;
+text: .text%JVM_RegisterUnsafeMethods;
+text: .text%JVM_IsArrayClass;
+text: .text%JVM_GetComponentType;
+text: .text%__1cKReflectionUarray_component_type6FpnHoopDesc_pnGThread__2_;
+text: .text%__1cKReflectionbFbasic_type_arrayklass_to_mirror6FpnMklassOopDesc_pnGThread__pnHoopDesc__;
+text: .text%JVM_IsPrimitiveClass;
+text: .text%JVM_GetClassLoader;
+text: .text%JVM_DesiredAssertionStatus;
+text: .text%__1cOJavaAssertionsHenabled6Fpkci_i_;
+text: .text%__1cOJavaAssertionsLmatch_class6Fpkc_pn0AKOptionList__: javaAssertions.o;
+text: .text%__1cOJavaAssertionsNmatch_package6Fpkc_pn0AKOptionList__;
+text: .text%__1cNinstanceKlassUfind_interface_field6kMpnNsymbolOopDesc_2pnPfieldDescriptor__pnMklassOopDesc__;
+text: .text%JVM_InternString;
+text: .text%__1cLStringTableGintern6FpnHoopDesc_pnGThread__2_;
+text: .text%__1cQjava_lang_StringRas_unicode_string6FpnHoopDesc_ri_pH_;
+text: .text%__1cKSharedHeapPis_in_permanent6kMpkv_i_: genCollectedHeap.o;
+text: .text%__1cQjava_lang_StringGequals6FpnHoopDesc_pHi_i_;
+text: .text%JVM_NanoTime;
+text: .text%__1cCosNjavaTimeNanos6F_x_;
+text: .text%JVM_GetCallerClass;
+text: .text%JVM_SupportsCX8;
+text: .text%__1cNFingerprinterHdo_bool6M_v_: dump.o;
+text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRuntime.o;
+text: .text%JVM_GetClassDeclaredFields;
+text: .text%__1cKReflectionJnew_field6FpnPfieldDescriptor_ipnGThread__pnHoopDesc__;
+text: .text%__1cKReflectionInew_type6FnMsymbolHandle_nLKlassHandle_pnGThread__nGHandle__;
+text: .text%__1cJvmSymbolsOsignature_type6FpnNsymbolOopDesc__nJBasicType__;
+text: .text%__1cXjava_lang_reflect_FieldGcreate6FpnGThread__nGHandle__;
+text: .text%__1cXjava_lang_reflect_FieldJset_clazz6FpnHoopDesc_2_v_;
+text: .text%__1cXjava_lang_reflect_FieldIset_slot6FpnHoopDesc_i_v_;
+text: .text%__1cXjava_lang_reflect_FieldIset_name6FpnHoopDesc_2_v_;
+text: .text%__1cXjava_lang_reflect_FieldIset_type6FpnHoopDesc_2_v_;
+text: .text%__1cXjava_lang_reflect_FieldNset_modifiers6FpnHoopDesc_i_v_;
+text: .text%__1cbIjava_lang_reflect_AccessibleObjectMset_override6FpnHoopDesc_C_v_;
+text: .text%__1cXjava_lang_reflect_FieldThas_signature_field6F_i_;
+text: .text%__1cXjava_lang_reflect_FieldVhas_annotations_field6F_i_;
+text: .text%__1cPfieldDescriptorLannotations6kM_pnQtypeArrayOopDesc__;
+text: .text%__1cXjava_lang_reflect_FieldPset_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cMLinkResolverXresolve_invokeinterface6FrnICallInfo_nGHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cMLinkResolverWresolve_interface_call6FrnICallInfo_nGHandle_nLKlassHandle_4nMsymbolHandle_54iipnGThread__v_;
+text: .text%__1cMLinkResolverbHlinktime_resolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_;
+text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_43ipnGThread__v_;
+text: .text%__1cMLinkResolverbGruntime_resolve_interface_method6FrnICallInfo_nMmethodHandle_nLKlassHandle_nGHandle_4ipnGThread__v_;
+text: .text%__1cICallInfoDset6MnLKlassHandle_1nMmethodHandle_2pnGThread__v_;
+text: .text%__1cLklassItableUcompute_itable_index6FpnNmethodOopDesc__i_;
+text: .text%__1cWConstantPoolCacheEntrySset_interface_call6MnMmethodHandle_i_v_;
+text: .text%Unsafe_ObjectFieldOffset;
+text: .text%__1cRfind_field_offset6FpnI_jobject_ipnGThread__i_;
+text: .text%__1cXjava_lang_reflect_FieldFclazz6FpnHoopDesc__2_;
+text: .text%__1cXjava_lang_reflect_FieldEslot6FpnHoopDesc__i_;
+text: .text%__1cXjava_lang_reflect_FieldJmodifiers6FpnHoopDesc__i_;
+text: .text%__1cQjava_lang_StringScreate_from_symbol6FnMsymbolHandle_pnGThread__nGHandle__;
+text: .text%__1cXjava_lang_reflect_FieldNset_signature6FpnHoopDesc_2_v_;
+text: .text%JVM_IHashCode;
+text: .text%jni_GetStaticFieldID: jni.o;
+text: .text%__1cNinstanceKlassKjni_id_for6Mi_pnFJNIid__;
+text: .text%__1cNinstanceKlassPjni_id_for_impl6FnTinstanceKlassHandle_i_pnFJNIid__;
+text: .text%__1cFJNIid2t6MpnMklassOopDesc_ip0_v_;
+text: .text%jni_SetStaticObjectField: jni.o;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceRefKlass.o;
+text: .text%__1cNobjArrayKlassPoop_is_objArray6kM_i_: objArrayKlass.o;
+text: .text%__1cKarrayKlassTallocate_arrayArray6MiipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cNobjArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlass.o;
+text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: arrayKlass.o;
+text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: arrayKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: arrayKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: arrayKlass.o;
+text: .text%jni_GetStringUTFLength: jni.o;
+text: .text%__1cQjava_lang_StringLutf8_length6FpnHoopDesc__i_;
+text: .text%jni_GetStringLength: jni.o;
+text: .text%__1cQjava_lang_StringGlength6FpnHoopDesc__i_;
+text: .text%jni_GetStringUTFRegion: jni.o;
+text: .text%__1cQjava_lang_StringOas_utf8_string6FpnHoopDesc_ii_pc_;
+text: .text%JVM_FindClassFromClassLoader;
+text: .text%JVM_IsInterface;
+text: .text%JVM_GetClassDeclaredConstructors;
+text: .text%__1cNmethodOopDescOis_initializer6kM_i_;
+text: .text%__1cKReflectionPnew_constructor6FnMmethodHandle_pnGThread__pnHoopDesc__;
+text: .text%__1cNinstanceKlassQmethod_index_for6kMpnNmethodOopDesc_pnGThread__i_;
+text: .text%__1cKReflectionTget_parameter_types6FnMmethodHandle_ippnHoopDesc_pnGThread__nOobjArrayHandle__;
+text: .text%__1cPSignatureStream2t6MnMsymbolHandle_i_v_;
+text: .text%__1cPSignatureStreamEnext6M_v_;
+text: .text%__1cKReflectionTget_exception_types6FnMmethodHandle_pnGThread__nOobjArrayHandle__;
+text: .text%__1cNmethodOopDescbGresolved_checked_exceptions_impl6Fp0pnGThread__nOobjArrayHandle__;
+text: .text%__1cSconstMethodOopDescZchecked_exceptions_length6kM_i_;
+text: .text%__1cbDjava_lang_reflect_ConstructorGcreate6FpnGThread__nGHandle__;
+text: .text%__1cbDjava_lang_reflect_ConstructorJset_clazz6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorIset_slot6FpnHoopDesc_i_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorTset_parameter_types6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorTset_exception_types6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorNset_modifiers6FpnHoopDesc_i_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorThas_signature_field6F_i_;
+text: .text%__1cbDjava_lang_reflect_ConstructorVhas_annotations_field6F_i_;
+text: .text%__1cNmethodOopDescLannotations6kM_pnQtypeArrayOopDesc__;
+text: .text%__1cbDjava_lang_reflect_ConstructorPset_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cbDjava_lang_reflect_ConstructorbFhas_parameter_annotations_field6F_i_;
+text: .text%__1cNmethodOopDescVparameter_annotations6kM_pnQtypeArrayOopDesc__;
+text: .text%__1cbDjava_lang_reflect_ConstructorZset_parameter_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cKarrayKlassWuncached_lookup_method6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%JVM_Clone;
+text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: jvm.o;
+text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: jvm.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: jvm.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: jvm.o;
+text: .text%__1cRCardTableModRefBSPdirty_MemRegion6MnJMemRegion__v_;
+text: .text%JVM_GetClassAccessFlags;
+text: .text%JVM_GetClassName;
+text: .text%__1cFKlassNexternal_name6kM_pkc_;
+text: .text%__1cNsymbolOopDescWas_klass_external_name6kM_pkc_;
+text: .text%__1cLStringTableGintern6FpkcpnGThread__pnHoopDesc__;
+text: .text%JVM_GetClassModifiers;
+text: .text%jni_GetSuperclass: jni.o;
+text: .text%__1cKJNIHandlesKmake_local6FpnHoopDesc__pnI_jobject__;
+text: .text%__1cNFingerprinterIdo_array6Mii_v_: dump.o;
+text: .text%JVM_NewInstanceFromConstructor;
+text: .text%__1cKReflectionSinvoke_constructor6FpnHoopDesc_nOobjArrayHandle_pnGThread__2_;
+text: .text%__1cbDjava_lang_reflect_ConstructorFclazz6FpnHoopDesc__2_;
+text: .text%__1cbDjava_lang_reflect_ConstructorEslot6FpnHoopDesc__i_;
+text: .text%__1cbIjava_lang_reflect_AccessibleObjectIoverride6FpnHoopDesc__C_;
+text: .text%__1cbDjava_lang_reflect_ConstructorPparameter_types6FpnHoopDesc__2_;
+text: .text%__1cKReflectionGinvoke6FnTinstanceKlassHandle_nMmethodHandle_nGHandle_inOobjArrayHandle_nJBasicType_4ipnGThread__pnHoopDesc__;
+text: .text%__1cKReflectionDbox6FpnGjvalue_nJBasicType_pnGThread__pnHoopDesc__;
+text: .text%JVM_MaxMemory;
+text: .text%__1cQGenCollectedHeapMmax_capacity6kM_I_;
+text: .text%__1cQDefNewGenerationMmax_capacity6kM_I_;
+text: .text%Unsafe_AllocateMemory;
+text: .text%Unsafe_SetNativeLong;
+text: .text%__1cNSignatureInfoHdo_byte6M_v_: bytecode.o;
+text: .text%Unsafe_GetNativeByte;
+text: .text%Unsafe_FreeMemory;
+text: .text%__1cNSignatureInfoIdo_float6M_v_: bytecode.o;
+text: .text%__1cFJNIidEfind6Mi_p0_;
+text: .text%jni_NewObjectV: jni.o;
+text: .text%__1cMalloc_object6FpnH_jclass_pnGThread__pnPinstanceOopDesc__: jni.o;
+text: .text%jni_GetStringRegion: jni.o;
+text: .text%__1cQjava_lang_StringGoffset6FpnHoopDesc__i_;
+text: .text%__1cQjava_lang_StringFvalue6FpnHoopDesc__pnQtypeArrayOopDesc__;
+text: .text%jni_GetObjectField: jni.o;
+text: .text%jni_GetStringCritical: jni.o;
+text: .text%__1cJGC_lockerNlock_critical6FpnKJavaThread__v_: jni.o;
+text: .text%jni_ReleaseStringCritical: jni.o;
+text: .text%JVM_LoadLibrary;
+text: .text%JVM_FindLibraryEntry;
+text: .text%jni_GetJavaVM;
+text: .text%JVM_IsSupportedJNIVersion;
+text: .text%jni_SetIntField: jni.o;
+text: .text%jni_SetLongField: jni.o;
+text: .text%JVM_FindSignal;
+text: .text%JVM_RegisterSignal;
+text: .text%__1cCosMuser_handler6F_pv_;
+text: .text%__1cCosGsignal6Fipv_1_;
+text: .text%__1cWreset_vm_info_property6FpnGThread__v_: thread.o;
+text: .text%__1cVquicken_jni_functions6F_v_;
+text: .text%__1cQJNI_FastGetFieldbFgenerate_fast_get_boolean_field6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_int_field06FnJBasicType__pC_;
+text: .text%__1cJAssemblerFtestb6MpnMRegisterImpl_i_v_;
+text: .text%__1cJAssemblerMemit_arith_b6MiipnMRegisterImpl_i_v_;
+text: .text%__1cYjni_GetBooleanField_addr6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_byte_field6F_pC_;
+text: .text%__1cVjni_GetByteField_addr6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_char_field6F_pC_;
+text: .text%__1cVjni_GetCharField_addr6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_short_field6F_pC_;
+text: .text%__1cWjni_GetShortField_addr6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbBgenerate_fast_get_int_field6F_pC_;
+text: .text%__1cUjni_GetIntField_addr6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbCgenerate_fast_get_long_field6F_pC_;
+text: .text%__1cVjni_GetLongField_addr6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbDgenerate_fast_get_float_field6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_float_field06FnJBasicType__pC_;
+text: .text%__1cJAssemblerFfst_s6MnHAddress__v_;
+text: .text%__1cJAssemblerGfstp_d6Mi_v_;
+text: .text%__1cWjni_GetFloatField_addr6F_pC_;
+text: .text%__1cQJNI_FastGetFieldbEgenerate_fast_get_double_field6F_pC_;
+text: .text%__1cXjni_GetDoubleField_addr6F_pC_;
+text: .text%__1cSset_init_completed6F_v_;
+text: .text%__1cJTimeStampGupdate6M_v_;
+text: .text%__1cQSystemDictionarybAcompute_java_system_loader6FpnGThread__v_;
+text: .text%jni_NewObjectArray: jni.o;
+text: .text%__1cNobjArrayKlassKinitialize6MpnGThread__v_;
+text: .text%__1cNobjArrayKlassIallocate6MipnGThread__pnPobjArrayOopDesc__;
+text: .text%__1cNCollectedHeapOarray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: objArrayKlass.o;
+text: .text%__1cNCollectedHeapYcommon_mem_allocate_init6FIipnGThread__pnIHeapWord__: objArrayKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: objArrayKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: objArrayKlass.o;
+text: .text%jni_SetObjectArrayElement: jni.o;
+text: .text%jni_GetObjectArrayElement: jni.o;
+text: .text%__1cSInterpreterRuntimebAfrequency_counter_overflow6FpnKJavaThread_pC_x_;
+text: .text%__1cQSimpleCompPolicyXmethod_invocation_event6MnMmethodHandle_pnGThread__v_;
+text: .text%__1cRCompilationPolicybIreset_counter_for_invocation_event6MnMmethodHandle__v_;
+text: .text%__1cRInvocationCounterJset_carry6M_v_;
+text: .text%__1cNCompileBrokerOcompile_method6FnMmethodHandle_i1ipkcpnGThread__pnHnmethod__;
+text: .text%__1cQSimpleCompPolicyRcompilation_level6MnMmethodHandle_i_i_;
+text: .text%__1cNCompileBrokerTcompile_method_base6FnMmethodHandle_ii1ipkcpnGThread__pnHnmethod__;
+text: .text%__1cNobjArrayKlassKcopy_array6MpnMarrayOopDesc_i2iipnGThread__v_;
+text: .text%__1cLJvmtiExportQenter_live_phase6F_v_;
+text: .text%__1cLJvmtiExportTpost_vm_initialized6F_v_;
+text: .text%__1cUJvmtiEventControllerHvm_init6F_v_;
+text: .text%__1cHMonitorKnotify_all6M_i_;
+text: .text%__1cFChunkbDstart_chunk_pool_cleaner_task6F_v_;
+text: .text%__1cMPeriodicTask2t6MI_v_;
+text: .text%__1cMPeriodicTaskGenroll6M_v_;
+text: .text%__1cURecompilationMonitorbGstart_recompilation_monitor_task6F_v_;
+text: .text%__1cCosLsignal_init6F_v_;
+text: .text%__1cJJavaCallsMcall_special6FpnJJavaValue_nGHandle_nLKlassHandle_nMsymbolHandle_53pnGThread__v_;
+text: .text%__1cCosOsignal_init_pd6F_v_;
+text: .text%__1cQjava_lang_ThreadKset_daemon6FpnHoopDesc__v_;
+text: .text%__1cICompiler2t6M_v_;
+text: .text%__1cNCompileBrokerQcompilation_init6FpnQAbstractCompiler__v_;
+text: .text%__1cNCompileBrokerVinit_compiler_threads6Fi_v_;
+text: .text%__1cICompilerOneeds_adapters6M_i_: c1_Compiler.o;
+text: .text%__1cQCompilerCounters2t6MpkcipnGThread__v_;
+text: .text%__1cNCompileBrokerUmake_compiler_thread6FpkcpnMCompileQdDueue_pnQCompilerCounters_pnGThread__pnOCompilerThread__;
+text: .text%__1cOCompilerThread2t6MpnMCompileQdDueue_pnQCompilerCounters__v_;
+text: .text%__1cOCompilerThreadbCis_hidden_from_external_view6kM_i_: thread.o;
+text: .text%__1cCosFyield6F_v_;
+text: .text%__1cCosFsleep6FpnGThread_xi_i_;
+text: .text%__1cGThreadRis_Watcher_thread6kM_i_: thread.o;
+text: .text%__1cTsignal_thread_entry6FpnKJavaThread_pnGThread__v_: os.o;
+text: .text%__1cCosLsignal_wait6F_i_;
+text: .text%__1cVcheck_pending_signals6Fi_i_: os_solaris.o;
+text: .text%__1cVcompiler_thread_entry6FpnKJavaThread_pnGThread__v_: thread.o;
+text: .text%__1cNCompileBrokerUcompiler_thread_loop6F_v_;
+text: .text%__1cICompilerKinitialize6M_v_;
+text: .text%__1cMCompileQdDueueDget6M_pnLCompileTask__;
+text: .text%__1cKManagementKinitialize6FpnGThread__v_;
+text: .text%__1cRLowMemoryDetectorKinitialize6F_v_;
+text: .text%__1cXLowMemoryDetectorThreadbCis_hidden_from_external_view6kM_i_: lowMemoryDetector.o;
+text: .text%__1cKJavaThreadOis_Java_thread6kM_i_: lowMemoryDetector.o;
+text: .text%__1cLStatSamplerGengage6F_v_;
+text: .text%__1cLStatSamplerKinitialize6F_v_;
+text: .text%__1cLStatSamplerUcreate_misc_perfdata6F_v_;
+text: .text%__1cCosRelapsed_frequency6F_x_;
+text: .text%__1cLStatSamplerbMcreate_system_property_instrumentation6FpnGThread__v_;
+text: .text%__1cLStatSamplerTget_system_property6FpkcpnGThread__2_;
+text: .text%__1cJJavaCallsLcall_static6FpnJJavaValue_nLKlassHandle_nMsymbolHandle_4nGHandle_pnGThread__v_;
+text: .text%__1cLStatSamplerXcreate_sampled_perfdata6F_v_;
+text: .text%__1cPPerfDataManagerTcreate_long_counter6FnJCounterNS_pkcnIPerfDataFUnits_pnUPerfLongSampleHelper_pnGThread__pnPPerfLongCounter__;
+text: .text%__1cSHighResTimeSamplerLtake_sample6M_x_: statSampler.o;
+text: .text%__1cPPerfDataManagerHsampled6F_pnMPerfDataList__;
+text: .text%__1cMPerfDataListFclone6M_p0_;
+text: .text%__1cMPerfDataList2t6Mp0_v_;
+text: .text%__1cUGenericGrowableArrayNraw_appendAll6Mpk0_v_;
+text: .text%__1cNWatcherThreadFstart6F_v_;
+text: .text%__1cNWatcherThread2t6M_v_;
+text: .text%__1cJTimeStampMmilliseconds6kM_x_;
+text: .text%__1cKManagementWrecord_vm_startup_time6Fxx_v_;
+text: .text%__1cORuntimeServiceYrecord_application_start6F_v_;
+text: .text%__1cNWatcherThreadDrun6M_v_;
+text: .text%__1cMPeriodicTaskMtime_to_wait6F_I_: thread.o;
+text: .text%__1cNWatcherThreadRis_Watcher_thread6kM_i_: thread.o;
+text: .text%__1cIos_sleep6Fxi_i_: os_solaris.o;
+text: .text%__1cNgetTimeMillis6F_x_;
+text: .text%__1cQjava_lang_StringOchar_converter6FnGHandle_HHpnGThread__1_;
+text: .text%JVM_FindLoadedClass;
+text: .text%__1cQSystemDictionarybCfind_instance_or_array_klass6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cQSystemDictionaryEfind6FnMsymbolHandle_nGHandle_2pnGThread__pnMklassOopDesc__;
+text: .text%__1cSInterpreterRuntimeLmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_;
+text: .text%__1cSObjectSynchronizerJslow_exit6FpnHoopDesc_pnJBasicLock_pnGThread__v_;
+text: .text%jni_CallStaticObjectMethod: jni.o;
+text: .text%jni_NewByteArray: jni.o;
+text: .text%jni_SetByteArrayRegion: jni.o;
+text: .text%__1cSInterpreterRuntimeMmonitorenter6FpnKJavaThread_pnPBasicObjectLock__v_;
+text: .text%__1cNObjectMonitorFenter6MpnGThread__v_;
+text: .text%jni_NewObject: jni.o;
+text: .text%__1cGThreadMis_VM_thread6kM_i_: lowMemoryDetector.o;
+text: .text%__1cRLowMemoryDetectorbGlow_memory_detector_thread_entry6FpnKJavaThread_pnGThread__v_;
+text: .text%__1cRLowMemoryDetectorUhas_pending_requests6F_i_;
+text: .text%__1cVjava_lang_ClassLoaderGparent6FpnHoopDesc__2_;
+text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcipnNsymbolOopDesc_4_v_;
+text: .text%__1cKExceptionsK_throw_msg6FpnGThread_pkcinMsymbolHandle_4nGHandle_6_v_;
+text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinMsymbolHandle_4_i_;
+text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_pkcnGHandle_6_6_;
+text: .text%__1cKExceptionsNnew_exception6FpnGThread_nMsymbolHandle_3pnRJavaCallArguments_nGHandle_6_6_;
+text: .text%JVM_FillInStackTrace;
+text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle__v_;
+text: .text%__1cIUniverseWis_out_of_memory_error6FnGHandle__i_;
+text: .text%__1cVPreserveExceptionMark2t6MrpnGThread__v_;
+text: .text%__1cKJavaThreadGactive6F_p0_;
+text: .text%__1cTjava_lang_ThrowableTfill_in_stack_trace6FnGHandle_pnGThread__v_;
+text: .text%__1cTjava_lang_ThrowableNset_backtrace6FpnHoopDesc_2_v_;
+text: .text%__1cTjava_lang_ThrowableQclear_stacktrace6FpnHoopDesc__v_;
+text: .text%__1cVPreserveExceptionMark2T6M_v_;
+text: .text%__1cKExceptionsG_throw6FpnGThread_pkcinGHandle__v_;
+text: .text%__1cKExceptionsRspecial_exception6FpnGThread_pkcinGHandle__i_;
+text: .text%__1cGThreadVset_pending_exception6MpnHoopDesc_pkci_v_;
+text: .text%__1cGEventsDlog6FpkcE_v_: exceptions.o;
+text: .text%__1cSInterpreterRuntimeXthrow_pending_exception6FpnKJavaThread__v_;
+text: .text%__1cNSharedRuntimebKexception_handler_for_return_address6FpC_1_;
+text: .text%__1cNSharedRuntimebOraw_exception_handler_for_return_address6FpC_1_;
+text: .text%__1cSInterpreterRuntimebFexception_handler_for_exception6FpnKJavaThread_pnHoopDesc__pC_;
+text: .text%__1cNmethodOopDescbEfast_exception_handler_bci_for6MnLKlassHandle_ipnGThread__i_;
+text: .text%__1cKJavaThreadNreguard_stack6MpC_i_;
+text: .text%__1cSInterpreterRuntimePset_bcp_and_mdp6FpCpnKJavaThread__v_;
+text: .text%__1cFframeZinterpreter_frame_set_bcp6MpC_v_;
+text: .text%__1cFframeZinterpreter_frame_set_bcx6Mi_v_;
+text: .text%__1cNCompileBrokerYcheck_compilation_result6FnMmethodHandle_iippnHnmethod__i_;
+text: .text%__1cNCompileBrokerXcompilation_is_in_queue6FnMmethodHandle_i_i_;
+text: .text%__1cNCompileBrokerZcompilation_is_prohibited6FnMmethodHandle_i_i_;
+text: .text%__1cNCompileBrokerTis_not_compile_only6FnMmethodHandle__i_;
+text: .text%__1cOCompilerOracleOshould_exclude6FnMmethodHandle__i_;
+text: .text%__1cNCompileBrokerRassign_compile_id6FnMmethodHandle_i_I_;
+text: .text%__1cNCompileBrokerTis_compile_blocking6FnMmethodHandle_i_i_;
+text: .text%__1cNCompileBrokerTcreate_compile_task6FpnMCompileQdDueue_inMmethodHandle_i3ipkcii_pnLCompileTask__;
+text: .text%__1cNCompileBrokerNallocate_task6F_pnLCompileTask__;
+text: .text%__1cLCompileTaskKinitialize6MinMmethodHandle_i1ipkcii_v_;
+text: .text%__1cMCompileQdDueueDadd6MpnLCompileTask__v_;
+text: .text%__1cNCompileBrokerTwait_for_completion6FpnLCompileTask__pnHnmethod__;
+text: .text%__1cCosPhint_no_preempt6F_v_;
+text: .text%__1cSCompileTaskWrapper2t6MpnLCompileTask__v_;
+text: .text%__1cNCompileBrokerZinvoke_compiler_on_method6FpnLCompileTask__v_;
+text: .text%__1cNCompileBrokerQset_last_compile6FpnOCompilerThread_nMmethodHandle_ii_v_;
+text: .text%__1cNCompileBrokerVpush_jni_handle_block6F_v_;
+text: .text%__1cNCompileBrokerOcheck_break_at6FnMmethodHandle_iii_i_;
+text: .text%__1cOCompilerOraclePshould_break_at6FnMmethodHandle__i_;
+text: .text%__1cFciEnv2t6MpnHJNIEnv__iii_v_;
+text: .text%__1cPciObjectFactory2t6MpnFArena_i_v_;
+text: .text%__1cPciObjectFactoryTinit_shared_objects6M_v_;
+text: .text%__1cUGenericGrowableArray2t6MpnFArena_iipnEGrET__v_;
+text: .text%__1cIciSymbol2t6MnMsymbolHandle__v_;
+text: .text%__1cIciObject2t6MnGHandle__v_;
+text: .text%__1cPciObjectFactoryEfind6MpnHoopDesc_pnNGrowableArray4CpnIciObject____i_;
+text: .text%__1cPciObjectFactoryLis_found_at6MipnHoopDesc_pnNGrowableArray4CpnIciObject____i_;
+text: .text%__1cPciObjectFactoryNinit_ident_of6MpnIciObject__v_;
+text: .text%__1cIciObjectJset_ident6MI_v_;
+text: .text%__1cPciObjectFactoryGinsert6MipnIciObject_pnNGrowableArray4C2___v_;
+text: .text%__1cGciType2t6MnJBasicType__v_;
+text: .text%__1cIciObject2t6M_v_;
+text: .text%__1cPciObjectFactoryDget6MpnHoopDesc__pnIciObject__;
+text: .text%__1cPciObjectFactoryNfind_non_perm6MpnHoopDesc__rpn0ANNonPermObject__;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: klassKlass.o;
+text: .text%__1cPciObjectFactoryRcreate_new_object6MpnHoopDesc__pnIciObject__;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: methodKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: methodKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: methodKlass.o;
+text: .text%__1cIciSymbolEmake6Fpkc_p0_;
+text: .text%__1cFciEnvIis_in_vm6F_i_;
+text: .text%__1cIciSymbolJmake_impl6Fpkc_p0_;
+text: .text%__1cHciKlass2t6MnLKlassHandle_pnIciSymbol__v_;
+text: .text%__1cGciType2t6MnLKlassHandle__v_;
+text: .text%__1cFKlassMoop_is_array6kM_i_: methodKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: symbolKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: klassKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: klassKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: klassKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: klassKlass.o;
+text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: klassKlass.o;
+text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: klassKlass.o;
+text: .text%__1cFKlassUoop_is_instanceKlass6kM_i_: klassKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: klassKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassVoop_is_typeArrayKlass6kM_i_: instanceKlassKlass.o;
+text: .text%__1cSinstanceKlassKlassUoop_is_instanceKlass6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: instanceKlassKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassUoop_is_objArrayKlass6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cTtypeArrayKlassKlassVoop_is_typeArrayKlass6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassPoop_is_instance6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cSobjArrayKlassKlassUoop_is_objArrayKlass6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cFKlassMoop_is_array6kM_i_: objArrayKlassKlass.o;
+text: .text%__1cPciInstanceKlass2t6MnLKlassHandle__v_;
+text: .text%__1cHciKlass2t6MnLKlassHandle__v_;
+text: .text%__1cPciInstanceKlassFsuper6M_p0_;
+text: .text%__1cPciInstanceKlassTis_java_lang_Object6M_i_;
+text: .text%__1cIciObjectGequals6Mp0_i_;
+text: .text%__1cPciInstanceKlassLjava_mirror6M_pnKciInstance__;
+text: .text%__1cHciKlassLjava_mirror6M_pnKciInstance__;
+text: .text%__1cFKlassNoop_is_method6kM_i_: instanceKlass.o;
+text: .text%__1cFKlassRoop_is_methodData6kM_i_: instanceKlass.o;
+text: .text%__1cUciInstanceKlassKlassEmake6F_p0_;
+text: .text%__1cHciKlass2t6MpnIciSymbol_p0_v_;
+text: .text%__1cGciType2t6MpnHciKlass__v_;
+text: .text%__1cIciObject2t6MpnHciKlass__v_;
+text: .text%__1cIciObjectUis_array_klass_klass6M_i_: ciObjectFactory.o;
+text: .text%__1cPciObjArrayKlass2t6MpnIciSymbol_pnHciKlass_i_v_;
+text: .text%__1cUciObjArrayKlassKlassEmake6F_p0_;
+text: .text%__1cMciArrayKlass2t6MpnIciSymbol_ipnHciKlass__v_;
+text: .text%__1cRciArrayKlassKlassUis_array_klass_klass6M_i_: ciObjectFactory.o;
+text: .text%__1cQciTypeArrayKlass2t6MnLKlassHandle__v_;
+text: .text%__1cMciArrayKlass2t6MnLKlassHandle__v_;
+text: .text%__1cFciEnvWget_method_from_handle6MpnI_jobject__pnIciMethod__;
+text: .text%__1cFKlassNoop_is_symbol6kM_i_: methodKlass.o;
+text: .text%__1cIciMethod2t6MnMmethodHandle__v_;
+text: .text%__1cLciSignature2t6MpnHciKlass_pnIciSymbol__v_;
+text: .text%__1cPSignatureStreamJis_object6kM_i_;
+text: .text%__1cGciTypeEmake6FnJBasicType__p0_;
+text: .text%__1cJTraceTime2t6MpkcpnMelapsedTimer_iipnMoutputStream__v_;
+text: .text%__1cICompilerOcompile_method6MpnFciEnv_pnIciMethod_i_v_;
+text: .text%__1cLCompilation2t6MpnQAbstractCompiler_pnFciEnv_pnIciMethod_ipnRC1_MacroAssembler__v_;
+text: .text%__1cTExceptionRangeTable2t6Mi_v_;
+text: .text%__1cWImplicitExceptionTableIset_size6MI_v_;
+text: .text%__1cLCompilationOcompile_method6M_v_;
+text: .text%__1cLCompilationKinitialize6M_v_;
+text: .text%__1cLCompilationEcode6kM_pnKCodeBuffer__;
+text: .text%__1cYDebugInformationRecorder2t6MpnLOopRecorder__v_;
+text: .text%__1cUDebugInfoWriteStream2t6MpnYDebugInformationRecorder_i_v_;
+text: .text%__1cLCompilationTdebug_info_recorder6kM_pnYDebugInformationRecorder__;
+text: .text%__1cLCompilationbBis_optimized_library_method6kM_i_;
+text: .text%__1cLCompilationTcompile_java_method6MpnLCodeOffsets__i_;
+text: .text%__1cLCompilationTinitialize_oop_maps6M_v_;
+text: .text%__1cIciMethodMall_oop_maps6M_pnKciLocalMap__;
+text: .text%__1cSciGenerateLocalMap2t6MpnFArena_nMmethodHandle__v_;
+text: .text%__1cOGenerateOopMap2t6MnMmethodHandle__v_;
+text: .text%__1cSciGenerateLocalMapWfind_jsr_return_points6MnMmethodHandle__v_;
+text: .text%__1cRRawBytecodeStream2t6MnMmethodHandle__v_;
+text: .text%__1cRRawBytecodeStreamMset_interval6Mii_v_;
+text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: ciOopMap.o;
+text: .text%__1cOGenerateOopMapLcompute_map6MpnGThread__v_;
+text: .text%__1cIRetTableRcompute_ret_table6MnMmethodHandle__v_;
+text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: generateOopMap.o;
+text: .text%__1cOGenerateOopMapbImark_bbheaders_and_count_gc_points6M_v_;
+text: .text%__1cOGenerateOopMapNinitialize_bb6M_v_;
+text: .text%__1cOGenerateOopMapLbb_mark_fct6Fp0ipi_v_;
+text: .text%__1cOGenerateOopMapOset_bbmark_bit6Mi_v_;
+text: .text%__1cOGenerateOopMapPjump_targets_do6MpnOBytecodeStream_pFp0ipi_v4_i_;
+text: .text%__1cSciGenerateLocalMapRpossible_gc_point6MpnOBytecodeStream__i_;
+text: .text%__1cSciGenerateLocalMapUbytecode_is_gc_point6FnJBytecodesECode_ii_i_;
+text: .text%__1cOGenerateOopMapRdo_interpretation6M_v_;
+text: .text%__1cOGenerateOopMapRinit_basic_blocks6M_v_;
+text: .text%__1cOGenerateOopMapKinit_state6M_v_;
+text: .text%__1cOGenerateOopMapTmark_reachable_code6M_v_;
+text: .text%__1cOGenerateOopMapUreachable_basicblock6Fp0ipi_v_;
+text: .text%__1cOGenerateOopMapSget_basic_block_at6kMi_pnKBasicBlock__;
+text: .text%__1cOGenerateOopMapbAget_basic_block_containing6kMi_pnKBasicBlock__;
+text: .text%__1cOGenerateOopMapYsetup_method_entry_state6M_v_;
+text: .text%__1cOGenerateOopMapbAmake_context_uninitialized6M_v_;
+text: .text%__1cOGenerateOopMapTmethodsig_to_effect6MpnNsymbolOopDesc_ipnNCellTypeState__i_;
+text: .text%__1cOGenerateOopMapPinitialize_vars6M_v_;
+text: .text%__1cOGenerateOopMapTmerge_state_into_bb6MpnKBasicBlock__v_;
+text: .text%__1cOGenerateOopMapKcopy_state6MpnNCellTypeState_2_v_;
+text: .text%__1cOGenerateOopMapKinterp_all6M_v_;
+text: .text%__1cOGenerateOopMapJinterp_bb6MpnKBasicBlock__v_;
+text: .text%__1cOGenerateOopMapNrestore_state6MpnKBasicBlock__v_;
+text: .text%__1cOGenerateOopMapQnext_bb_start_pc6MpnKBasicBlock__i_;
+text: .text%__1cOGenerateOopMapHinterp16MpnOBytecodeStream__v_;
+text: .text%__1cOGenerateOopMapGppload6MpnNCellTypeState_i_v_;
+text: .text%__1cOGenerateOopMapHget_var6Mi_nNCellTypeState__;
+text: .text%__1cOGenerateOopMapEpush6MnNCellTypeState__v_;
+text: .text%__1cOGenerateOopMapIdo_field6Miiii_v_;
+text: .text%__1cOGenerateOopMapRsigchar_to_effect6McipnNCellTypeState__2_;
+text: .text%__1cOGenerateOopMapCpp6MpnNCellTypeState_2_v_;
+text: .text%__1cOGenerateOopMapEppop6MpnNCellTypeState__v_;
+text: .text%__1cOGenerateOopMapFppop16MnNCellTypeState__v_;
+text: .text%__1cOGenerateOopMapDpop6M_nNCellTypeState__;
+text: .text%__1cOGenerateOopMapKcheck_type6MnNCellTypeState_1_v_;
+text: .text%__1cOGenerateOopMapFppush6MpnNCellTypeState__v_;
+text: .text%__1cOGenerateOopMapGppush16MnNCellTypeState__v_;
+text: .text%__1cOGenerateOopMapHppstore6MpnNCellTypeState_i_v_;
+text: .text%__1cOGenerateOopMapHset_var6MinNCellTypeState__v_;
+text: .text%__1cOGenerateOopMapLmerge_state6Fp0ipi_v_;
+text: .text%__1cOGenerateOopMapJdo_astore6Mi_v_;
+text: .text%__1cOGenerateOopMapTmerge_state_vectors6MpnNCellTypeState_2_i_;
+text: .text%__1cNCellTypeStateFmerge6kM0i_0_;
+text: .text%__1cOGenerateOopMapIcopy_cts6MpnNCellTypeState_2_i_;
+text: .text%__1cOGenerateOopMapXdo_return_monitor_check6M_v_;
+text: .text%__1cOGenerateOopMapYrewrite_refval_conflicts6M_v_;
+text: .text%__1cSciGenerateLocalMapOreport_results6kM_i_: ciOopMap.o;
+text: .text%__1cOGenerateOopMapNreport_result6M_v_;
+text: .text%__1cSciGenerateLocalMapUfill_stackmap_prolog6Mi_v_;
+text: .text%__1cSciGenerateLocalMapZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_;
+text: .text%__1cKciLocalMap2t6MpnFArena_iii_v_;
+text: .text%__1cKciLocalMapRset_bci_for_index6Mii_v_;
+text: .text%__1cSciGenerateLocalMapUfill_stackmap_epilog6M_v_: ciOopMap.o;
+text: .text%__1cSciGenerateLocalMapOfill_init_vars6MpnNGrowableArray4Ci___v_;
+text: .text%__1cKciLocalMapSset_nof_initialize6Mi_v_;
+text: .text%__1cLCompilationJbuild_hir6M_v_;
+text: .text%__1cCIR2t6MpnLCompilation_pnIciMethod_i_v_;
+text: .text%__1cJValueTypeKinitialize6F_v_;
+text: .text%__1cMciNullObjectEmake6F_p0_;
+text: .text%__1cMGraphBuilderKinitialize6F_v_;
+text: .text%__1cHIRScope2t6MpnLCompilation_p0ipnIciMethod_ii_v_;
+text: .text%__1cOLocalSlotArray2t6MkikpnJLocalSlot__v_: c1_IR.o;
+text: .text%__1cGBitMap2t6MI_v_;
+text: .text%__1cGBitMapGresize6MI_v_;
+text: .text%__1cNWordSizeArray2t6Mki1_v_: c1_IR.o;
+text: .text%__1cJXHandlers2t6MpnIciMethod__v_;
+text: .text%__1cIciMethodJload_code6M_v_;
+text: .text%__1cIciMethodVhas_balanced_monitors6M_i_;
+text: .text%__1cHIRScopeLbuild_graph6MpnLCompilation_i_pnKBlockBegin__;
+text: .text%__1cQBlockListBuilder2t6MpnHIRScope_ii_v_;
+text: .text%__1cPBlockBeginArray2t6MkikpnKBlockBegin__v_: c1_GraphBuilder.o;
+text: .text%__1cQBlockListBuilderLset_leaders6M_v_;
+text: .text%__1cQciBytecodeStream2t6MpnIciMethod__v_;
+text: .text%__1cQciBytecodeStreamMset_interval6Mii_v_;
+text: .text%__1cQBlockListBuilderMnew_block_at6MinKBlockBeginEFlag__p1_;
+text: .text%__1cQBlockListBuilderUset_xhandler_entries6M_v_;
+text: .text%__1cKValueStack2t6MpnHIRScope_ii_v_;
+text: .text%__1cKValueArray2t6MkikpnLInstruction__v_: c1_ValueStack.o;
+text: .text%__1cJLocalSlot2t6M_v_;
+text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_IR.o;
+text: .text%__1cKObjectTypeDtag6kM_nIValueTag__: c1_ValueType.o;
+text: .text%__1cMGraphBuilder2t6MpnLCompilation_pnHIRScope_pnJBlockList_pnKBlockBegin__v_;
+text: .text%__1cMGraphBuilderPpush_root_scope6MpnHIRScope_pnJBlockList_pnKBlockBegin__v_;
+text: .text%__1cMGraphBuilderJScopeData2t6Mp1i_v_;
+text: .text%__1cMGraphBuilderJScopeDataJset_scope6MpnHIRScope__v_;
+text: .text%__1cMGraphBuilderUpush_exception_scope6M_v_;
+text: .text%__1cOExceptionScope2t6M_v_;
+text: .text%__1cOExceptionScopeEinit6M_v_;
+text: .text%__1cIValueMap2t6M_v_;
+text: .text%__1cMGraphBuilderJScopeDataQadd_to_work_list6MpnKBlockBegin__v_;
+text: .text%__1cNResourceArrayGexpand6MIiri_v_;
+text: .text%__1cMGraphBuilderSiterate_all_blocks6Mi_v_;
+text: .text%__1cMGraphBuilderJScopeDataVremove_from_work_list6M_pnKBlockBegin__;
+text: .text%__1cMGraphBuilderJScopeDataSis_work_list_empty6kM_i_;
+text: .text%__1cMGraphBuilderOconnect_to_end6MpnKBlockBegin__pnIBlockEnd__;
+text: .text%__1cIValueMapIkill_all6M_v_;
+text: .text%__1cIValueMapRnumber_of_buckets6kM_i_;
+text: .text%__1cIValueMapJbucket_at6Mi_pnGBucket__;
+text: .text%__1cGBucketIkill_all6M_v_;
+text: .text%__1cKValueStackEcopy6M_p0_;
+text: .text%__1cGValuesIpush_all6Mpk0_v_: c1_ValueStack.o;
+text: .text%__1cMGraphBuilderbBiterate_bytecodes_for_block6Mi_pnIBlockEnd__;
+text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderJScopeDataIblock_at6Mi_pnKBlockBegin__;
+text: .text%__1cMGraphBuilderKload_local6MpnJValueType_i_v_;
+text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderGappend6MpnLInstruction__2_;
+text: .text%__1cMGraphBuilderLappend_base6MpnLInstruction__2_;
+text: .text%__1cJLoadLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerMdo_LoadLocal6MpnJLoadLocal__v_;
+text: .text%__1cIValueMapEfind6MpnLInstruction__2_;
+text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionEhash6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionIcan_trap6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cKValueStackLclear_store6Mi_v_;
+text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderMaccess_field6MnJBytecodesECode__v_;
+text: .text%__1cQciBytecodeStreamJget_field6kM_pnHciField__;
+text: .text%__1cQciBytecodeStreamPget_field_index6kM_i_;
+text: .text%__1cFciEnvSget_field_by_index6MpnPciInstanceKlass_i_pnHciField__;
+text: .text%__1cFciEnvXget_field_by_index_impl6MpnPciInstanceKlass_i_pnHciField__;
+text: .text%__1cPciInstanceKlassLfield_cache6M_pnTciConstantPoolCache__;
+text: .text%__1cHciField2t6MpnPciInstanceKlass_i_v_;
+text: .text%__1cFciEnvSget_klass_by_index6MpnPciInstanceKlass_iri_pnHciKlass__;
+text: .text%__1cFciEnvXget_klass_by_index_impl6MpnPciInstanceKlass_iri_pnHciKlass__;
+text: .text%__1cTconstantPoolOopDescSklass_at_if_loaded6FnSconstantPoolHandle_i_pnMklassOopDesc__;
+text: .text%__1cPciObjectFactorySget_unloaded_klass6MpnHciKlass_pnIciSymbol_i_2_;
+text: .text%__1cPciInstanceKlassGloader6M_pnHoopDesc__;
+text: .text%__1cPciInstanceKlassRprotection_domain6M_pnHoopDesc__;
+text: .text%__1cHciFieldPinitialize_from6MpnPfieldDescriptor__v_;
+text: .text%__1cMas_ValueType6FnJBasicType__pnJValueType__;
+text: .text%__1cHciFieldJwill_link6MpnPciInstanceKlass_nJBytecodesECode__i_;
+text: .text%__1cMLinkResolverXresolve_klass_no_update6FrnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cTconstantPoolOopDescbCklass_ref_at_if_loaded_check6FnSconstantPoolHandle_ipnGThread__pnMklassOopDesc__;
+text: .text%__1cMGraphBuilderKlock_stack6M_pnKValueStack__;
+text: .text%__1cKValueStackKcopy_locks6M_p0_;
+text: .text%__1cJLoadFieldFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerMdo_LoadField6MpnJLoadField__v_;
+text: .text%__1cLAccessFieldOas_AccessField6M_p0_: c1_Instruction.o;
+text: .text%__1cJLoadFieldEhash6kM_i_: c1_Instruction.o;
+text: .text%__1cJLoadFieldEname6kM_pkc_: c1_Instruction.o;
+text: .text%__1cIValueMapNlookup_bucket6Mi_pnGBucket__;
+text: .text%__1cGBucketEfind6MpnLInstruction_i_2_;
+text: .text%__1cGBucketGappend6MpnLInstruction_i_v_;
+text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_Instruction.o;
+text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_Instruction.o;
+text: .text%__1cLAccessFieldIcan_trap6kM_i_: c1_Instruction.o;
+text: .text%__1cOExceptionScopeEcopy6M_p0_;
+text: .text%__1cOExceptionScopeGlength6kM_i_;
+text: .text%__1cHIntTypeDtag6kM_nIValueTag__: c1_ValueType.o;
+text: .text%__1cMGraphBuilderLstore_local6MpnJValueType_i_v_;
+text: .text%__1cKValueStackDpop6MpnJValueType__pnLInstruction__: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderLstore_local6MpnKValueStack_pnLInstruction_pnJValueType_ii_v_;
+text: .text%__1cJValueTypeNas_ObjectType6M_pnKObjectType__: c1_ValueType.o;
+text: .text%__1cKStoreLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerNdo_StoreLocal6MpnKStoreLocal__v_;
+text: .text%__1cKValueStackLstore_local6MpnKStoreLocal_i_v_;
+text: .text%__1cKValueStackQpin_stack_locals6Mi_v_;
+text: .text%__1cKValueStackNpin_stack_all6MnLInstructionJPinReason__v_;
+text: .text%__1cHIntTypeEsize6kM_i_: c1_ValueType.o;
+text: .text%__1cMGraphBuilderHif_zero6MpnJValueType_nLInstructionJCondition__v_;
+text: .text%__1cIConstantFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerLdo_Constant6MpnIConstant__v_;
+text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Instruction.o;
+text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_Instruction.o;
+text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_Instruction.o;
+text: .text%__1cIConstantEhash6kM_i_;
+text: .text%__1cHIntTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o;
+text: .text%__1cLIntConstantOas_IntConstant6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cIConstantEname6kM_pkc_: c1_Instruction.o;
+text: .text%__1cIConstantIcan_trap6kM_i_: c1_Instruction.o;
+text: .text%__1cMGraphBuilderHif_node6MpnLInstruction_n0BJCondition_2pnKValueStack__v_;
+text: .text%__1cCIf2t6MpnLInstruction_n0BJCondition_i2pnKBlockBegin_5pnKValueStack_i_v_: c1_GraphBuilder.o;
+text: .text%__1cCIfFvisit6MpnSInstructionVisitor__v_: c1_Canonicalizer.o;
+text: .text%__1cNCanonicalizerFdo_If6MpnCIf__v_;
+text: .text%__1cJValueTypeLis_constant6kM_i_: c1_ValueType.o;
+text: .text%__1cLInstructionMas_CompareOp6M_pnJCompareOp__: c1_Instruction.o;
+text: .text%__1cLInstructionNas_InstanceOf6M_pnKInstanceOf__: c1_Instruction.o;
+text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionLas_UnsafeOp6M_pnIUnsafeOp__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionEhash6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cLInstructionIcan_trap6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_Canonicalizer.o;
+text: .text%__1cKBlockBeginItry_join6MpnKValueStack__i_;
+text: .text%__1cKValueStack2t6Mp0_v_;
+text: .text%__1cKValueStackEinit6Mp0_v_;
+text: .text%__1cMGraphBuilderNmethod_return6MpnLInstruction__v_;
+text: .text%__1cGReturnFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerJdo_Return6MpnGReturn__v_;
+text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cGReturnJas_Return6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cKValueStackbAeliminate_all_scope_stores6Mi_v_;
+text: .text%__1cKValueStackQeliminate_stores6Mi_v_;
+text: .text%__1cKValueStackMcaller_state6kM_p0_;
+text: .text%__1cFciEnvWget_klass_by_name_impl6MpnHciKlass_pnIciSymbol_i_2_;
+text: .text%__1cQSystemDictionarybOfind_constrained_instance_or_array_klass6FnMsymbolHandle_nGHandle_pnGThread__pnMklassOopDesc__;
+text: .text%__1cHciKlassGloader6M_pnHoopDesc__: ciTypeArrayKlass.o;
+text: .text%__1cFciEnvZcheck_klass_accessibility6MpnHciKlass_pnMklassOopDesc__i_;
+text: .text%__1cIciObjectMis_obj_array6M_i_: ciInstanceKlass.o;
+text: .text%__1cPciInstanceKlassRis_instance_klass6M_i_: ciInstanceKlass.o;
+text: .text%__1cKObjectTypeNas_ObjectType6M_p0_: c1_ValueType.o;
+text: .text%__1cJValueTypeOas_AddressType6M_pnLAddressType__: c1_ValueType.o;
+text: .text%__1cKObjectTypeEsize6kM_i_: c1_ValueType.o;
+text: .text%__1cMGraphBuilderHif_same6MpnJValueType_nLInstructionJCondition__v_;
+text: .text%__1cJValueTypeOas_IntConstant6M_pnLIntConstant__: c1_ValueType.o;
+text: .text%__1cKStoreFieldFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerNdo_StoreField6MpnKStoreField__v_;
+text: .text%__1cLAccessFieldOas_AccessField6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cLAccessFieldIcan_trap6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cIValueMapKkill_field6MpnHciField__v_;
+text: .text%__1cGBucketKkill_field6MpnHciField__v_;
+text: .text%__1cKValueStackQpin_stack_fields6MpnHciField__v_;
+text: .text%__1cKValueStackVis_same_across_scopes6Mp0_i_;
+text: .text%__1cMGraphBuilderNarithmetic_op6MpnJValueType_nJBytecodesECode_pnKValueStack__v_;
+text: .text%__1cJValueTypeEmeet6kMp0_1_;
+text: .text%__1cHIntTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o;
+text: .text%__1cMArithmeticOpIcan_trap6kM_i_;
+text: .text%__1cMArithmeticOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerPdo_ArithmeticOp6MpnMArithmeticOp__v_;
+text: .text%__1cNCanonicalizerGdo_Op26MpnDOp2__v_;
+text: .text%__1cLIntConstantLis_constant6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cNCanonicalizerTmove_const_to_right6MpnDOp2__v_;
+text: .text%__1cMArithmeticOpOis_commutative6kM_i_;
+text: .text%__1cMArithmeticOpEhash6kM_i_: c1_Instruction.o;
+text: .text%__1cMArithmeticOpEname6kM_pkc_: c1_Instruction.o;
+text: .text%__1cMGraphBuilderJincrement6M_v_;
+text: .text%__1cHIntTypeEbase6kM_pnJValueType__: c1_ValueType.o;
+text: .text%__1cMGraphBuilderMload_indexed6MnJBasicType__v_;
+text: .text%__1cLLoadIndexedFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerOdo_LoadIndexed6MpnLLoadIndexed__v_;
+text: .text%__1cLLoadIndexedEhash6kM_i_: c1_Instruction.o;
+text: .text%__1cLLoadIndexedEname6kM_pkc_: c1_Instruction.o;
+text: .text%__1cLAccessArrayIcan_trap6kM_i_: c1_Instruction.o;
+text: .text%__1cIConstantIis_equal6kMpnLInstruction__i_;
+text: .text%__1cIConstantLas_Constant6M_p0_: c1_Instruction.o;
+text: .text%__1cEGotoFvisit6MpnSInstructionVisitor__v_: c1_Canonicalizer.o;
+text: .text%__1cNCanonicalizerHdo_Goto6MpnEGoto__v_;
+text: .text%__1cHIRScopeMheader_block6MpnKBlockBegin_n0BEFlag__2_;
+text: .text%__1cCIRIoptimize6M_v_;
+text: .text%__1cJOptimizer2t6MpnCIR__v_;
+text: .text%__1cJOptimizerbHeliminate_conditional_expressions6M_v_;
+text: .text%__1cCIRQiterate_preorder6MpnMBlockClosure__v_;
+text: .text%__1cKBlockBeginQiterate_preorder6MpnMBlockClosure__v_;
+text: .text%__1cJboolArray2t6Mki1_v_: c1_Instruction.o;
+text: .text%__1cKBlockBeginQiterate_preorder6MrnJboolArray_pnMBlockClosure__v_;
+text: .text%__1cNCE_EliminatorIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o;
+text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_IR.o;
+text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_Canonicalizer.o;
+text: .text%__1cCIfFas_If6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cHIntTypeKas_IntType6M_p0_: c1_ValueType.o;
+text: .text%__1cNCE_EliminatorRsimple_value_copy6MpnLInstruction__2_: c1_Optimizer.o;
+text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_GraphBuilder.o;
+text: .text%__1cJLoadLocalMas_LoadLocal6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_GraphBuilder.o;
+text: .text%__1cJOptimizerQeliminate_blocks6M_v_;
+text: .text%__1cUGenericGrowableArray2t6MiipnEGrET_i_v_;
+text: .text%__1cSPredecessorCounterIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o;
+text: .text%__1cLBlockMergerIblock_do6MpnKBlockBegin__v_: c1_Optimizer.o;
+text: .text%__1cLBlockMergerJtry_merge6MpnKBlockBegin__i_: c1_Optimizer.o;
+text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_IR.o;
+text: .text%__1cEGotoHas_Goto6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_Canonicalizer.o;
+text: .text%__1cJOptimizerVeliminate_null_checks6M_v_;
+text: .text%__1cGBitMapFclear6M_v_;
+text: .text%__1cGBitMapUclear_range_of_words6MII_v_: bitMap.o;
+text: .text%__1cNValueSetArray2t6MkikpnIValueSet__v_: c1_Optimizer.o;
+text: .text%__1cTNullCheckEliminatorHiterate6MpnKBlockBegin__v_;
+text: .text%__1cTNullCheckEliminatorLiterate_all6M_v_;
+text: .text%__1cTNullCheckEliminatorLiterate_one6MpnKBlockBegin__v_;
+text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_Optimizer.o;
+text: .text%__1cGBitMapIset_from6M0_v_;
+text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cKBlockBeginFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorNdo_BlockBegin6MpnKBlockBegin__v_;
+text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_IR.o;
+text: .text%__1cEBaseFvisit6MpnSInstructionVisitor__v_: c1_IR.o;
+text: .text%__1cQNullCheckVisitorHdo_Base6MpnEBase__v_;
+text: .text%__1cTNullCheckEliminatorPmerge_state_for6MpnKBlockBegin_pnKValueStack_pnIValueSet__i_;
+text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Optimizer.o;
+text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_Canonicalizer.o;
+text: .text%__1cQNullCheckVisitorHdo_Goto6MpnEGoto__v_;
+text: .text%__1cLInstructionMas_NullCheck6M_pnJNullCheck__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionMas_NullCheck6M_pnJNullCheck__: c1_Instruction.o;
+text: .text%__1cKStoreLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cTNullCheckEliminatorIdo_value6FppnLInstruction__v_;
+text: .text%__1cFLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cFLocalFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorIdo_Local6MpnFLocal__v_;
+text: .text%__1cLAccessFieldPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cLAccessLocalPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorMdo_LoadLocal6MpnJLoadLocal__v_;
+text: .text%__1cTNullCheckEliminatorQhandle_LoadLocal6MpnJLoadLocal__v_;
+text: .text%__1cQNullCheckVisitorMdo_LoadField6MpnJLoadField__v_;
+text: .text%__1cTNullCheckEliminatorShandle_AccessField6MpnLAccessField__v_;
+text: .text%__1cQNullCheckVisitorNdo_StoreLocal6MpnKStoreLocal__v_;
+text: .text%__1cTNullCheckEliminatorRhandle_StoreLocal6MpnKStoreLocal__v_;
+text: .text%__1cCIfPinput_values_do6MpFppnLInstruction__v_v_: c1_Canonicalizer.o;
+text: .text%__1cIConstantPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cQNullCheckVisitorLdo_Constant6MpnIConstant__v_;
+text: .text%__1cQNullCheckVisitorFdo_If6MpnCIf__v_;
+text: .text%__1cDOp2Pinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cQNullCheckVisitorPdo_ArithmeticOp6MpnMArithmeticOp__v_;
+text: .text%__1cNAccessIndexedPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cQNullCheckVisitorOdo_LoadIndexed6MpnLLoadIndexed__v_;
+text: .text%__1cTNullCheckEliminatorShandle_LoadIndexed6MpnLLoadIndexed__v_;
+text: .text%__1cGBitMapbCset_intersection_with_result6M0_i_;
+text: .text%__1cKStoreFieldPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorNdo_StoreField6MpnKStoreField__v_;
+text: .text%__1cGReturnPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorJdo_Return6MpnGReturn__v_;
+text: .text%__1cJboolArray2t6Mki1_v_: c1_Optimizer.o;
+text: .text%__1cCIRTcompute_locals_size6M_v_;
+text: .text%__1cHIRScopePallocate_locals6MipnMWordSizeList__i_;
+text: .text%__1cHIRScopeGlocals6M_pnJLocalList__;
+text: .text%__1cJLocalSlotOcollect_locals6MpnJLocalList__v_;
+text: .text%__1cHIRScopePargument_locals6M_pnJLocalList__;
+text: .text%__1cJLocalSlotXcollect_argument_locals6MpnJLocalList__v_;
+text: .text%__1cCIRTallocate_local_name6M_i_;
+text: .text%__1cMWordSizeListEgrow6Mki1_v_: c1_IR.o;
+text: .text%__1cCIRSnotice_used_offset6Mi_v_;
+text: .text%__1cCIRNcompute_loops6M_v_;
+text: .text%__1cIciMethodJhas_loops6kM_i_;
+text: .text%__1cNmethodOopDescWcompute_has_loops_flag6M_i_;
+text: .text%__1cOBytecodeStreamEnext6M_nJBytecodesECode__: methodOop.o;
+text: .text%__1cKLoopFinder2t6MpnCIR_i_v_;
+text: .text%__1cSBlockLoopInfoArray2t6MkikpnNBlockLoopInfo__v_: c1_Loops.o;
+text: .text%__1cKLoopFinderNcompute_loops6Mi_pnILoopList__;
+text: .text%__1cJboolArray2t6Mki1_v_: c1_Loops.o;
+text: .text%__1cKLoopFinderScompute_dominators6MpnJboolArray__v_;
+text: .text%__1cGBitMapGat_put6MIi_v_;
+text: .text%__1cRCreateInfoClosureIblock_do6MpnKBlockBegin__v_: c1_Loops.o;
+text: .text%__1cNBlockLoopInfo2t6MpnKBlockBegin_i_v_;
+text: .text%__1cPSetPredsClosureIblock_do6MpnKBlockBegin__v_: c1_Loops.o;
+text: .text%__1cKLoopFinderSdominator_walk_sux6MpnKBlockBegin_pnJboolArray__v_;
+text: .text%__1cGBitMapQset_intersection6M0_v_;
+text: .text%__1cGBitMapHis_same6M0_i_;
+text: .text%__1cKLoopFinderOfind_backedges6MpnJboolArray__pnILoopList__;
+text: .text%__1cELoop2t6MpnKBlockBegin_2_v_: c1_Loops.o;
+text: .text%__1cKLoopFinderSgather_loop_blocks6MpnILoopList__v_;
+text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Loops.o;
+text: .text%__1cKLoopFinderKfind_loops6MpnILoopList_i_2_;
+text: .text%__1cKScanBlocks2t6MpnJBlockList__v_;
+text: .text%__1cIintStack2t6M_v_: c1_ScanBlocks.o;
+text: .text%__1cKScanBlocksEscan6MpnKScanResult_i_v_;
+text: .text%__1cKScanBlocksKscan_block6MpnKBlockBegin_pnKScanResult_i_v_;
+text: .text%__1cLIllegalTypeDtag6kM_nIValueTag__: c1_ValueType.o;
+text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_Instruction.o;
+text: .text%__1cLAccessLocalOas_AccessLocal6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cLInstructionNas_StoreLocal6M_pnKStoreLocal__: c1_GraphBuilder.o;
+text: .text%__1cKStoreLocalNas_StoreLocal6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cKScanBlocksRaccumulate_access6MinIValueTag_i_v_;
+text: .text%__1cKScanBlocksPincrement_count6MnIValueTag_ii_v_;
+text: .text%__1cKScanBlocksJget_array6MnIValueTag__pnIintStack__;
+text: .text%__1cIintStackEgrow6Mki1_v_: c1_ScanBlocks.o;
+text: .text%__1cKScanBlocksLupdate_type6MinIValueTag__v_;
+text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_Canonicalizer.o;
+text: .text%__1cJ_LoopListIpush_all6Mpk0_v_: c1_Loops.o;
+text: .text%__1cKLoopFinderbEcompute_loop_exits_and_entries6MpnILoopList__v_;
+text: .text%__1cKLoopFinderRfind_loop_entries6MpnKBlockBegin_pnELoop__v_;
+text: .text%__1cKLoopFinderPfind_loop_exits6MpnKBlockBegin_pnELoop__v_;
+text: .text%__1cKLoopFinderbDcompute_single_precision_flag6MpnILoopList__v_;
+text: .text%__1cKLoopFinderNinsert_blocks6MpnILoopList__v_;
+text: .text%__1cIintArray2t6Mki1_v_: c1_Loops.o;
+text: .text%__1cJBlockListPiterate_forward6MpnMBlockClosure__v_;
+text: .text%__1cGTaggerIblock_do6MpnKBlockBegin__v_: c1_Loops.o;
+text: .text%__1cNPairCollectorIblock_do6MpnKBlockBegin__v_: c1_Loops.o;
+text: .text%__1cNResourceArrayEsort6MIpGpkv2_i_v_;
+text: .text%__1cRsort_by_block_ids6FppnJBlockPair_2_i_: c1_Loops.o;
+text: .text%__1cKLoopFinderUinsert_caching_block6MpnILoopList_pnKBlockBegin_4_4_;
+text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_GraphBuilder.o;
+text: .text%__1cKStateSplitFscope6kM_pnHIRScope__;
+text: .text%__1cKLoopFinderJnew_block6MpnHIRScope_i_pnKBlockBegin__;
+text: .text%__1cIBlockEndOsubstitute_sux6MpnKBlockBegin_2_v_;
+text: .text%__1cILoopListMupdate_loops6MpnKBlockBegin_22_v_;
+text: .text%__1cELoopSupdate_loop_blocks6MpnKBlockBegin_22_v_;
+text: .text%__1cCIRMcompute_code6M_v_;
+text: .text%__1cJboolArray2t6Mki1_v_: c1_IR.o;
+text: .text%__1cCIRWiterate_and_set_weight6kMrnJboolArray_pnKBlockBegin_pnJBlockList_i_v_;
+text: .text%__1cKBlockBeginKset_weight6Mi_v_;
+text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_IR.o;
+text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_IR.o;
+text: .text%__1cLInstructionIas_Throw6M_pnFThrow__: c1_GraphBuilder.o;
+text: .text%__1cDcmp6FppnKBlockBegin_2_i_: c1_IR.o;
+text: .text%__1cUSuxAndWeightAdjusterIblock_do6MpnKBlockBegin__v_: c1_IR.o;
+text: .text%__1cJBlockListJblocks_do6MpFpnKBlockBegin__v_v_;
+text: .text%__1cQUseCountComputerRcompute_use_count6FpnKBlockBegin__v_: c1_IR.o;
+text: .text%__1cQUseCountComputerXbasic_compute_use_count6FpnKBlockBegin__v_: c1_IR.o;
+text: .text%__1cQUseCountComputerQupdate_use_count6FppnLInstruction__v_: c1_IR.o;
+text: .text%__1cFLocalIas_Local6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cKStateSplitPstate_values_do6MpFppnLInstruction__v_v_;
+text: .text%__1cKValueStackJvalues_do6MpFppnLInstruction__v_v_;
+text: .text%__1cQUseCountComputerPupdated_pinning6FpnKBlockBegin__i_: c1_IR.o;
+text: .text%__1cNCachingChangePinput_values_do6MpFppnLInstruction__v_v_: c1_Loops.o;
+text: .text%__1cLInstructionLas_BlockEnd6M_pnIBlockEnd__: c1_Loops.o;
+text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_IR.o;
+text: .text%__1cLCompilationIemit_lir6M_v_;
+text: .text%__1cIFrameMap2t6Mi_v_;
+text: .text%__1cIFrameMapLFpuStackSim2t6M_v_;
+text: .text%__1cLCompilationNinit_framemap6MpnIFrameMap__v_;
+text: .text%__1cIFrameMapbCset_local_name_to_offset_map6MpnMWordSizeList__v_;
+text: .text%__1cLLIR_Emitter2t6MpnLCompilation__v_;
+text: .text%__1cIValueGenOinit_value_gen6F_v_;
+text: .text%__1cIRegAlloc2t6M_v_;
+text: .text%__1cNc1_AllocTable2t6Mi_v_;
+text: .text%__1cIRegAllocFclear6M_v_;
+text: .text%__1cNCodeGenerator2t6MpnIValueGen_pnRValueGenInvariant__v_;
+text: .text%__1cNCodeGeneratorIblock_do6MpnKBlockBegin__v_;
+text: .text%__1cLLIR_EmitterMmust_bailout6kM_i_;
+text: .text%__1cNCodeGeneratorPblock_do_prolog6MpnKBlockBegin__v_;
+text: .text%__1cIValueGenLstart_block6MpnKBlockBegin__v_;
+text: .text%__1cLLIR_EmitterLstart_block6MpnKBlockBegin__v_;
+text: .text%__1cILIR_List2t6MpnLCompilation__v_;
+text: .text%__1cIValueGenQbind_block_entry6MpnKBlockBegin__v_;
+text: .text%__1cLLIR_EmitterQbind_block_entry6MpnKBlockBegin__v_;
+text: .text%__1cIValueGenMblock_prolog6MpnKBlockBegin__v_;
+text: .text%__1cIValueGenHdo_root6MpnLInstruction__v_;
+text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_GraphBuilder.o;
+text: .text%__1cIValueGenNdo_BlockBegin6MpnKBlockBegin__v_;
+text: .text%__1cQDelayedSpillMark2T6M_v_: c1_CodeGenerator.o;
+text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_IR.o;
+text: .text%__1cIValueGenHdo_Base6MpnEBase__v_;
+text: .text%__1cIValueGenNreceiverRInfo6F_nFRInfo__;
+text: .text%__1cIValueGenMicKlassRInfo6F_nFRInfo__;
+text: .text%__1cLCompilationNget_init_vars6M_pnIintStack__;
+text: .text%__1cLLIR_EmitterJstd_entry6MpnHIRScope_pnIintStack_nFRInfo_5_v_;
+text: .text%__1cILIR_ListWunverified_entry_point6MnFRInfo_1_v_: c1_LIREmitter.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter.o;
+text: .text%__1cLLIR_EmitterGmethod6kM_pnIciMethod__;
+text: .text%__1cMCodeEmitInfo2t6MpnLLIR_Emitter_ipnIintStack_pnKValueStack_pnOExceptionScope_pnPRInfoCollection__v_;
+text: .text%__1cLCompilationVvalue_stack2lir_stack6MpnKValueStack__pnNGrowableArray4CpnLLIR_OprDesc____;
+text: .text%__1cIValueGenMblock_epilog6MpnKBlockBegin__v_;
+text: .text%__1cNCodeGeneratorPblock_do_epilog6MpnKBlockBegin__v_;
+text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Canonicalizer.o;
+text: .text%__1cIValueGenHdo_Goto6MpnEGoto__v_;
+text: .text%__1cIValueGenNset_no_result6MpnLInstruction__v_;
+text: .text%__1cIValueGenLmove_to_phi6MpnKValueStack_i_i_;
+text: .text%__1cIValueGenWgoto_default_successor6MpnIBlockEnd_pnMCodeEmitInfo__v_;
+text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Instruction.o;
+text: .text%__1cIValueGenMdo_LoadField6MpnJLoadField__v_;
+text: .text%__1cLAccessFieldKlock_stack6kM_pnKValueStack__: c1_Instruction.o;
+text: .text%__1cIValueGenEwalk6MpnLInstruction__v_;
+text: .text%__1cIValueGenMdo_LoadLocal6MpnJLoadLocal__v_;
+text: .text%__1cIValueGenJload_item6MpnEItem__v_;
+text: .text%__1cEItemGupdate6M_v_;
+text: .text%__1cIValueGenQset_maynot_spill6MpnEItem__v_;
+text: .text%__1cIValueGenSfpu_fanout_handled6MpnEItem__i_;
+text: .text%__1cEItemEtype6kM_pnJValueType__: c1_Items.o;
+text: .text%__1cIValueGenPlock_free_rinfo6MpnLInstruction_pnJValueType__nFRInfo__;
+text: .text%__1cIRegAllocMhas_free_reg6kMpnJValueType__i_;
+text: .text%__1cIRegAllocMhas_free_reg6kMnIValueTag__i_;
+text: .text%__1cNc1_AllocTableMhas_one_free6kM_i_;
+text: .text%__1cIRegAllocNget_lock_temp6MpnLInstruction_pnJValueType__nFRInfo__;
+text: .text%__1cIRegAllocMget_free_reg6MpnJValueType__nFRInfo__;
+text: .text%__1cIRegAllocMget_free_reg6MnIValueTag__nFRInfo__;
+text: .text%__1cNc1_AllocTableIget_free6M_i_;
+text: .text%__1cNc1_AllocTablePget_free_helper6Mi_i_;
+text: .text%__1cIRegAllocIlock_reg6MpnLInstruction_nFRInfo_i_v_;
+text: .text%__1cJRInfo2RegFdo_it6M_v_: c1_RegAlloc.o;
+text: .text%__1cHLockRegGdo_cpu6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocOset_locked_cpu6MipnLInstruction_i_v_;
+text: .text%__1cNc1_AllocTableKset_locked6Mi_v_;
+text: .text%__1cLCompilationIitem2lir6MpknEItem__pnLLIR_OprDesc__;
+text: .text%__1cLCompilationKitem2stack6MpknEItem__i_;
+text: .text%__1cJValueTypeNas_DoubleType6M_pnKDoubleType__: c1_ValueType.o;
+text: .text%__1cMas_BasicType6FpnJValueType__nJBasicType__;
+text: .text%__1cJValueTypeMas_ArrayType6M_pnJArrayType__: c1_ValueType.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_Compilation.o;
+text: .text%__1cLLIR_EmitterEmove6MpnLLIR_OprDesc_nFRInfo__v_;
+text: .text%__1cILIR_ListEmove6MpnLLIR_OprDesc_2pnMCodeEmitInfo__v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenJitem_free6MpnEItem__v_;
+text: .text%__1cIRegAllocPincr_spill_lock6MnFRInfo__v_;
+text: .text%__1cQChangeSpillCountGdo_cpu6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIValueGenFrfree6MpnEItem__v_;
+text: .text%__1cIRegAllocPdecr_spill_lock6MnFRInfo__v_;
+text: .text%__1cIRegAllocIfree_reg6MnFRInfo__v_;
+text: .text%__1cHFreeRegGdo_cpu6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocMset_free_cpu6Mi_v_;
+text: .text%__1cNc1_AllocTableIset_free6Mi_v_;
+text: .text%__1cIValueGenWrlock_result_with_hint6MpnLInstruction_pknEItem__nFRInfo__;
+text: .text%__1cIValueGenFrlock6MpnLInstruction_pknEItem__nFRInfo__;
+text: .text%__1cIRegAllocMget_lock_reg6MpnLInstruction_pnJValueType__nFRInfo__;
+text: .text%__1cLLIR_EmitterKfield_load6MnFRInfo_pnHciField_pnLLIR_OprDesc_iiipnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListMload_mem_reg6MnFRInfo_i1nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIR.o;
+text: .text%__1cIRegAllocHset_reg6MnFRInfo_ipnLInstruction__v_;
+text: .text%__1cGSetRegGdo_cpu6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocLset_cpu_reg6MiipnLInstruction__v_;
+text: .text%__1cIValueGenNdo_StoreLocal6MpnKStoreLocal__v_;
+text: .text%__1cEItemRhandle_float_kind6M_v_;
+text: .text%__1cEItemNset_from_item6Mpk0_v_: c1_Items.o;
+text: .text%__1cIValueGenXcan_inline_any_constant6kM_i_;
+text: .text%__1cIValueGenSmust_copy_register6MpnEItem__i_;
+text: .text%__1cIValueGenUcheck_float_register6MpnEItem__v_;
+text: .text%__1cIRegAllocLis_free_reg6kMnFRInfo__i_;
+text: .text%__1cJIsFreeRegGdo_cpu6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cNc1_AllocTableHis_free6kMi_i_;
+text: .text%__1cLLIR_EmitterJopr2local6MipnLLIR_OprDesc__v_;
+text: .text%__1cILIR_ListQreg2single_stack6MnFRInfo_inJBasicType__v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenFdo_If6MpnCIf__v_;
+text: .text%__1cIHintItemNset_from_item6MpknEItem__v_;
+text: .text%__1cIHintItemEtype6kM_pnJValueType__: c1_Items.o;
+text: .text%__1cJValueTypeMas_FloatType6M_pnJFloatType__: c1_ValueType.o;
+text: .text%__1cIValueGenLdo_Constant6MpnIConstant__v_;
+text: .text%__1cJValueTypeRas_ObjectConstant6M_pnOObjectConstant__: c1_Canonicalizer.o;
+text: .text%__1cIValueGenOdont_load_item6MpnEItem__v_;
+text: .text%__1cIValueGenWdont_load_item_nocheck6MpnEItem__v_;
+text: .text%__1cLLIR_OprFactKvalue_type6FpnJValueType__pnLLIR_OprDesc__;
+text: .text%__1cLLIR_EmitterFif_op6MinLInstructionJCondition_pnLLIR_OprDesc_4pnKBlockBegin_66pnMCodeEmitInfo__v_;
+text: .text%__1cJLIR_ConstEtype6kM_nJBasicType__: c1_CacheLocals.o;
+text: .text%__1cJLIR_ConstLas_constant6M_p0_: c1_CacheLocals.o;
+text: .text%__1cLLIR_EmitterIlir_cond6MnLInstructionJCondition__nMLIR_OpBranchNLIR_Condition__;
+text: .text%__1cILIR_ListDcmp6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnKBlockBegin__v_;
+text: .text%__1cMLIR_OpBranch2t6Mn0ANLIR_Condition_pnKBlockBegin_pnMCodeEmitInfo__v_;
+text: .text%__1cEItemEtype6kM_pnJValueType__: c1_CodeGenerator.o;
+text: .text%__1cJArrayTypeMas_ArrayType6M_p0_: c1_ValueType.o;
+text: .text%__1cLLIR_EmitterHopr2int6MpnLLIR_OprDesc__i_;
+text: .text%__1cILIR_ListJint2stack6Mii_v_: c1_LIREmitter.o;
+text: .text%__1cLInstructionGas_Phi6M_pnDPhi__: c1_Loops.o;
+text: .text%__1cNCachingChangeFvisit6MpnSInstructionVisitor__v_: c1_Loops.o;
+text: .text%__1cIValueGenQdo_CachingChange6MpnNCachingChange__v_;
+text: .text%__1cIValueGenPdo_ArithmeticOp6MpnMArithmeticOp__v_;
+text: .text%__1cIValueGenTdo_ArithmeticOp_Int6MpnMArithmeticOp__v_;
+text: .text%__1cIValueGenOload_item_hint6MpnEItem_pk1_v_;
+text: .text%__1cEItemRget_jint_constant6kM_i_;
+text: .text%__1cLLIR_EmitterRarithmetic_op_int6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo__v_;
+text: .text%__1cLLIR_EmitterNarithmetic_op6MnJBytecodesECode_pnLLIR_OprDesc_44inFRInfo_pnMCodeEmitInfo__v_;
+text: .text%__1cLLIR_EmitterYstrength_reduce_multiply6MpnLLIR_OprDesc_i22_i_;
+text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter_x86.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter_x86.o;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter_x86.o;
+text: .text%__1cILIR_ListKshift_left6MpnLLIR_OprDesc_222_v_;
+text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o;
+text: .text%__1cIValueGenWcan_inline_as_constant6MpnEItem__i_;
+text: .text%__1cIRegAllocPget_register_rc6kMnFRInfo__i_;
+text: .text%__1cLGetRefCountGdo_cpu6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListDadd6MpnLLIR_OprDesc_22_v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenOdo_LoadIndexed6MpnLLoadIndexed__v_;
+text: .text%__1cJValueTypeLas_LongType6M_pnILongType__: c1_ValueType.o;
+text: .text%__1cLAccessArrayKlock_stack6kM_pnKValueStack__: c1_Instruction.o;
+text: .text%__1cMCodeEmitInfoVfill_expression_stack6M_v_;
+text: .text%__1cLLIR_EmitterRarray_range_check6MpnLLIR_OprDesc_2pnMCodeEmitInfo_4_v_;
+text: .text%__1cORangeCheckStub2t6MpnMCodeEmitInfo_nFRInfo_ii_v_;
+text: .text%__1cMCodeEmitInfo2t6Mp0i_v_;
+text: .text%__1cLLIR_EmitterLcmp_reg_mem6MnMLIR_OpBranchNLIR_Condition_nFRInfo_3inJBasicType_pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListLcmp_reg_mem6MnMLIR_OpBranchNLIR_Condition_nFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnICodeStub__v_;
+text: .text%__1cMLIR_OpBranch2t6Mn0ANLIR_Condition_pnICodeStub_pnMCodeEmitInfo__v_;
+text: .text%__1cLLIR_EmitterMindexed_load6MnFRInfo_nJBasicType_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_;
+text: .text%__1cLLIR_EmitterNarray_address6MpnLLIR_OprDesc_2inJBasicType__pnLLIR_Address__;
+text: .text%__1cLLIR_AddressFscale6FnJBasicType__n0AFScale__;
+text: .text%__1cILIR_ListEmove6MpnLLIR_Address_pnLLIR_OprDesc_pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o;
+text: .text%__1cIRegAllocNoops_in_spill6kM_pnIintStack__;
+text: .text%__1cIRegAllocRoops_in_registers6kM_pnPRInfoCollection__;
+text: .text%__1cIValueGenbDsafepoint_poll_needs_register6F_i_;
+text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator.o;
+text: .text%__1cLLIR_EmitterHgoto_op6MpnKBlockBegin_pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListEjump6MpnKBlockBegin_pnMCodeEmitInfo__v_;
+text: .text%__1cIValueGenNdo_StoreField6MpnKStoreField__v_;
+text: .text%__1cIValueGenOscratch1_RInfo6kM_nFRInfo__;
+text: .text%__1cIValueGenUprefer_alu_registers6kM_i_;
+text: .text%__1cLLIR_EmitterLfield_store6MpnHciField_pnLLIR_OprDesc_i4iipnMCodeEmitInfo_nFRInfo__v_;
+text: .text%__1cILIR_ListNstore_mem_reg6MnFRInfo_1inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
+text: .text%__1cIValueGenJdo_Return6MpnGReturn__v_;
+text: .text%__1cJValueTypeLas_VoidType6M_pnIVoidType__: c1_ValueType.o;
+text: .text%__1cIValueGenTresult_register_for6FpnJValueType_i_nFRInfo__;
+text: .text%__1cIValueGenMreturn1RInfo6F_nFRInfo__;
+text: .text%__1cIValueGenPload_item_force6MpnEItem_nFRInfo__v_;
+text: .text%__1cIValueGenPlock_spill_temp6MpnLInstruction_nFRInfo__v_;
+text: .text%__1cIRegAllocJlock_temp6MpnLInstruction_nFRInfo__v_;
+text: .text%__1cLLIR_EmitterJreturn_op6MpnLLIR_OprDesc__v_;
+text: .text%__1cNCodeGeneratorXclear_instruction_items6FpnKBlockBegin__v_;
+text: .text%__1cQLIR_LocalCaching2t6MpnCIR__v_;
+text: .text%__1cQLIR_LocalCachingQpreferred_locals6MpknIciMethod__pnMLocalMapping__;
+text: .text%__1cMLocalMappingQinit_cached_regs6M_v_;
+text: .text%__1cPRegisterManager2t6M_v_;
+text: .text%__1cMLocalMappingNget_cache_reg6kMi_nFRInfo__;
+text: .text%__1cQLIR_LocalCachingVcompute_cached_locals6M_v_;
+text: .text%__1cQLIR_LocalCachingMcache_locals6M_v_;
+text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_IR.o;
+text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_Canonicalizer.o;
+text: .text%__1cNCachingChangeQas_CachingChange6M_p0_: c1_Loops.o;
+text: .text%__1cRBlockListScanInfo2t6MpnJBlockList__v_: c1_CacheLocals.o;
+text: .text%__1cOLIR_OprRefList2t6M_v_: c1_CacheLocals.o;
+text: .text%__1cRBlockListScanInfoItraverse6MpnKBlockBegin_pnKLIR_OpList__v_: c1_CacheLocals.o;
+text: .text%__1cLLIR_OpLabelFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cHLIR_Op1Fvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cPRegisterManagerElock6MnFRInfo__v_;
+text: .text%__1cHLIR_Op2Fvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cMLIR_OpBranchFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cORangeCheckStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_CodeStubs_x86.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeStubs_x86.o;
+text: .text%__1cNc1_AllocTableFmerge6Mp0_v_;
+text: .text%__1cGLIR_OpFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cQLIR_LocalCachingXcache_locals_for_blocks6MpnJBlockList_pnPRegisterManager_i_pnMLocalMapping__;
+text: .text%__1cLInstructionNas_StateSplit6M_pnKStateSplit__: c1_Loops.o;
+text: .text%__1cLInstructionOas_AccessField6M_pnLAccessField__: c1_Loops.o;
+text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_Loops.o;
+text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_IR.o;
+text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_IR.o;
+text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_IR.o;
+text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_IR.o;
+text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_IR.o;
+text: .text%__1cLInstructionMas_Intrinsic6M_pnJIntrinsic__: c1_IR.o;
+text: .text%__1cKScanBlocksQmost_used_locals6M_pnKALocalList__;
+text: .text%__1cKScanBlocksMint_count_at6kMi_i_;
+text: .text%__1cKScanBlocksIcount_at6kMnIValueTag_i_i_;
+text: .text%__1cKScanBlocksJget_array6kMnIValueTag__pknIintStack__;
+text: .text%__1cKScanBlocksNlong_count_at6kMi_i_;
+text: .text%__1cKScanBlocksMobj_count_at6kMi_i_;
+text: .text%__1cKScanBlocksLis_obj_only6kMi_i_;
+text: .text%__1cKScanBlocksLis_int_only6kMi_i_;
+text: .text%__1cGALocalUsort_by_access_count6Fpp02_i_: c1_ScanBlocks.o;
+text: .text%__1cQLIR_LocalCachingPcompute_caching6MpnKALocalList_pnPRegisterManager__pnMLocalMapping__;
+text: .text%__1cPRegisterManagerMnum_free_cpu6M_i_;
+text: .text%__1cMLocalMappingNget_cache_reg6kMinIValueTag__nFRInfo__;
+text: .text%__1cPRegisterManagerMhas_free_reg6MnIValueTag__i_;
+text: .text%__1cPRegisterManagerNlock_free_reg6MnIValueTag__nFRInfo__;
+text: .text%__1cQLIR_LocalCachingQadd_at_all_names6FpnPRInfoCollection_inFRInfo_pnMWordSizeList__v_;
+text: .text%__1cIintStackEgrow6Mki1_v_: c1_CacheLocals.o;
+text: .text%__1cMLocalMappingFmerge6Mp0_v_;
+text: .text%__1cGALocalNsort_by_index6Fpp02_i_: c1_CacheLocals.o;
+text: .text%__1cSLocalMappingSetterIblock_do6MpnKBlockBegin__v_;
+text: .text%__1cMLocalMappingEjoin6Mp0_v_;
+text: .text%__1cPRegisterManagerLis_free_reg6MnFRInfo__i_;
+text: .text%__1cQLIR_LocalCachingYinsert_transition_blocks6M_v_;
+text: .text%__1cPBlockTransitionIblock_do6MpnKBlockBegin__v_: c1_CacheLocals.o;
+text: .text%__1cGLIR_OpLas_OpBranch6M_pnMLIR_OpBranch__: c1_LIR.o;
+text: .text%__1cMLocalMappingPemit_transition6FpnILIR_List_p03pnCIR__v_;
+text: .text%__1cCIRThighest_used_offset6kM_i_;
+text: .text%__1cILIR_ListQreg2single_stack6MnFRInfo_inJBasicType__v_: c1_CacheLocals.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CacheLocals.o;
+text: .text%__1cILIR_ListQsingle_stack2reg6MinFRInfo_nJBasicType__v_;
+text: .text%__1cLInstructionQas_CachingChange6M_pnNCachingChange__: c1_Instruction.o;
+text: .text%__1cLCompilationbBemit_code_prolog_non_native6MpnIFrameMap__v_;
+text: .text%__1cHIRScopeJmax_stack6kM_i_;
+text: .text%__1cNLIR_Optimizer2t6MpnCIR__v_;
+text: .text%__1cRLIR_PeepholeState2t6M_v_;
+text: .text%__1cRLIR_PeepholeStateKinitialize6MpnMLocalMapping__v_;
+text: .text%__1cRLIR_PeepholeStateMclear_values6M_v_;
+text: .text%__1cOLIR_OprRefList2t6M_v_: c1_LIROptimizer.o;
+text: .text%__1cNLIR_OptimizerIoptimize6M_v_;
+text: .text%__1cNLIR_OptimizerIoptimize6MpnJBlockList__v_;
+text: .text%__1cNLIR_OptimizerIoptimize6MpnKBlockBegin__v_;
+text: .text%__1cNLIR_OptimizerMblock_prolog6M_v_;
+text: .text%__1cNLIR_OptimizerKprocess_op6M_v_;
+text: .text%__1cGLIR_OpGas_Op16M_pnHLIR_Op1__: c1_LIR.o;
+text: .text%__1cLLIR_OpLabelKas_OpLabel6M_p0_: c1_LIR.o;
+text: .text%__1cRLIR_PeepholeStateVfinish_forward_branch6MpnFLabel__v_;
+text: .text%__1cJLabelListIindex_of6kMkpnFLabel__i_: c1_LIROptimizer.o;
+text: .text%__1cRLIR_PeepholeStateYset_disable_optimization6Mi_v_;
+text: .text%__1cLLIR_OpLabelJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerMemit_opLabel6MpnLLIR_OpLabel__v_;
+text: .text%__1cNLIR_OptimizerFvisit6M_v_: c1_LIROptimizer_x86.o;
+text: .text%__1cHLIR_Op0Jemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerIemit_op06MpnHLIR_Op0__v_;
+text: .text%__1cHLIR_Op2Jemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerIemit_op26MpnHLIR_Op2__v_;
+text: .text%__1cNLIR_OptimizerKhandle_opr6MpnLLIR_OprDesc_nQLIR_OpVisitStateHOprMode__2_;
+text: .text%__1cNLIR_OptimizerJis_cached6MpnLLIR_OprDesc__i_;
+text: .text%__1cNLIR_OptimizerUrecord_opr_reference6MpnLLIR_OprDesc__v_;
+text: .text%__1cRLIR_PeepholeStateUrecord_opr_reference6MpnLLIR_OprDesc__i_;
+text: .text%__1cRLIR_PeepholeStateLdefining_op6MpnLLIR_OprDesc__i_;
+text: .text%__1cRLIR_PeepholeStateJreg2index6MpnLLIR_OprDesc__i_;
+text: .text%__1cIintStackEgrow6Mki1_v_: c1_LIROptimizer.o;
+text: .text%__1cNLIR_OptimizerMblock_epilog6M_v_;
+text: .text%__1cRLIR_PeepholeStateRis_safe_to_delete6kMi_i_;
+text: .text%__1cHLIR_Op1Gas_Op16M_p0_: c1_LIR.o;
+text: .text%__1cHLIR_Op1Jemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerIemit_op16MpnHLIR_Op1__v_;
+text: .text%__1cNLIR_OptimizerMprocess_move6MpnHLIR_Op1__v_;
+text: .text%__1cMLocalMappingNget_cache_reg6kMpnLLIR_OprDesc__2_;
+text: .text%__1cRLIR_PeepholeStateTmark_safe_to_delete6Mi_v_;
+text: .text%__1cNLIR_OptimizerRreplace_stack_opr6MpnLLIR_OprDesc__2_;
+text: .text%__1cNLIR_OptimizerNoptimize_move6MpnHLIR_Op1_rpnLLIR_OprDesc_5_i_;
+text: .text%__1cRLIR_PeepholeStatebFequivalent_register_or_constant6MpnLLIR_OprDesc__2_;
+text: .text%__1cRLIR_PeepholeStateOequivalent_opr6MpnLLIR_OprDesc__2_;
+text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer_x86.o;
+text: .text%__1cNLIR_OptimizerMis_cache_reg6MpnLLIR_OprDesc__i_;
+text: .text%__1cMLocalMappingMis_cache_reg6kMpnLLIR_OprDesc__i_;
+text: .text%__1cMLocalMappingMis_cache_reg6kMnFRInfo__i_;
+text: .text%__1cRLIR_PeepholeStateSequivalent_address6MpnLLIR_OprDesc__2_;
+text: .text%__1cNLIR_OptimizerRresult_substitute6M_v_;
+text: .text%__1cNLIR_OptimizerRnext_op_with_code6MnILIR_Code__pnGLIR_Op__;
+text: .text%__1cNLIR_OptimizerFop_at6Mi_pnGLIR_Op__;
+text: .text%__1cRLIR_PeepholeStateMkill_operand6MpnLLIR_OprDesc_i_v_;
+text: .text%__1cRLIR_PeepholeStateQkill_equivalents6MpnLLIR_OprDesc__v_;
+text: .text%__1cRLIR_PeepholeStateNkill_register6Mi_v_;
+text: .text%__1cRLIR_PeepholeStateSrecord_defining_op6MpnLLIR_OprDesc_i_v_;
+text: .text%__1cRLIR_PeepholeStatePset_defining_op6Mii_v_;
+text: .text%__1cRLIR_PeepholeStateHdo_move6MpnLLIR_OprDesc_2_v_;
+text: .text%__1cLLIR_OprListEgrow6MkikpnLLIR_OprDesc__v_: c1_LIROptimizer.o;
+text: .text%__1cLLIR_AddressKas_address6M_p0_: c1_LIR.o;
+text: .text%__1cRLIR_PeepholeStateTequivalent_register6MpnLLIR_OprDesc__2_;
+text: .text%__1cKLIR_OprPtrLas_constant6M_pnJLIR_Const__: c1_LIR.o;
+text: .text%__1cNLIR_OptimizerKallow_opto6M_i_;
+text: .text%__1cNLIR_OptimizerLrecord_opto6MpnLLIR_OprDesc_2_2_;
+text: .text%__1cLLIR_AddressEtype6kM_nJBasicType__: c1_LIR.o;
+text: .text%__1cRLIR_PeepholeStateNincrement_ref6Mi_v_;
+text: .text%__1cKLIR_OprPtrKas_address6M_pnLLIR_Address__: c1_CacheLocals.o;
+text: .text%__1cMLIR_OpBranchLas_OpBranch6M_p0_: c1_LIR.o;
+text: .text%__1cMLIR_OpBranchJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerNemit_opBranch6MpnMLIR_OpBranch__v_;
+text: .text%__1cNLIR_OptimizerQopr_live_on_exit6MpnLLIR_OprDesc__i_;
+text: .text%__1cNResourceArrayJremove_at6MIi_v_;
+text: .text%__1cRLIR_PeepholeStateLstack2index6MpnLLIR_OprDesc__i_;
+text: .text%__1cRLIR_PeepholeStatePkill_stack_slot6Mi_v_;
+text: .text%__1cRLIR_PeepholeStatebCequivalent_register_or_stack6MpnLLIR_OprDesc__2_;
+text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer.o;
+text: .text%__1cNLIR_OptimizerLhandle_info6MpnMCodeEmitInfo__v_;
+text: .text%__1cMCodeEmitInfoRset_local_mapping6MpnMLocalMapping__v_;
+text: .text%__1cNLIR_OptimizerUrecord_register_oops6MpnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_OptimizerOemit_code_stub6MpnICodeStub__v_;
+text: .text%__1cLCompilationOemit_code_body6MpnLCodeOffsets__i_;
+text: .text%__1cNLIR_Assembler2t6MpnLCompilation_pnLCodeOffsets__v_;
+text: .text%__1cNConstantTable2t6M_v_;
+text: .text%__1cNLIR_AssemblerJemit_code6MpnJBlockList__v_;
+text: .text%__1cQCollectConstantsIblock_do6MpnKBlockBegin__v_: c1_LIRAssembler.o;
+text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_IR.o;
+text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Instruction.o;
+text: .text%__1cJValueTypeNas_DoubleType6M_pnKDoubleType__: c1_Canonicalizer.o;
+text: .text%__1cJValueTypeMas_FloatType6M_pnJFloatType__: c1_Canonicalizer.o;
+text: .text%__1cLInstructionLas_Constant6M_pnIConstant__: c1_Loops.o;
+text: .text%__1cNLIR_AssemblerOemit_constants6M_v_;
+text: .text%__1cNConstantTableMemit_entries6MpnOMacroAssembler_i_v_;
+text: .text%__1cLLIR_CodeGenIblock_do6MpnKBlockBegin__v_;
+text: .text%__1cNLIR_AssemblerPcheck_codespace6M_v_;
+text: .text%__1cNLIR_AssemblerMemit_opLabel6MpnLLIR_OpLabel__v_;
+text: .text%__1cNLIR_AssemblerIemit_op06MpnHLIR_Op0__v_;
+text: .text%__1cNLIR_AssemblerIemit_op26MpnHLIR_Op2__v_;
+text: .text%__1cNLIR_AssemblerMneeds_icache6kMpnIciMethod__i_;
+text: .text%__1cFRInfoLas_register6kM_pnMRegisterImpl__;
+text: .text%__1cNLIR_AssemblerMcheck_icache6MpnMRegisterImpl_2_i_;
+text: .text%__1cRC1_MacroAssemblerSinline_cache_check6MpnMRegisterImpl_2_v_;
+text: .text%__1cRC1_MacroAssemblerOverified_entry6M_v_;
+text: .text%__1cNLIR_AssemblerLbuild_frame6M_v_;
+text: .text%__1cNLIR_AssemblerbBinitial_frame_size_in_bytes6M_i_;
+text: .text%__1cIFrameMapJframesize6kM_i_;
+text: .text%__1cRC1_MacroAssemblerLbuild_frame6Mi_v_;
+text: .text%__1cRAbstractAssemblerbDgenerate_stack_overflow_check6Mi_v_;
+text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: c1_Compiler.o;
+text: .text%__1cNLIR_AssemblerVsetup_locals_at_entry6M_v_;
+text: .text%__1cIFrameMapYsignature_type_array_for6FpknIciMethod__pnNBasicTypeList__;
+text: .text%__1cIFrameMapScalling_convention6FpknIciMethod_pnIintArray__pnRCallingConvention__;
+text: .text%__1cIFrameMapScalling_convention6FirknOBasicTypeArray_pnIintArray__pnRCallingConvention__;
+text: .text%__1cIintArray2t6Mki1_v_: c1_FrameMap_x86.o;
+text: .text%__1cIFrameMapRname_for_argument6Fi_i_;
+text: .text%__1cIFrameMapSfp_offset_for_name6kMiii_i_;
+text: .text%__1cIFrameMapPnum_local_names6kM_i_;
+text: .text%__1cIFrameMapNlocal_to_slot6kMii_i_;
+text: .text%__1cIFrameMapSfp_offset_for_slot6kMi_i_;
+text: .text%__1cQArgumentLocation2t6Mci_v_: c1_FrameMap_x86.o;
+text: .text%__1cQArgumentLocationSset_stack_location6Mi_v_;
+text: .text%__1cIFrameMapQaddress_for_name6kMiii_nHAddress__;
+text: .text%__1cIFrameMapQmake_new_address6kMi_nHAddress__;
+text: .text%__1cNLIR_AssemblerIemit_op16MpnHLIR_Op1__v_;
+text: .text%__1cNLIR_AssemblerHmove_op6MpnLLIR_OprDesc_2nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerHmem2reg6MpnLLIR_Address_nFRInfo_nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerKas_Address6MpnLLIR_Address__nHAddress__;
+text: .text%__1cNLIR_AssemblerHcomp_op6MnMLIR_OpBranchNLIR_Condition_pnLLIR_OprDesc_4nJBasicType__v_;
+text: .text%__1cNLIR_AssemblerNemit_opBranch6MpnMLIR_OpBranch__v_;
+text: .text%__1cNLIR_AssemblerJreg2stack6MnFRInfo_inJBasicType__v_;
+text: .text%__1cNLIR_AssemblerLconst2stack6MpnJLIR_Const_i_v_;
+text: .text%__1cNLIR_AssemblerJstack2reg6MpnLLIR_OprDesc_2nJBasicType__v_;
+text: .text%__1cNLIR_AssemblerHreg2reg6MnFRInfo_1_v_;
+text: .text%__1cNLIR_AssemblerJmove_regs6MpnMRegisterImpl_2_v_;
+text: .text%__1cNLIR_AssemblerIshift_op6MnILIR_Code_nFRInfo_i2_v_;
+text: .text%__1cNLIR_AssemblerIarith_op6MnILIR_Code_pnLLIR_OprDesc_33pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerbIadd_debug_info_for_null_check_here6MpnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerLcode_offset6kM_i_;
+text: .text%__1cNLIR_AssemblerbDadd_debug_info_for_null_check6MipnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerOemit_code_stub6MpnICodeStub__v_;
+text: .text%__1cVImplicitNullCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cNLIR_AssemblerCpc6kM_pC_;
+text: .text%__1cICodeStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o;
+text: .text%__1cICodeStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
+text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler.o;
+text: .text%__1cORangeCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cNLIR_AssemblerOsafepoint_poll6MnFRInfo_pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerZadd_debug_info_for_branch6MpnMCodeEmitInfo__v_;
+text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o;
+text: .text%__1cMCodeEmitInfoRrecord_debug_info6MpnYDebugInformationRecorder_ii_v_;
+text: .text%__1cMCodeEmitInfoHoop_map6M_pnGOopMap__;
+text: .text%__1cMCodeEmitInfoScompute_debug_info6M_v_;
+text: .text%__1cMCodeEmitInfoOcreate_oop_map6M_pnGOopMap__;
+text: .text%__1cIFrameMapRoop_map_arg_count6M_i_;
+text: .text%__1cMCodeEmitInfoTrecord_spilled_oops6kMpnIFrameMap_pnGOopMap__v_;
+text: .text%__1cKciLocalMapNindex_for_bci6kMi_i_;
+text: .text%__1cSciLocalMapIteratorJfind_next6M_v_: c1_LIREmitter.o;
+text: .text%__1cJLocalSlotIfor_type6MpnJValueType_ii_pnFLocal__: c1_LIREmitter.o;
+text: .text%__1cMCodeEmitInfoNget_cache_reg6kMinIValueTag__nFRInfo__;
+text: .text%__1cIFrameMapTsingle_word_regname6kMi_nHOptoRegEName__;
+text: .text%__1cIFrameMapMfp2sp_offset6kMi_i_;
+text: .text%__1cGOopMapHset_oop6MnHOptoRegEName_ii_v_;
+text: .text%__1cMCodeEmitInfoVlir_stack2value_stack6MpnNGrowableArray4CpnLLIR_OprDesc____pnNGrowableArray4CpnKScopeValue____;
+text: .text%__1cMCodeEmitInfobCcompute_debug_info_for_scope6MpnHIRScope_ipnNGrowableArray4CpnKScopeValue___inGValues_i_pnQIRScopeDebugInfo__;
+text: .text%__1cMCodeEmitInfobCscope_value_for_local_offset6MinILocationEType_ppnKScopeValue__4_;
+text: .text%__1cMCodeEmitInfobEget_cache_reg_for_local_offset6kMi_nFRInfo__;
+text: .text%__1cMLocalMappingbEget_cache_reg_for_local_offset6kMi_nFRInfo__;
+text: .text%__1cMCodeEmitInfoZlocation_for_local_offset6MinILocationEType__1_;
+text: .text%__1cIFrameMapZlocation_for_local_offset6kMinILocationEType_p1_i_;
+text: .text%__1cIFrameMapWlocation_for_fp_offset6kMinILocationEType_p1_i_;
+text: .text%__1cILocationVlegal_offset_in_bytes6Fi_i_;
+text: .text%__1cMCodeEmitInfoYscope_value_for_register6MnFRInfo_ppnKScopeValue_4nILocationEType__v_;
+text: .text%__1cGOopMapJdeep_copy6M_p0_;
+text: .text%__1cGOopMap2t6Mn0ANDeepCopyToken_p0_v_;
+text: .text%__1cMOopMapStream2t6MpnGOopMap__v_;
+text: .text%__1cMOopMapStreamJfind_next6M_v_;
+text: .text%__1cUCompressedReadStreamIread_int6M_i_: oopMap.o;
+text: .text%__1cYDebugInformationRecorderNadd_safepoint6MiipnGOopMap__v_;
+text: .text%__1cYDebugInformationRecorderLcheck_phase6Mn0AFPhase__v_;
+text: .text%__1cYDebugInformationRecorderKadd_oopmap6MiipnGOopMap__v_;
+text: .text%__1cYDebugInformationRecorderTcreate_scope_values6MpnNGrowableArray4CpnKScopeValue____pnKDebugToken__;
+text: .text%__1cYDebugInformationRecorderWserialize_scope_values6MpnNGrowableArray4CpnKScopeValue____i_;
+text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfoRec.o;
+text: .text%__1cNLocationValueIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: debugInfo.o;
+text: .text%__1cILocationIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cVCompressedWriteStreamJwrite_int6Mi_v_: location.o;
+text: .text%__1cYDebugInformationRecorderVcreate_monitor_values6MpnNGrowableArray4CpnMMonitorValue____pnKDebugToken__;
+text: .text%__1cYDebugInformationRecorderYserialize_monitor_values6MpnNGrowableArray4CpnMMonitorValue____i_;
+text: .text%__1cYDebugInformationRecorderOdescribe_scope6MpnIciMethod_ipnKDebugToken_44_v_;
+text: .text%__1cYDebugInformationRecorderNappend_handle6MpnI_jobject__i_;
+text: .text%__1cLOopRecorderOallocate_index6MpnI_jobject__i_;
+text: .text%__1cLCompilationbEadd_exception_handlers_for_pco6MiipnOExceptionScope__v_;
+text: .text%__1cNExceptionInfo2t6MiipnOExceptionScope__v_;
+text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_nHAddress__v_;
+text: .text%__1cNLIR_AssemblerHreg2mem6MnFRInfo_pnLLIR_Address_nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_;
+text: .text%__1cLLIR_OprDescGis_oop6kM_i_;
+text: .text%__1cNLIR_AssemblerJreturn_op6MnFRInfo_i_v_;
+text: .text%__1cWpoll_return_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o;
+text: .text%__1cRC1_MacroAssemblerLmethod_exit6Mi_v_;
+text: .text%__1cLCompilationQemit_code_epilog6MpnNLIR_Assembler__v_;
+text: .text%__1cNLIR_AssemblerUemit_slow_case_stubs6M_v_;
+text: .text%__1cNLIR_AssemblerKemit_stubs6MpnMCodeStubList__v_;
+text: .text%__1cVImplicitNullCheckStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cWImplicitExceptionTableGappend6MII_v_;
+text: .text%__1cZresource_reallocate_bytes6FpcII_0_;
+text: .text%__1cFArenaIArealloc6MpvII_1_;
+text: .text%__1cNLIR_AssemblerNadd_call_info6MipnMCodeEmitInfo__v_;
+text: .text%__1cOdummy_location6FnIValueTag__pnKScopeValue__: c1_LIREmitter.o;
+text: .text%__1cQConstantIntValueIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cORangeCheckStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cNLIR_AssemblerWemit_exception_handler6M_i_;
+text: .text%__1cRC1_MacroAssemblerRexception_handler6Mii_v_;
+text: .text%__1cNLIR_AssemblerPemit_call_stubs6M_v_;
+text: .text%__1cNLIR_AssemblerbCmaybe_adjust_stack_alignment6MpnIciMethod__v_;
+text: .text%__1cKreal_index6FpnIFrameMap_i_i_: c1_LIRAssembler_x86.o;
+text: .text%__1cLCompilationbEgenerate_exception_range_table6M_v_;
+text: .text%__1cOExceptionScopeGequals6kMp0_i_;
+text: .text%__1cLCompilationbBadd_exception_range_entries6MiipnOExceptionScope_ip2pi_v_;
+text: .text%__1cTExceptionRangeTablebCcompute_modified_at_call_pco6Fii_i_;
+text: .text%__1cOExceptionScopeMcaller_scope6kM_p0_;
+text: .text%__1cLLIR_EmitterKframe_size6M_i_;
+text: .text%__1cNLIR_Assembler2T6M_v_;
+text: .text%__1cLCompilationMinstall_code6MpnLCodeOffsets_i_v_;
+text: .text%__1cFciEnvPregister_method6MpnIciMethod_iiiiiipnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler_ii_v_;
+text: .text%__1cFciEnvbOcheck_for_system_dictionary_modification6MpnIciMethod__v_;
+text: .text%__1cFciEnvbUsystem_dictionary_modification_counter_changed6M_i_;
+text: .text%__1cHnmethodLnew_nmethod6FnMmethodHandle_iiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__p0_;
+text: .text%__1cLOopRecorderIoop_size6M_i_;
+text: .text%__1cYDebugInformationRecorderIpcs_size6M_i_;
+text: .text%__1cYDebugInformationRecorderJdata_size6M_i_;
+text: .text%__1cHnmethod2n6FIi_pv_;
+text: .text%__1cHnmethod2t6MpnNmethodOopDesc_iiiiiiipnYDebugInformationRecorder_pnKCodeBuffer_ipnJOopMapSet_pnVExceptionHandlerTable_pnWImplicitExceptionTable_pnTExceptionRangeTable_pnQAbstractCompiler__v_;
+text: .text%__1cKRelocationWfix_relocation_at_move6Mi_v_: codeBlob.o;
+text: .text%__1cLPcDescCache2t6M_v_;
+text: .text%__1cHnmFlagsFclear6M_v_;
+text: .text%__1cYDebugInformationRecorderHcopy_to6MpnHnmethod__v_;
+text: .text%__1cLOopRecorderHcopy_to6MpnICodeBlob__v_;
+text: .text%__1cICodeBlobJcopy_oops6MppnI_jobject_i_v_;
+text: .text%__1cIUniverseMnon_oop_word6F_pv_;
+text: .text%__1cHnmethodQcopy_scopes_data6MpCi_v_;
+text: .text%__1cGPcDesc2t6Miii_v_;
+text: .text%__1cHnmethodKcopy_pc_at6MipnGPcDesc__v_;
+text: .text%__1cHnmethodSresolve_JNIHandles6M_v_;
+text: .text%__1cNRelocIteratorEnext6M_i_: nmethod.o;
+text: .text%__1cJCodeCacheGcommit6FpnICodeBlob__v_;
+text: .text%__1cHnmethodKis_nmethod6kM_i_: nmethod.o;
+text: .text%__1cHnmethodUnumber_of_dependents6kM_i_: nmethod.o;
+text: .text%__1cFVTuneOcreate_nmethod6FpnHnmethod__v_;
+text: .text%__1cWImplicitExceptionTableHcopy_to6MpnHnmethod__v_;
+text: .text%__1cTExceptionRangeTableHcopy_to6MpnHnmethod__v_;
+text: .text%__1cKNativeJumpbEcheck_verified_entry_alignment6FpC1_v_;
+text: .text%__1cGEventsDlog6FpkcE_v_: nmethod.o;
+text: .text%__1cFciEnvKcompile_id6M_I_;
+text: .text%__1cNmethodOopDescIset_code6MpnHnmethod__v_;
+text: .text%__1cFciEnvbFpost_compiled_method_load_event6MpnHnmethod__v_;
+text: .text%__1cLCompilation2T6M_v_;
+text: .text%__1cFArena2T6M_v_;
+text: .text%__1cFArenaRdestruct_contents6M_v_;
+text: .text%__1cICHeapObj2k6Fpv_v_;
+text: .text%__1cTExceptionRangeTable2T6M_v_;
+text: .text%__1cFciEnvVnum_inlined_bytecodes6kM_i_;
+text: .text%__1cMelapsedTimerDadd6M0_v_;
+text: .text%__1cFciEnv2T6M_v_;
+text: .text%__1cNCompileBrokerUpop_jni_handle_block6F_v_;
+text: .text%__1cNCompileBrokerScollect_statistics6FpnOCompilerThread_nMelapsedTimer_pnLCompileTask__v_;
+text: .text%__1cHnmethodKtotal_size6kM_i_;
+text: .text%__1cHnmethodJcode_size6kM_i_: nmethod.o;
+text: .text%__1cHnmethodOexception_size6kM_i_: nmethod.o;
+text: .text%__1cHnmethodJstub_size6kM_i_: nmethod.o;
+text: .text%__1cHnmethodQscopes_data_size6kM_i_: nmethod.o;
+text: .text%__1cHnmethodPscopes_pcs_size6kM_i_: nmethod.o;
+text: .text%__1cLAccessFlagsRatomic_clear_bits6Mi_v_;
+text: .text%__1cSCompileTaskWrapper2T6M_v_;
+text: .text%__1cNCompileBrokerJfree_task6FpnLCompileTask__v_;
+text: .text%__1cLCompileTaskEfree6M_v_;
+text: .text%__1cKJNIHandlesOdestroy_global6FpnI_jobject_i_v_;
+text: .text%__1cNSignatureInfoGdo_int6M_v_: reflection.o;
+text: .text%__1cNArgumentCountDset6MinJBasicType__v_: reflection.o;
+text: .text%__1cZget_mirror_from_signature6FnMmethodHandle_pnPSignatureStream_pnGThread__pnHoopDesc__;
+text: .text%__1cPjava_lang_ClassQprimitive_mirror6FnJBasicType__pnHoopDesc__;
+text: .text%__1cNSignatureInfoHdo_long6M_v_: reflection.o;
+text: .text%__1cNSignatureInfoJdo_object6Mii_v_: reflection.o;
+text: .text%__1cPSignatureStreamJas_symbol6MpnGThread__pnNsymbolOopDesc__;
+text: .text%__1cKReflectionbFbasic_type_mirror_to_basic_type6FpnHoopDesc_pnGThread__nJBasicType__;
+text: .text%__1cPjava_lang_ClassOprimitive_type6FpnHoopDesc__nJBasicType__;
+text: .text%__1cQSystemDictionaryQjava_mirror_type6FpnHoopDesc__nJBasicType__;
+text: .text%__1cKReflectionTunbox_for_primitive6FpnHoopDesc_pnGjvalue_pnGThread__nJBasicType__;
+text: .text%__1cXjava_lang_boxing_objectJget_value6FpnHoopDesc_pnGjvalue__nJBasicType__;
+text: .text%__1cUGenericGrowableArrayGgrow646Mi_v_;
+text: .text%__1cRComputeEntryStackGdo_int6M_v_: generateOopMap.o;
+text: .text%__1cOGenerateOopMapJppdupswap6Mipkc_v_;
+text: .text%__1cOGenerateOopMapJdo_method6Miiii_v_;
+text: .text%__1cQComputeCallStackHdo_void6M_v_: generateOopMap.o;
+text: .text%__1cQComputeCallStackGdo_int6M_v_: generateOopMap.o;
+text: .text%__1cLciSignatureHtype_at6kMi_pnGciType__;
+text: .text%__1cLInstructionMas_CompareOp6M_pnJCompareOp__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionNas_InstanceOf6M_pnKInstanceOf__: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderMnew_instance6Mi_v_;
+text: .text%__1cQciBytecodeStreamJget_klass6kM_pnHciKlass__;
+text: .text%__1cQciBytecodeStreamPget_klass_index6kM_i_;
+text: .text%__1cVLoaderConstraintTableWfind_constrained_klass6MnMsymbolHandle_nGHandle__pnMklassOopDesc__;
+text: .text%__1cIciSymbolHbyte_at6Mi_i_;
+text: .text%__1cPciInstanceKlassNloader_handle6M_pnI_jobject__;
+text: .text%__1cPciInstanceKlassYprotection_domain_handle6M_pnI_jobject__;
+text: .text%__1cGciTypeMis_classless6kM_i_: ciInstanceKlass.o;
+text: .text%__1cMGraphBuilderMappend_split6MpnKStateSplit__pnLInstruction__;
+text: .text%__1cLNewInstanceFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerOdo_NewInstance6MpnLNewInstance__v_;
+text: .text%__1cLInstructionEhash6kM_i_: c1_Instruction.o;
+text: .text%__1cKStateSplitNas_StateSplit6M_p0_: c1_Instruction.o;
+text: .text%__1cLInstructionJas_Invoke6M_pnGInvoke__: c1_Instruction.o;
+text: .text%__1cKValueStackMclear_locals6M_v_;
+text: .text%__1cKValueStackMclear_stores6M_v_;
+text: .text%__1cKValueStackZpin_stack_for_state_split6M_v_;
+text: .text%__1cLNewInstanceIcan_trap6kM_i_: c1_Instruction.o;
+text: .text%__1cMGraphBuilderIstack_op6MnJBytecodesECode__v_;
+text: .text%__1cMGraphBuilderGinvoke6MnJBytecodesECode__v_;
+text: .text%__1cQciBytecodeStreamKget_method6kM_pnIciMethod__;
+text: .text%__1cQciBytecodeStreamQget_method_index6kM_i_;
+text: .text%__1cFciEnvTget_method_by_index6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__;
+text: .text%__1cFciEnvYget_method_by_index_impl6MpnPciInstanceKlass_inJBytecodesECode__pnIciMethod__;
+text: .text%__1cFciEnvbTget_instance_klass_for_declared_method_holder6FpnHciKlass__pnPciInstanceKlass__;
+text: .text%__1cPciObjectFactoryTget_unloaded_method6MpnPciInstanceKlass_pnIciSymbol_4_pnIciMethod__;
+text: .text%__1cIciMethod2t6MpnPciInstanceKlass_pnIciSymbol_4_v_;
+text: .text%__1cNciMethodKlassEmake6F_p0_;
+text: .text%__1cIciObjectMis_classless6kM_i_: ciMethod.o;
+text: .text%__1cQciBytecodeStreambAget_declared_method_holder6M_pnHciKlass__;
+text: .text%__1cQciBytecodeStreamXget_method_holder_index6M_i_;
+text: .text%__1cLciSignatureLreturn_type6kM_pnGciType__;
+text: .text%__1cKValueStackNpop_arguments6Mi_pnGValues__;
+text: .text%__1cGInvoke2t6MnJBytecodesECode_pnJValueType_pnLInstruction_pnGValues_iiii_v_;
+text: .text%__1cGInvokeFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerJdo_Invoke6MpnGInvoke__v_;
+text: .text%__1cGInvokeJas_Invoke6M_p0_: c1_Instruction.o;
+text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_Instruction.o;
+text: .text%__1cGInvokeIcan_trap6kM_i_: c1_Instruction.o;
+text: .text%__1cMGraphBuilderIthrow_op6M_v_;
+text: .text%__1cFThrowFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerIdo_Throw6MpnFThrow__v_;
+text: .text%__1cIBlockEndLas_BlockEnd6M_p0_: c1_Instruction.o;
+text: .text%__1cFThrowIcan_trap6kM_i_: c1_Instruction.o;
+text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_Instruction.o;
+text: .text%__1cFThrowIas_Throw6M_p0_: c1_Instruction.o;
+text: .text%__1cLInstructionFas_If6M_pnCIf__: c1_Instruction.o;
+text: .text%__1cLInstructionHas_Goto6M_pnEGoto__: c1_Instruction.o;
+text: .text%__1cKStateSplitPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cQNullCheckVisitorOdo_NewInstance6MpnLNewInstance__v_;
+text: .text%__1cTNullCheckEliminatorShandle_NewInstance6MpnLNewInstance__v_;
+text: .text%__1cGInvokePinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cQNullCheckVisitorJdo_Invoke6MpnGInvoke__v_;
+text: .text%__1cTNullCheckEliminatorNhandle_Invoke6MpnGInvoke__v_;
+text: .text%__1cFThrowPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cQNullCheckVisitorIdo_Throw6MpnFThrow__v_;
+text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_IR.o;
+text: .text%__1cLInstructionGnegate6Fn0AJCondition__1_;
+text: .text%__1cFThrowPstate_values_do6MpFppnLInstruction__v_v_;
+text: .text%__1cFRInfoIoverlaps6kMk0_i_;
+text: .text%__1cIValueGenOdo_NewInstance6MpnLNewInstance__v_;
+text: .text%__1cIValueGenVspill_values_on_stack6MpnKValueStack_nFRInfo_i_v_;
+text: .text%__1cIRegAllocNlock_register6MpnLInstruction_nFRInfo__v_;
+text: .text%__1cHHideReg2t6MpnIValueGen_pnJValueType__v_;
+text: .text%__1cHHideReg2T6M_v_;
+text: .text%__1cLLIR_EmitterMnew_instance6MnFRInfo_pnPciInstanceKlass_1111pnMCodeEmitInfo__v_;
+text: .text%__1cLLIR_EmitterZjobject2reg_with_patching6MnFRInfo_pnIciObject_pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListNoop2reg_patch6MpnI_jobject_nFRInfo_pnMCodeEmitInfo__v_;
+text: .text%__1cPNewInstanceStub2t6MnFRInfo_pnLLIR_OprDesc_pnPciInstanceKlass_pnMCodeEmitInfo_nIRuntime1GStubID__v_;
+text: .text%__1cIValueGenJdo_Invoke6MpnGInvoke__v_;
+text: .text%__1cIValueGenWinvoke_visit_arguments6MpnGInvoke_pnRCallingConvention__pnJItemArray__;
+text: .text%__1cIValueGenNis_free_rinfo6MnFRInfo__i_;
+text: .text%__1cGInvokeRsize_of_arguments6kM_i_;
+text: .text%__1cLLIR_EmitterVstore_stack_parameter6MpnLLIR_OprDesc_i_v_;
+text: .text%__1cILIR_ListFstore6MpnLLIR_OprDesc_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
+text: .text%__1cHHideReg2t6MpnIValueGen_nFRInfo_i_v_;
+text: .text%__1cIValueGenVinvoke_load_arguments6MpnGInvoke_pnJItemArray_pnRCallingConvention__v_;
+text: .text%__1cIValueGenPinvoke_do_spill6MpnGInvoke_nFRInfo__v_;
+text: .text%__1cIValueGenXis_caller_save_register6FnFRInfo__i_;
+text: .text%__1cIValueGenLspill_value6MpnLInstruction__v_;
+text: .text%__1cIValueGenKspill_item6MpnEItem__v_;
+text: .text%__1cIValueGenQround_spill_item6MpnEItem_i_v_;
+text: .text%__1cIRegAllocOget_lock_spill6MpnLInstruction_i_i_;
+text: .text%__1cIValueGenJraw_rfree6MpnEItem__v_;
+text: .text%__1cLLIR_EmitterFspill6MipnLLIR_OprDesc__v_;
+text: .text%__1cIFrameMapKspill_name6kMi_i_;
+text: .text%__1cIValueGenQinvoke_do_result6MpnGInvoke_ipnEItem__v_;
+text: .text%__1cIVoidTypeLas_VoidType6M_p0_: c1_ValueType.o;
+text: .text%__1cLCompilationXlir_opr_for_instruction6MpnLInstruction__pnLLIR_OprDesc__;
+text: .text%__1cLLIR_EmitterHcall_op6MnJBytecodesECode_pknOBasicTypeArray_pnMCodeEmitInfo_iiinFRInfo_pnLLIR_OprDesc__v_;
+text: .text%__1cILIR_ListKnull_check6MnFRInfo_pnMCodeEmitInfo__v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListQcall_opt_virtual6MnFRInfo_pnLLIR_OprDesc_pCpnMCodeEmitInfo_pnOStaticCallStub__v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenIdo_Throw6MpnFThrow__v_;
+text: .text%__1cLNewInstanceKexact_type6kM_pnGciType__;
+text: .text%__1cOExceptionScopeLcould_catch6kMpnPciInstanceKlass_i_i_;
+text: .text%__1cIValueGenRexceptionOopRInfo6F_nFRInfo__;
+text: .text%__1cIValueGenFsfree6MpnEItem__v_;
+text: .text%__1cIRegAllocKfree_spill6MipnJValueType__v_;
+text: .text%__1cIRegAllocNis_free_spill6kMipnJValueType__i_;
+text: .text%__1cLNewInstanceOas_NewInstance6M_p0_: c1_Instruction.o;
+text: .text%__1cIValueGenQexceptionPcRInfo6F_nFRInfo__;
+text: .text%__1cILIR_ListPthrow_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator.o;
+text: .text%__1cPNewInstanceStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cOLIR_OpJavaCallFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_LIR.o;
+text: .text%__1cOStaticCallStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cIFrameMapWcaller_save_cpu_reg_at6Fi_pnLLIR_OprDesc__;
+text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Instruction.o;
+text: .text%__1cIVoidTypeDtag6kM_nIValueTag__: c1_ValueType.o;
+text: .text%__1cLInstructionOas_NewInstance6M_pnLNewInstance__: c1_Instruction.o;
+text: .text%__1cLInstructionQas_AccessMonitor6M_pnNAccessMonitor__: c1_Instruction.o;
+text: .text%__1cRLIR_PeepholeStateHdo_call6M_v_;
+text: .text%__1cOLIR_OpJavaCallJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerJemit_call6MpnOLIR_OpJavaCall__v_;
+text: .text%__1cNLIR_AssemblerJconst2reg6MpnJLIR_Const_nFRInfo_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_;
+text: .text%__1cMPatchingStubQalign_patch_site6MpnOMacroAssembler__v_;
+text: .text%__1cJAssemblerEmovl6MpnMRegisterImpl_pnI_jobject__v_;
+text: .text%__1cOoop_RelocationLunpack_data6M_v_;
+text: .text%__1cKRelocationNunpack_2_ints6Mri1_v_: relocInfo.o;
+text: .text%__1cOoop_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cOoop_RelocationJpack_data6M_i_;
+text: .text%__1cNLIR_AssemblerPpatching_epilog6MpnMPatchingStub_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_;
+text: .text%__1cMPatchingStubHinstall6MpnOMacroAssembler_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_: c1_LIRAssembler.o;
+text: .text%__1cNLIR_AssemblerUappend_patching_stub6MpnMPatchingStub__v_;
+text: .text%__1cPNewInstanceStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cNLIR_AssemblerJemit_call6MpnOLIR_OpJavaCall__v_;
+text: .text%__1cNLIR_AssemblerKalign_call6MnILIR_Code__v_;
+text: .text%__1cICodeStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cOStaticCallStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o;
+text: .text%__1cOStaticCallStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
+text: .text%__1cNLIR_AssemblerEcall6MpCnJrelocInfoJrelocType_pnMCodeEmitInfo__v_;
+text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o;
+text: .text%__1cMCodeEmitInfoSappend_scope_value6MpnLLIR_OprDesc_pnNGrowableArray4CpnKScopeValue____v_;
+text: .text%__1cMCodeEmitInfoRopr2location_type6MpnLLIR_OprDesc__nILocationEType__;
+text: .text%__1cMCodeEmitInfoRlocation_for_name6MinILocationEType_ii_1_;
+text: .text%__1cIFrameMapRlocation_for_name6kMinILocationEType_p1ii_i_;
+text: .text%__1cNLIR_AssemblerIthrow_op6MnFRInfo_1pnMCodeEmitInfo_i_v_;
+text: .text%__1cMCodeEmitInfoQadd_register_oop6MnFRInfo__v_;
+text: .text%__1cIintArrayIindex_of6kMki_i_: c1_LIREmitter.o;
+text: .text%__1cMCodeEmitInfoYadd_registers_to_oop_map6MpnGOopMap__v_;
+text: .text%__1cYinternal_word_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cYinternal_word_RelocationJpack_data6M_i_;
+text: .text%__1cJrelocInfoKset_format6Mi_v_;
+text: .text%__1cMPatchingStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cRAbstractAssemblerGa_byte6Mi_v_;
+text: .text%__1cRNativeGeneralJumpUinsert_unconditional6FpC1_v_;
+text: .text%__1cNRelocIterator2t6MpnKCodeBuffer_pC3_v_;
+text: .text%__1cJrelocInfobDchange_reloc_info_for_address6FpnNRelocIterator_pCn0AJrelocType_4_v_;
+text: .text%__1cJrelocInfoIset_type6Mn0AJrelocType__v_;
+text: .text%__1cPNewInstanceStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cJOopMapSetMgrow_om_data6M_v_;
+text: .text%__1cOStaticCallStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cWstatic_stub_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cWstatic_stub_RelocationJpack_data6M_i_;
+text: .text%__1cNRelocIteratorTadvance_over_prefix6M_v_;
+text: .text%__1cOCallRelocationFvalue6M_pC_: relocInfo.o;
+text: .text%__1cYinternal_word_RelocationLunpack_data6M_v_;
+text: .text%__1cYinternal_word_RelocationWfix_relocation_at_move6Mi_v_;
+text: .text%__1cYinternal_word_RelocationFvalue6M_pC_: relocInfo.o;
+text: .text%__1cYinternal_word_RelocationGtarget6M_pC_;
+text: .text%__1cODataRelocationJset_value6MpC_v_: relocInfo.o;
+text: .text%__1cODataRelocationGoffset6M_i_: relocInfo.o;
+text: .text%__1cKRelocationRpd_set_data_value6MpCi_v_;
+text: .text%__1cKRelocationSpd_address_in_code6M_ppC_;
+text: .text%__1cWstatic_stub_RelocationLunpack_data6M_v_;
+text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: nmethod.o;
+text: .text%__1cOoop_RelocationHoops_do6MpFppnHoopDesc__v_v_;
+text: .text%__1cOoop_RelocationIoop_addr6M_ppnHoopDesc__;
+text: .text%__1cRresolve_and_patch6FppnHoopDesc__v_;
+text: .text%__1cMPeriodicTaskOreal_time_tick6FI_v_;
+text: .text%__1cPStatSamplerTaskEtask6M_v_: statSampler.o;
+text: .text%__1cLStatSamplerOcollect_sample6F_v_;
+text: .text%__1cLStatSamplerLsample_data6FpnMPerfDataList__v_;
+text: .text%jni_GetPrimitiveArrayCritical: jni.o;
+text: .text%jni_ReleasePrimitiveArrayCritical: jni.o;
+text: .text%jni_SetBooleanField: jni.o;
+text: .text%__1cNFingerprinterIdo_float6M_v_: dump.o;
+text: .text%__1cXNativeSignatureIteratorIdo_float6M_v_: interpreterRuntime.o;
+text: .text%JVM_IsNaN;
+text: .text%__1cNFingerprinterJdo_double6M_v_: dump.o;
+text: .text%__1cXNativeSignatureIteratorJdo_double6M_v_: interpreterRuntime.o;
+text: .text%__1cXNativeSignatureIteratorLpass_double6M_v_: interpreterRuntime.o;
+text: .text%__1cKExceptionsL_throw_args6FpnGThread_pkcinMsymbolHandle_5pnRJavaCallArguments__v_;
+text: .text%jni_GetArrayLength: jni.o;
+text: .text%JVM_Read;
+text: .text%__1cDhpiEread6FipvI_I_: jvm.o;
+text: .text%__1cKJNIHandlesKmake_local6FpnGThread_pnHoopDesc__pnI_jobject__;
+text: .text%__1cRComputeEntryStackJdo_object6Mii_v_: generateOopMap.o;
+text: .text%__1cQComputeCallStackHdo_char6M_v_: generateOopMap.o;
+text: .text%__1cQComputeCallStackJdo_object6Mii_v_: generateOopMap.o;
+text: .text%__1cFciEnvNlookup_method6MpnNinstanceKlass_2pnNsymbolOopDesc_4nJBytecodesECode__pnNmethodOopDesc__;
+text: .text%__1cMLinkResolverbNlinktime_resolve_virtual_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__;
+text: .text%__1cIciMethodXfind_monomorphic_target6MpnHciKlass_22_p0_;
+text: .text%__1cDCHAManalyze_call6FnLKlassHandle_11nMsymbolHandle_2_pnJCHAResult__;
+text: .text%__1cMLinkResolverbCresolve_virtual_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__;
+text: .text%__1cJCHAResult2t6MnLKlassHandle_nMsymbolHandle_2pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___n0E_i_v_;
+text: .text%__1cJCHAResultOis_monomorphic6kM_i_;
+text: .text%__1cJCHAResultSmonomorphic_target6kM_nMmethodHandle__;
+text: .text%__1cIciMethodJwill_link6MpnHciKlass_2nJBytecodesECode__i_;
+text: .text%__1cMGraphBuilderKtry_inline6MpnIciMethod_i_i_;
+text: .text%__1cMGraphBuilderUclear_inline_bailout6M_v_;
+text: .text%__1cIciMethodOshould_exclude6M_i_;
+text: .text%__1cIciMethodPcan_be_compiled6M_i_;
+text: .text%__1cMGraphBuilderVtry_inline_intrinsics6MpnIciMethod__i_;
+text: .text%__1cMGraphBuilderPtry_inline_full6MpnIciMethod_i_i_;
+text: .text%__1cIciMethodIhas_jsrs6kM_i_;
+text: .text%__1cMGraphBuilderWrecursive_inline_level6kMpnIciMethod__i_;
+text: .text%__1cPciObjectFactoryMvm_symbol_at6Fi_pnIciSymbol__;
+text: .text%__1cJNullCheckFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerMdo_NullCheck6MpnJNullCheck__v_;
+text: .text%__1cJNullCheckIcan_trap6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cKObjectTypeEbase6kM_pnJValueType__: c1_ValueType.o;
+text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_ValueStack.o;
+text: .text%__1cMGraphBuilderKpush_scope6MpnIciMethod_pnKBlockBegin_i_v_;
+text: .text%__1cKValueStackKpush_scope6MpnHIRScope__p0_;
+text: .text%__1cOExceptionScopeKpush_scope6M_p0_;
+text: .text%__1cOExceptionScope2t6Mp0_v_;
+text: .text%__1cHIRScopeXcompute_lock_stack_size6M_v_;
+text: .text%__1cMGraphBuilderJScopeDataRcaller_stack_size6kM_i_;
+text: .text%__1cMGraphBuilderJScopeDataLnum_returns6M_i_;
+text: .text%__1cMGraphBuilderJScopeDataXset_inline_cleanup_info6MpnKBlockBegin_pnLInstruction_pnKValueStack__v_;
+text: .text%__1cMGraphBuilderJScopeDataQincr_num_returns6M_v_;
+text: .text%__1cKValueStackJpop_scope6Mii_p0_;
+text: .text%__1cMGraphBuilderJpop_scope6M_v_;
+text: .text%__1cMGraphBuilderTpop_exception_scope6M_v_;
+text: .text%__1cOExceptionScopeJpop_scope6M_p0_;
+text: .text%__1cLCompilationVnotice_inlined_method6MpnIciMethod__v_;
+text: .text%__1cFciEnvVnotice_inlined_method6MpnIciMethod__v_;
+text: .text%__1cMLinkResolverbCresolve_special_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__;
+text: .text%__1cMGraphBuilderOinline_bailout6Mpkc_v_;
+text: .text%__1cLInstructionEprev6MpnKBlockBegin__p0_;
+text: .text%__1cKBlockBeginUresolve_substitution6M_v_;
+text: .text%__1cKBlockBeginPblock_values_do6MpFppnLInstruction__v_v_;
+text: .text%__1cZresolve_substituted_value6FppnLInstruction__v_: c1_Instruction.o;
+text: .text%__1cLInstructionFsubst6M_p0_: c1_Instruction.o;
+text: .text%__1cLInstructionPother_values_do6MpFpp0_v_v_: c1_GraphBuilder.o;
+text: .text%__1cLInstructionPstate_values_do6MpFpp0_v_v_: c1_GraphBuilder.o;
+text: .text%__1cLInstructionPstate_values_do6MpFpp0_v_v_: c1_Instruction.o;
+text: .text%__1cIConstantPother_values_do6MpFppnLInstruction__v_v_;
+text: .text%__1cIBlockEndPother_values_do6MpFppnLInstruction__v_v_;
+text: .text%__1cHIntTypeEsize6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cJNullCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorMdo_NullCheck6MpnJNullCheck__v_;
+text: .text%__1cTNullCheckEliminatorQhandle_NullCheck6MpnJNullCheck__v_;
+text: .text%__1cLInstructionOas_AccessLocal6M_pnLAccessLocal__: c1_GraphBuilder.o;
+text: .text%__1cHIRScopeNtop_scope_bci6kM_i_;
+text: .text%__1cQUseCountComputerPclear_use_count6FpnKBlockBegin__v_: c1_IR.o;
+text: .text%__1cIValueGenMdo_NullCheck6MpnJNullCheck__v_;
+text: .text%__1cJNullCheckKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o;
+text: .text%__1cLLIR_EmitterKnull_check6MpnLLIR_OprDesc_pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenQlock_spill_rinfo6MpnLInstruction_nFRInfo__v_;
+text: .text%__1cQIRScopeDebugInfoRrecord_debug_info6MpnYDebugInformationRecorder__v_: c1_LIREmitter.o;
+text: .text%__1cIRuntime1Yresolve_opt_virtual_call6FpnKJavaThread_pnHoopDesc__pC_;
+text: .text%__1cNSharedRuntimeOresolve_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__;
+text: .text%__1cNSharedRuntimeSresolve_sub_helper6FpnKJavaThread_iipnGThread__nMmethodHandle__;
+text: .text%__1cFframeZsender_for_compiled_frame6kMpnLRegisterMap_pnICodeBlob_i_0_;
+text: .text%__1cLRuntimeStubYcaller_must_gc_arguments6kMpnKJavaThread__i_: codeBlob.o;
+text: .text%__1cHnmethodJis_zombie6kM_i_: nmethod.o;
+text: .text%__1cNnmethodLocker2t6MpnHnmethod__v_;
+text: .text%__1cNSharedRuntimeQfind_callee_info6FpnKJavaThread_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__;
+text: .text%__1cHnmethodQis_native_method6kM_i_: nmethod.o;
+text: .text%__1cHnmethodKpc_desc_at6MpCi_pnGPcDesc__;
+text: .text%__1cGPcDescHreal_pc6kMpknHnmethod__pC_;
+text: .text%__1cLPcDescCacheKpc_desc_at6kMpnHnmethod_pCi_pnGPcDesc__;
+text: .text%__1cLPcDescCacheLadd_pc_desc6MpnGPcDesc__v_;
+text: .text%__1cSvframeStreamCommonYfill_from_compiled_frame6MpnHnmethod_i_v_;
+text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_: vframe.o;
+text: .text%__1cICodeBlobLoop_addr_at6kMi_ppnHoopDesc__;
+text: .text%__1cNSharedRuntimeXfind_callee_info_helper6FpnKJavaThread_rnMvframeStream_rnJBytecodesECode_rnICallInfo_pnGThread__nGHandle__;
+text: .text%__1cPBytecode_invokeNstatic_target6MpnGThread__nMmethodHandle__;
+text: .text%__1cMLinkResolverOresolve_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cKCompiledICZcompute_monomorphic_entry6FnMmethodHandle_nLKlassHandle_iirnOCompiledICInfo_pnGThread__v_;
+text: .text%__1cKCompiledIC2t6MpnKNativeCall__v_;
+text: .text%__1cXvirtual_call_RelocationIparse_ic6FrpnICodeBlob_rpC5rppnHoopDesc_pi_nNRelocIterator__;
+text: .text%__1cKCompiledICIis_clean6kM_i_;
+text: .text%__1cKCompiledICOic_destination6kM_pC_;
+text: .text%__1cKCompiledICWis_in_transition_state6kM_i_;
+text: .text%__1cRInlineCacheBufferIcontains6FpC_i_;
+text: .text%__1cKCompiledICSset_to_monomorphic6MrknOCompiledICInfo__v_;
+text: .text%__1cSCompiledStaticCallSset_to_interpreted6MnMmethodHandle_pC_v_;
+text: .text%__1cSCompiledStaticCallJfind_stub6M_pC_;
+text: .text%__1cNRelocIteratorEnext6M_i_: compiledIC.o;
+text: .text%__1cPBoundRelocationLunpack_data6MnJrelocInfoJrelocType__v_: compiledIC.o;
+text: .text%__1cbBopt_virtual_call_RelocationLstatic_stub6M_pC_;
+text: .text%__1cKNativeCallXset_destination_mt_safe6MpC_v_;
+text: .text%__1cNnmethodLocker2T6M_v_;
+text: .text%__1cNmethodOopDescTverified_code_entry6M_pC_;
+text: .text%jni_GetByteArrayRegion: jni.o;
+text: .text%JVM_DefineClassWithSource;
+text: .text%__1cXjvm_define_class_common6FpnHJNIEnv__pkcpnI_jobject_pkWi53pnGThread__pnH_jclass__: jvm.o;
+text: .text%__1cQSystemDictionaryTresolve_from_stream6FnMsymbolHandle_nGHandle_2pnPClassFileStream_pnGThread__pnMklassOopDesc__;
+text: .text%__1cPClassFileParserbDverify_legal_method_signature6MnMsymbolHandle_1pnGThread__i_;
+text: .text%__1cPClassFileParserZskip_over_field_signature6MpciIpnGThread__1_;
+text: .text%__1cPClassFileParserXverify_legal_class_name6MnMsymbolHandle_pnGThread__v_;
+text: .text%__1cQput_after_lookup6FnMsymbolHandle_0ppnLNameSigHash__i_;
+text: .text%__1cEhash6Fpkc1_I_;
+text: .text%__1cKDictionarybAis_valid_protection_domain6MiInMsymbolHandle_nGHandle_2_i_;
+text: .text%__1cPDictionaryEntrybAcontains_protection_domain6kMpnHoopDesc__i_;
+text: .text%__1cQSystemDictionarybAvalidate_protection_domain6FnTinstanceKlassHandle_nGHandle_2pnGThread__v_;
+text: .text%__1cKDictionaryVadd_protection_domain6MiInTinstanceKlassHandle_nGHandle_2pnGThread__v_;
+text: .text%__1cPDictionaryEntryVadd_protection_domain6MpnHoopDesc__v_;
+text: .text%__1cUverify_byte_codes_fn6F_pv_: verifier.o;
+text: .text%JVM_GetClassCPEntriesCount;
+text: .text%JVM_GetClassCPTypes;
+text: .text%JVM_GetClassNameUTF;
+text: .text%JVM_ReleaseUTF;
+text: .text%JVM_FindClassFromClass;
+text: .text%jni_IsSameObject: jni.o;
+text: .text%JVM_GetClassFieldsCount;
+text: .text%JVM_GetClassMethodsCount;
+text: .text%JVM_GetMethodIxModifiers;
+text: .text%JVM_GetMethodIxByteCodeLength;
+text: .text%JVM_GetMethodIxByteCode;
+text: .text%JVM_GetMethodIxExceptionTableLength;
+text: .text%JVM_GetMethodIxLocalsCount;
+text: .text%JVM_GetMethodIxArgsSize;
+text: .text%JVM_GetMethodIxSignatureUTF;
+text: .text%JVM_GetMethodIxMaxStack;
+text: .text%JVM_GetMethodIxExceptionsCount;
+text: .text%JVM_GetMethodIxExceptionIndexes;
+text: .text%JVM_GetCPMethodNameUTF;
+text: .text%JVM_GetCPMethodClassNameUTF;
+text: .text%jni_NewLocalRef: jni.o;
+text: .text%JVM_GetCPMethodModifiers;
+text: .text%JVM_IsConstructorIx;
+text: .text%JVM_GetCPMethodSignatureUTF;
+text: .text%jni_DeleteGlobalRef: jni.o;
+text: .text%__1cQSystemDictionaryVadd_loader_constraint6FnMsymbolHandle_nGHandle_2pnGThread__v_;
+text: .text%__1cVLoaderConstraintTableJadd_entry6MnMsymbolHandle_pnMklassOopDesc_nGHandle_34pnGThread__i_;
+text: .text%__1cVLoaderConstraintTableJnew_entry6MIpnNsymbolOopDesc_pnMklassOopDesc_ii_pnVLoaderConstraintEntry__;
+text: .text%jni_ToReflectedMethod: jni.o;
+text: .text%__1cKReflectionKnew_method6FnMmethodHandle_iipnGThread__pnHoopDesc__;
+text: .text%__1cNSignatureInfoIdo_array6Mii_v_: reflection.o;
+text: .text%__1cYjava_lang_reflect_MethodGcreate6FpnGThread__nGHandle__;
+text: .text%__1cYjava_lang_reflect_MethodJset_clazz6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodIset_slot6FpnHoopDesc_i_v_;
+text: .text%__1cYjava_lang_reflect_MethodIset_name6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodPset_return_type6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodTset_parameter_types6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodTset_exception_types6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodNset_modifiers6FpnHoopDesc_i_v_;
+text: .text%__1cYjava_lang_reflect_MethodThas_signature_field6F_i_;
+text: .text%__1cYjava_lang_reflect_MethodVhas_annotations_field6F_i_;
+text: .text%__1cYjava_lang_reflect_MethodPset_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodbFhas_parameter_annotations_field6F_i_;
+text: .text%__1cYjava_lang_reflect_MethodZset_parameter_annotations6FpnHoopDesc_2_v_;
+text: .text%__1cYjava_lang_reflect_MethodbChas_annotation_default_field6F_i_;
+text: .text%__1cNmethodOopDescSannotation_default6kM_pnQtypeArrayOopDesc__;
+text: .text%__1cYjava_lang_reflect_MethodWset_annotation_default6FpnHoopDesc_2_v_;
+text: .text%jni_CallIntMethod: jni.o;
+text: .text%jni_CallStaticVoidMethod: jni.o;
+text: .text%jni_DetachCurrentThread;
+text: .text%__1cKJavaThreadEexit6Mi_v_;
+text: .text%__1cQjava_lang_ThreadLthreadGroup6FpnHoopDesc__2_;
+text: .text%JVM_MonitorNotifyAll;
+text: .text%__1cNThreadServiceWcurrent_thread_exiting6FpnKJavaThread__v_;
+text: .text%__1cLensure_join6FpnKJavaThread__v_: thread.o;
+text: .text%__1cQjava_lang_ThreadNset_stillborn6FpnHoopDesc__v_;
+text: .text%__1cKJavaThreadYremove_stack_guard_pages6M_v_;
+text: .text%__1cWThreadLocalAllocBufferFclear6M_v_;
+text: .text%__1cHThreadsGremove6FpnKJavaThread__v_;
+text: .text%__1cNThreadServiceNremove_thread6FpnKJavaThread_i_v_;
+text: .text%__SLIP.DELETER__A: thread.o;
+text: .text%__1cKJavaThread2T6M_v_;
+text: .text%__1cGParker2T6M_v_;
+text: .text%__1cHMonitor2T6M_v_;
+text: .text%__1cFMutex2T6M_v_;
+text: .text%lwp_cond_destroy: os_solaris.o;
+text: .text%lwp_mutex_destroy: os_solaris.o;
+text: .text%__1cUThreadSafepointStateHdestroy6FpnKJavaThread__v_;
+text: .text%__1cUThreadSafepointState2T6M_v_;
+text: .text%__1cGThread2T5B6M_v_;
+text: .text%__1cCosLfree_thread6FpnIOSThread__v_;
+text: .text%__1cIOSThread2T6M_v_;
+text: .text%__1cIOSThreadKpd_destroy6M_v_;
+text: .text%jni_DestroyJavaVM;
+text: .text%jni_AttachCurrentThread;
+text: .text%attach_current_thread: jni.o;
+text: .text%__1cCosWcreate_attached_thread6FpnGThread__i_;
+text: .text%__1cKJavaThreadSallocate_threadObj6MnGHandle_pcipnGThread__v_;
+text: .text%__1cHThreadsKdestroy_vm6F_i_;
+text: .text%__1cKJavaThreadVinvoke_shutdown_hooks6M_v_;
+text: .text%__1cLbefore_exit6FpnKJavaThread__v_;
+text: .text%__1cNWatcherThreadEstop6F_v_;
+text: .text%__1cLStatSamplerJdisengage6F_v_;
+text: .text%__1cMPeriodicTaskJdisenroll6M_v_;
+text: .text%__1cMPeriodicTask2T5B6M_v_;
+text: .text%__1cMPeriodicTaskLis_enrolled6kM_i_;
+text: .text%__1cLStatSamplerHdestroy6F_v_;
+text: .text%__1cMPerfDataList2T6M_v_;
+text: .text%__1cLJvmtiExportNpost_vm_death6F_v_;
+text: .text%__1cUJvmtiEventControllerIvm_death6F_v_;
+text: .text%__1cCosXterminate_signal_thread6F_v_;
+text: .text%__1cCosNsigexitnum_pd6F_i_;
+text: .text%__1cCosNsignal_notify6Fi_v_;
+text: .text%__1cQprint_statistics6F_v_;
+text: .text%__1cFVTuneEexit6F_v_;
+text: .text%__1cIVMThreadXwait_for_vm_thread_exit6F_v_;
+text: .text%__1cUSafepointSynchronizeFbegin6F_v_;
+text: .text%__1cORuntimeServiceWrecord_safepoint_begin6F_v_;
+text: .text%__1cJTimeStampSticks_since_update6kM_x_;
+text: .text%__1cTAbstractInterpreterRnotice_safepoints6F_v_;
+text: .text%__1cKcopy_table6FppC1i_v_: interpreter.o;
+text: .text%__1cUSafepointSynchronizeFblock6FpnKJavaThread__v_;
+text: .text%__1cCosRcurrent_thread_id6F_i_;
+text: .text%__1cJttyLockerbCbreak_tty_lock_for_safepoint6Fi_v_;
+text: .text%__1cCosbCmake_polling_page_unreadable6F_v_;
+text: .text%__1cUThreadSafepointStateXexamine_state_of_thread6Mi_v_;
+text: .text%__1cUSafepointSynchronizeOsafepoint_safe6FpnKJavaThread_nPJavaThreadState__i_;
+text: .text%__1cUThreadSafepointStateMroll_forward6Mn0AMsuspend_type_pnHnmethod_i_v_;
+text: .text%__1cORuntimeServicebDrecord_safepoint_synchronized6F_v_;
+text: .text%__1cUSafepointSynchronizeQdo_cleanup_tasks6F_v_;
+text: .text%__1cSObjectSynchronizerVdeflate_idle_monitors6F_v_;
+text: .text%__1cNObjectMonitorHis_busy6kM_i_;
+text: .text%__1cRInlineCacheBufferUupdate_inline_caches6F_v_;
+text: .text%__1cMCounterDecayFdecay6F_v_;
+text: .text%__1cQSystemDictionaryRnumber_of_classes6F_i_;
+text: .text%__1cQSystemDictionaryStry_get_next_class6F_pnMklassOopDesc__;
+text: .text%__1cKDictionaryStry_get_next_class6M_pnMklassOopDesc__;
+text: .text%__1cNinstanceKlassKmethods_do6MpFpnNmethodOopDesc__v_v_;
+text: .text%__1cJdo_method6FpnNmethodOopDesc__v_: recompilationMonitor.o;
+text: .text%__1cONMethodSweeperFsweep6F_v_;
+text: .text%__1cNCompileBrokerQset_should_block6F_v_;
+text: .text%__1cHVM_ExitbJwait_for_threads_in_native_to_block6F_i_;
+text: .text%__1cURecompilationMonitorbFstop_recompilation_monitor_task6F_v_;
+text: .text%__1cIVMThreadHdestroy6F_v_;
+text: .text%__SLIP.DELETER__A: vmThread.o;
+text: .text%__1cSThreadLocalStorageRpd_invalidate_all6F_v_;
+text: .text%__1cHVM_ExitNset_vm_exited6F_i_;
+text: .text%__1cMexit_globals6F_v_;
+text: .text%__1cVverificationType_exit6F_v_;
+text: .text%__1cQVerificationTypeIfinalize6F_v_;
+text: .text%__1cPperfMemory_exit6F_v_;
+text: .text%__1cPPerfDataManagerHdestroy6F_v_;
+text: .text%__1cIPerfData2T6M_v_;
+text: .text%__1cKPerfMemoryHdestroy6F_v_;
+text: .text%__1cKPerfMemoryUdelete_memory_region6F_v_;
+text: .text%__1cUdelete_shared_memory6FpcI_v_: perfMemory_solaris.o;
+text: .text%__1cLremove_file6Fpkc_v_: perfMemory_solaris.o;
+text: .text%__1cMostream_exit6F_v_;
+text: .text%__SLIP.DELETER__C: ostream.o;
+text: .text%__SLIP.FINAL__A: c1_Items.o;
+# Test Exit
+text: .text%__1cPSignatureStreamHis_done6kM_i_;
+text: .text%JVM_Halt;
+text: .text%__1cHvm_exit6Fi_v_;
+text: .text%__1cIVMThreadHexecute6FpnMVM_Operation__v_;
+text: .text%__1cMVM_OperationNdoit_prologue6M_i_: vm_operations.o;
+text: .text%__1cGThreadMget_priority6Fkpk0_nOThreadPriority__;
+text: .text%__1cCosMget_priority6FkpknGThread_rnOThreadPriority__nIOSReturn__;
+text: .text%__1cCosTget_native_priority6FkpknGThread_pi_nIOSReturn__;
+text: .text%__1cMVM_OperationSset_calling_thread6MpnGThread_nOThreadPriority__v_;
+text: .text%__1cMVM_OperationPevaluation_mode6kM_n0AEMode__: vm_operations.o;
+text: .text%__1cMVM_OperationSis_cheap_allocated6kM_i_: vm_operations.o;
+text: .text%__1cQVMOperationQdDueueDadd6MpnMVM_Operation__i_;
+text: .text%__1cQVMOperationQdDueueOqueue_add_back6MipnMVM_Operation__v_;
+text: .text%__1cQVMOperationQdDueueGinsert6MpnMVM_Operation_2_v_;
+text: .text%__1cQVMOperationQdDueueGunlink6MpnMVM_Operation__v_;
+text: .text%__1cHVM_ExitEname6kM_pkc_: vm_operations.o;
+text: .text%__1cJEventMark2t6MpkcE_v_: vmThread.o;
+text: .text%__1cCosJyield_all6Fi_v_;
+text: .text%__1cGThreadRis_Watcher_thread6kM_i_: vmThread.o;
+text: .text%__1cSInterpreterRuntimeMat_safepoint6FpnKJavaThread__v_;
+text: .text%__1cIVMThreadSevaluate_operation6MpnMVM_Operation__v_;
+text: .text%__1cMVM_OperationIevaluate6M_v_;
+text: .text%__1cHVM_ExitEdoit6M_v_;
+# Test Hello
+text: .text%JVM_GetCPFieldSignatureUTF;
+text: .text%JVM_Write;
+text: .text%__1cDhpiFwrite6FipkvI_I_: jvm.o;
+# Test Sleep
+text: .text%JVM_GetMethodIxExceptionTableEntry;
+text: .text%JVM_GetCPClassNameUTF;
+text: .text%JVM_Sleep;
+text: .text%__1cCosHSolarisTsetup_interruptible6F_pnKJavaThread__;
+text: .text%__1cCosHSolarisTsetup_interruptible6FpnKJavaThread__v_;
+text: .text%__1cUSafepointSynchronizeRis_cleanup_needed6F_i_;
+text: .text%__1cRInlineCacheBufferIis_empty6F_i_;
+text: .text%__1cCosHSolarisVcleanup_interruptible6FpnKJavaThread__v_;
+text: .text%__1cCosOunguard_memory6FpcI_i_;
+# Test IntToString
+text: .text%__1cQChunkPoolCleanerEtask6M_v_: allocation.o;
+text: .text%__1cJChunkPoolMfree_all_but6MI_v_: allocation.o;
+# Test LoadToolkit
+text: .text%JVM_GetClassContext;
+text: .text%__1cNCollectedHeapMobj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: jvm.o;
+text: .text%jni_IsAssignableFrom: jni.o;
+text: .text%__1cOGenerateOopMapGdo_ldc6Mii_v_;
+text: .text%__1cQComputeCallStackIdo_array6Mii_v_: generateOopMap.o;
+text: .text%__1cMGraphBuilderNload_constant6M_v_;
+text: .text%__1cQciBytecodeStreamMget_constant6kM_nKciConstant__;
+text: .text%__1cQciBytecodeStreamSget_constant_index6kM_i_;
+text: .text%__1cFciEnvVget_constant_by_index6MpnPciInstanceKlass_i_nKciConstant__;
+text: .text%__1cFciEnvbAget_constant_by_index_impl6MpnPciInstanceKlass_i_nKciConstant__;
+text: .text%__1cMLinkResolverbBresolve_static_call_or_null6FnLKlassHandle_nMsymbolHandle_21_nMmethodHandle__;
+text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_Canonicalizer.o;
+text: .text%__1cTsort_by_start_block6FppnELoop_2_i_: c1_Loops.o;
+text: .text%__1cILIR_ListLcall_static6MpnLLIR_OprDesc_pCpnMCodeEmitInfo_pnOStaticCallStub__v_: c1_LIREmitter.o;
+text: .text%__1cLLIR_EmitterLcmp_mem_int6MnMLIR_OpBranchNLIR_Condition_nFRInfo_iipnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListLcmp_mem_int6MnMLIR_OpBranchNLIR_Condition_nFRInfo_iipnMCodeEmitInfo__v_;
+text: .text%__1cJValueTypeLas_VoidType6M_pnIVoidType__: c1_Canonicalizer.o;
+text: .text%__1cILIR_ListHint2reg6MinFRInfo__v_: c1_LIREmitter.o;
+text: .text%__1cWstatic_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cRComputeEntryStackIdo_array6Mii_v_: generateOopMap.o;
+text: .text%__1cKValueStackEpush6MpnJValueType_pnLInstruction__v_: c1_Optimizer.o;
+text: .text%__1cEIfOpPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cEIfOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cQNullCheckVisitorHdo_IfOp6MpnEIfOp__v_;
+text: .text%__1cIValueGenHdo_IfOp6MpnEIfOp__v_;
+text: .text%__1cLLIR_EmitterLifop_phase16MnLInstructionJCondition_pnLLIR_OprDesc_4_v_;
+text: .text%__1cLLIR_EmitterLifop_phase26MnFRInfo_pnLLIR_OprDesc_3nLInstructionJCondition__v_;
+text: .text%__1cILIR_ListGbranch6MnMLIR_OpBranchNLIR_Condition_pnFLabel__v_;
+text: .text%__1cRLIR_PeepholeStateUstart_forward_branch6MpnFLabel__v_;
+text: .text%__1cOGenerateOopMapMdo_checkcast6M_v_;
+text: .text%__1cMGraphBuilderLinstance_of6Mi_v_;
+text: .text%__1cKInstanceOfFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerNdo_InstanceOf6MpnKInstanceOf__v_;
+text: .text%__1cJTypeCheckIcan_trap6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderOdirect_compare6MpnHciKlass__i_;
+text: .text%__1cKInstanceOfNas_InstanceOf6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderKcheck_cast6Mi_v_;
+text: .text%__1cJCheckCastFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerMdo_CheckCast6MpnJCheckCast__v_;
+text: .text%__1cJValueTypeKas_IntType6M_pnHIntType__: c1_ValueType.o;
+text: .text%__1cJTypeCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorNdo_InstanceOf6MpnKInstanceOf__v_;
+text: .text%__1cQNullCheckVisitorMdo_CheckCast6MpnJCheckCast__v_;
+text: .text%__1cIValueGenNdo_InstanceOf6MpnKInstanceOf__v_;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator_x86.o;
+text: .text%__1cLLIR_EmitterNinstanceof_op6MpnLLIR_OprDesc_2pnHciKlass_nFRInfo_5ipnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListKinstanceof6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo__v_;
+text: .text%__1cPLIR_OpTypeCheck2t6MnILIR_Code_pnLLIR_OprDesc_3pnHciKlass_33ipnMCodeEmitInfo_7pnICodeStub__v_;
+text: .text%__1cIValueGenMdo_CheckCast6MpnJCheckCast__v_;
+text: .text%__1cILIR_ListJcheckcast6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo_6pnICodeStub__v_;
+text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o;
+text: .text%__1cPLIR_OpTypeCheckFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cTSimpleExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cPLIR_OpTypeCheckJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIROptimizer.o;
+text: .text%__1cIintArrayIindex_of6kMki_i_: c1_LIROptimizer.o;
+text: .text%__1cNLIR_AssemblerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_;
+text: .text%__1cIciObjectIencoding6M_pnI_jobject__;
+text: .text%__1cJAssemblerEcmpl6MnHAddress_pnI_jobject__v_;
+text: .text%__1cTSimpleExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cTSimpleExceptionStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cJLoadFieldIis_equal6kMpnLInstruction__i_: c1_Instruction.o;
+text: .text%__1cJLoadFieldMas_LoadField6M_p0_: c1_Instruction.o;
+text: .text%__1cDPhiPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cDPhiFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorGdo_Phi6MpnDPhi__v_;
+text: .text%__1cLInstructionIas_Local6M_pnFLocal__: c1_GraphBuilder.o;
+text: .text%__1cDPhiGas_Phi6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cIValueGenScompute_phi_arrays6MpnKValueStack_pnGValues_pnIintStack_i_pnLInstruction__;
+text: .text%__1cLLIR_EmitterTset_fpu_stack_empty6M_v_;
+text: .text%__1cIRegAllocKlock_spill6MpnLInstruction_ii_v_;
+text: .text%__1cIRegAllocRextend_spill_area6Mi_v_;
+text: .text%__1cRclear_state_items6FppnLInstruction__v_: c1_CodeGenerator.o;
+text: .text%__1cNLIR_AssemblerTset_fpu_stack_empty6M_v_;
+text: .text%__1cIFrameMapLFpuStackSimFclear6M_v_;
+text: .text%jni_GetEnv;
+text: .text%jni_CallStaticBooleanMethod: jni.o;
+text: .text%__1cOtypeArrayKlassQarray_klass_impl6MipnGThread__pnMklassOopDesc__;
+text: .text%__1cOtypeArrayKlassKinitialize6MpnGThread__v_;
+text: .text%__1cVcreate_gc_point_array6FpnFArena_i_pnNGrowableArray4Ci___;
+text: .text%__1cOGenerateOopMapRdo_exception_edge6MpnOBytecodeStream__v_;
+text: .text%__1cOGenerateOopMapIppop_any6Mi_v_;
+text: .text%__1cYciExceptionHandlerStreamEnext6M_v_: c1_IR.o;
+text: .text%__1cMGraphBuilderQhandle_exception6Mi_v_;
+text: .text%__1cOExceptionScopeFclear6M_v_;
+text: .text%__1cMGraphBuilderJScopeDataJxhandlers6kM_pnJXHandlers__;
+text: .text%__1cTciConstantPoolCache2t6MpnFArena_i_v_;
+text: .text%__1cTciConstantPoolCacheDget6Mi_pv_;
+text: .text%__1cTciConstantPoolCacheEfind6Mi_i_;
+text: .text%__1cTciConstantPoolCacheGinsert6Mipv_v_;
+text: .text%__1cMGraphBuilderHif_null6MpnJValueType_nLInstructionJCondition__v_;
+text: .text%__1cOObjectConstantRas_ObjectConstant6M_p0_: c1_ValueType.o;
+text: .text%__1cMas_ValueType6FnKciConstant__pnJValueType__;
+text: .text%__1cLInstructionGmirror6Fn0AJCondition__1_;
+text: .text%__1cHis_true6FxnLInstructionJCondition_x_i_: c1_Canonicalizer.o;
+text: .text%__1cNCanonicalizerNset_canonical6MpnLInstruction__v_;
+text: .text%__1cKBlockBeginVadd_exception_handler6Mp0_v_;
+text: .text%__1cPBlockBeginArrayIindex_of6kMkpnKBlockBegin__i_: c1_Instruction.o;
+text: .text%__1cOExceptionScopeLadd_handler6MpnIXHandler__v_;
+text: .text%__1cIciObjectEhash6M_i_;
+text: .text%__1cPciObjectFactoryPinsert_non_perm6Mrpn0ANNonPermObject_pnHoopDesc_pnIciObject__v_;
+text: .text%__1cIciObjectMhas_encoding6M_i_;
+text: .text%__1cJValueTypeRas_ObjectConstant6M_pnOObjectConstant__: c1_ValueType.o;
+text: .text%__1cNClassConstantQas_ClassConstant6M_p0_: c1_ValueType.o;
+text: .text%__1cOExceptionScopeKhandler_at6kMi_pnIXHandler__;
+text: .text%__1cLInstructionMas_LoadLocal6M_pnJLoadLocal__: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderIlogic_op6MpnJValueType_nJBytecodesECode__v_;
+text: .text%__1cHLogicOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerKdo_LogicOp6MpnHLogicOp__v_;
+text: .text%__1cHLogicOpEhash6kM_i_: c1_Instruction.o;
+text: .text%__1cHLogicOpEname6kM_pkc_: c1_Instruction.o;
+text: .text%__1cLInstructionIcan_trap6kM_i_: c1_Instruction.o;
+text: .text%__1cMGraphBuilderHconvert6MnJBytecodesECode_nJBasicType_3_v_;
+text: .text%__1cHConvertFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerKdo_Convert6MpnHConvert__v_;
+text: .text%__1cHConvertEhash6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cHConvertEname6kM_pkc_: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderNstore_indexed6MnJBasicType__v_;
+text: .text%__1cIValueMapKkill_array6MpnJValueType__v_;
+text: .text%__1cGBucketKkill_array6MpnJValueType__v_;
+text: .text%__1cLInstructionOas_LoadIndexed6M_pnLLoadIndexed__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionOas_LoadIndexed6M_pnLLoadIndexed__: c1_Instruction.o;
+text: .text%__1cKValueStackRpin_stack_indexed6MpnJValueType__v_;
+text: .text%__1cMStoreIndexedFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerPdo_StoreIndexed6MpnMStoreIndexed__v_;
+text: .text%__1cLAccessArrayIcan_trap6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cLAccessFieldPother_values_do6MpFppnLInstruction__v_v_;
+text: .text%__1cLInstructionPother_values_do6MpFpp0_v_v_: c1_Instruction.o;
+text: .text%__1cIciObjectOis_null_object6kM_i_: ciInstance.o;
+text: .text%__1cMStoreIndexedPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cHConvertPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorKdo_LogicOp6MpnHLogicOp__v_;
+text: .text%__1cQNullCheckVisitorKdo_Convert6MpnHConvert__v_;
+text: .text%__1cQNullCheckVisitorPdo_StoreIndexed6MpnMStoreIndexed__v_;
+text: .text%__1cTNullCheckEliminatorThandle_StoreIndexed6MpnMStoreIndexed__v_;
+text: .text%__1cMciNullObjectMis_classless6kM_i_: ciNullObject.o;
+text: .text%__1cJValueTypeQas_ClassConstant6M_pnNClassConstant__: c1_ValueType.o;
+text: .text%__1cOObjectConstantIencoding6kM_pnI_jobject__;
+text: .text%__1cIValueGenbBrlock_byte_result_with_hint6MpnLInstruction_pknEItem__nFRInfo__;
+text: .text%__1cNc1_AllocTableThas_one_free_masked6kMnKc1_RegMask__i_;
+text: .text%__1cIRegAllocMget_lock_reg6MpnLInstruction_nKc1_RegMask__nFRInfo__;
+text: .text%__1cIRegAllocMget_free_reg6MnKc1_RegMask__nFRInfo__;
+text: .text%__1cNc1_AllocTablePget_free_masked6MnKc1_RegMask__i_;
+text: .text%__1cNClassConstantIencoding6kM_pnI_jobject__;
+text: .text%__1cLLIR_EmitterLopr2jobject6MpnLLIR_OprDesc__pnI_jobject__;
+text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenMrelease_item6MpnEItem__v_;
+text: .text%__1cIValueGenPdo_StoreIndexed6MpnMStoreIndexed__v_;
+text: .text%__1cIValueGenKdo_Convert6MpnHConvert__v_;
+text: .text%__1cIValueGenKdo_LogicOp6MpnHLogicOp__v_;
+text: .text%__1cLLIR_EmitterIlogic_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_5_v_;
+text: .text%__1cILIR_ListLlogical_and6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o;
+text: .text%__1cLLIR_EmitterKconvert_op6MnJBytecodesECode_pnLLIR_OprDesc_nFRInfo_i_v_;
+text: .text%__1cILIR_ListHconvert6MnJBytecodesECode_pnLLIR_OprDesc_4i_v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenKmust_round6MpnLInstruction_pknEItem__i_;
+text: .text%__1cLAccessArrayKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o;
+text: .text%__1cLLIR_EmitterNindexed_store6MnJBasicType_pnLLIR_OprDesc_33nFRInfo_pnMCodeEmitInfo__v_;
+text: .text%__1cLLIR_EmitterXlo_word_offset_in_bytes6kM_i_;
+text: .text%__1cLLIR_EmitterXhi_word_offset_in_bytes6kM_i_;
+text: .text%__1cILIR_ListLstore_array6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_;
+text: .text%__1cIValueGenXexception_handler_start6MpnHIRScope_ipnKValueStack__v_;
+text: .text%__1cLLIR_EmitterNhandler_entry6M_v_;
+text: .text%__1cLLIR_OprFactQdummy_value_type6FpnJValueType__pnLLIR_OprDesc__;
+text: .text%__1cLInstructionKexact_type6kM_pnGciType__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_GraphBuilder.o;
+text: .text%__1cILIR_ListKnull_check6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator.o;
+text: .text%__1cNLIR_OpConvertJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerOemit_opConvert6MpnNLIR_OpConvert__v_;
+text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_pnI_jobject__v_;
+text: .text%__1cJAssemblerKemit_arith6MiipnMRegisterImpl_pnI_jobject__v_;
+text: .text%__1cNLIR_AssemblerIlogic_op6MnILIR_Code_pnLLIR_OprDesc_33_v_;
+text: .text%__1cNLIR_AssemblerOemit_opConvert6MpnNLIR_OpConvert__v_;
+text: .text%__1cNLIR_AssemblerNarray_move_op6MpnLLIR_OprDesc_2nJBasicType_pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerJreg2array6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerPas_ArrayAddress6MpnLLIR_Address_nJBasicType__nHAddress__;
+text: .text%__1cVConstantOopWriteValueIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cUDebugInfoWriteStreamMwrite_handle6MpnI_jobject__v_;
+text: .text%__1cTExceptionRangeTableJadd_entry6Miiiiii_v_;
+text: .text%__1cTExceptionRangeEntry2t6Miiiiii_v_;
+text: .text%__1cTExceptionRangeTableJadd_entry6MnTExceptionRangeEntry__v_;
+text: .text%__1cOExceptionScopeCid6kM_i_;
+text: .text%__1cTExceptionRangeTableTentry_index_for_pco6kMi_i_;
+text: .text%__1cTExceptionRangeTableIentry_at6kMi_pnTExceptionRangeEntry__;
+text: .text%jni_CallStaticVoidMethodV: jni.o;
+text: .text%JVM_GetLastErrorString;
+text: .text%jni_Throw: jni.o;
+text: .text%__1cKExceptionsK_throw_oop6FpnGThread_pkcipnHoopDesc__v_;
+text: .text%JVM_DisableCompiler;
+text: .text%__1cNinstanceKlassbFlookup_method_in_all_interfaces6kMpnNsymbolOopDesc_2_pnNmethodOopDesc__;
+text: .text%JVM_Available;
+text: .text%__1cOGenerateOopMapKpp_new_ref6MpnNCellTypeState_i_v_;
+text: .text%__1cLInstructionMas_LoadField6M_pnJLoadField__: c1_Instruction.o;
+text: .text%__1cHLogicOpOis_commutative6kM_i_;
+text: .text%__1cDCHANprocess_class6FnLKlassHandle_pnNGrowableArray4n0B___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_;
+text: .text%__1cUGenericGrowableArrayMraw_contains6kMpknEGrET__i_;
+text: .text%__1cLArrayLengthFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerOdo_ArrayLength6MpnLArrayLength__v_;
+text: .text%__1cLArrayLengthEhash6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cLArrayLengthEname6kM_pkc_: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderOnew_type_array6M_v_;
+text: .text%__1cMNewTypeArrayFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerPdo_NewTypeArray6MpnMNewTypeArray__v_;
+text: .text%__1cINewArrayIcan_trap6kM_i_: c1_Instruction.o;
+text: .text%__1cJIntrinsicFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerMdo_Intrinsic6MpnJIntrinsic__v_;
+text: .text%__1cJIntrinsicMas_Intrinsic6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cJIntrinsicIcan_trap6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cLAccessArrayPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorOdo_ArrayLength6MpnLArrayLength__v_;
+text: .text%__1cTNullCheckEliminatorShandle_ArrayLength6MpnLArrayLength__v_;
+text: .text%__1cINewArrayPinput_values_do6MpFppnLInstruction__v_v_: c1_Instruction.o;
+text: .text%__1cQNullCheckVisitorPdo_NewTypeArray6MpnMNewTypeArray__v_;
+text: .text%__1cTNullCheckEliminatorPhandle_NewArray6MpnINewArray__v_;
+text: .text%__1cJIntrinsicPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorMdo_Intrinsic6MpnJIntrinsic__v_;
+text: .text%__1cJLoopArrayIindex_of6kMkpnELoop__i_: c1_Loops.o;
+text: .text%__1cINewArrayLas_NewArray6M_p0_: c1_Instruction.o;
+text: .text%__1cILIR_ListOcall_icvirtual6MnFRInfo_pnLLIR_OprDesc_pCpnMCodeEmitInfo__v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListNstore_mem_int6MinFRInfo_inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
+text: .text%__1cIValueGenOdo_ArrayLength6MpnLArrayLength__v_;
+text: .text%__1cLLIR_EmitterMarray_length6MnFRInfo_pnLLIR_OprDesc_pnMCodeEmitInfo__v_;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter.o;
+text: .text%__1cIValueGenPdo_NewTypeArray6MpnMNewTypeArray__v_;
+text: .text%__1cLLIR_EmitterOnew_type_array6MnFRInfo_nJBasicType_pnLLIR_OprDesc_11111pnMCodeEmitInfo__v_;
+text: .text%__1cQNewTypeArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_;
+text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_;
+text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_;
+text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter_x86.o;
+text: .text%__1cILIR_ListOallocate_array6MnFRInfo_11111nJBasicType_1pnICodeStub__v_;
+text: .text%__1cIValueGenMdo_Intrinsic6MpnJIntrinsic__v_;
+text: .text%__1cIValueGenMdo_ArrayCopy6MpnJIntrinsic__v_;
+text: .text%__1cIValueGenQarraycopy_helper6MpnJIntrinsic_pippnMciArrayKlass__v_;
+text: .text%__1cJLoadFieldKexact_type6kM_pnGciType__;
+text: .text%__1cJLoadFieldNdeclared_type6kM_pnGciType__;
+text: .text%__1cQciTypeArrayKlassTis_type_array_klass6M_i_: ciTypeArrayKlass.o;
+text: .text%__1cOas_array_klass6FpnGciType__pnMciArrayKlass__: c1_CodeGenerator.o;
+text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciTypeArrayKlass.o;
+text: .text%__1cMNewTypeArrayKexact_type6kM_pnGciType__;
+text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_Instruction.o;
+text: .text%__1cRpositive_constant6FpnLInstruction__i_: c1_CodeGenerator.o;
+text: .text%__1cLArrayLengthOas_ArrayLength6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cQis_constant_zero6FpnLInstruction__i_: c1_CodeGenerator.o;
+text: .text%__1cILIR_ListJarraycopy6MpnLLIR_OprDesc_22222pnMciArrayKlass_ipnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o;
+text: .text%__1cLLIR_EmitterNwrite_barrier6MpnLLIR_OprDesc_2_v_;
+text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter_x86.o;
+text: .text%__1cILIR_ListUunsigned_shift_right6MpnLLIR_OprDesc_222_v_;
+text: .text%__1cQLIR_OpAllocArrayFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cQNewTypeArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cPLIR_OpArrayCopyFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cQLIR_OpAllocArrayJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerQemit_alloc_array6MpnQLIR_OpAllocArray__v_;
+text: .text%__1cPLIR_OpArrayCopyJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_;
+text: .text%__1cNLIR_AssemblerHic_call6MpCpnMCodeEmitInfo__v_;
+text: .text%__1cJAssemblerEcall6MpCrknQRelocationHolder__v_;
+text: .text%__1cXvirtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
+text: .text%__1cXvirtual_call_RelocationJpack_data6M_i_;
+text: .text%__1cNLIR_AssemblerJconst2mem6MpnJLIR_Const_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerQemit_alloc_array6MpnQLIR_OpAllocArray__v_;
+text: .text%__1cNLIR_AssemblerSarray_element_size6kMnJBasicType__nHAddressLScaleFactor__;
+text: .text%__1cRC1_MacroAssemblerOallocate_array6MpnMRegisterImpl_222inHAddressLScaleFactor_2rnFLabel__v_;
+text: .text%__1cRC1_MacroAssemblerMtry_allocate6MpnMRegisterImpl_2i22rnFLabel__v_;
+text: .text%__1cQNewTypeArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cNLIR_AssemblerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_;
+text: .text%__1cMciArrayKlassMelement_type6M_pnGciType__;
+text: .text%__1cNArrayCopyStub2t6MpnMCodeEmitInfo_pnOStaticCallStub__v_;
+text: .text%__1cFRInfoMset_word_reg6MkpnMRegisterImpl__v_;
+text: .text%__1cNArrayCopyStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cNLIR_AssemblerOpush_parameter6MpnMRegisterImpl_i_v_;
+text: .text%__1cQNewTypeArrayStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cNArrayCopyStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cXvirtual_call_RelocationLunpack_data6M_v_;
+text: .text%__1cIRuntime1Uresolve_virtual_call6FpnKJavaThread_pnHoopDesc__pC_;
+text: .text%__1cKoopFactoryUnew_compiledICHolder6FnMmethodHandle_nLKlassHandle_pnGThread__pnXcompiledICHolderOopDesc__;
+text: .text%__1cVcompiledICHolderKlassIallocate6MpnGThread__pnXcompiledICHolderOopDesc__;
+text: .text%__1cNCollectedHeapWpermanent_obj_allocate6FnLKlassHandle_ipnGThread__pnHoopDesc__: compiledICHolderKlass.o;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: compiledICHolderKlass.o;
+text: .text%__1cRLowMemoryDetectorbLdetect_low_memory_for_collected_pools6F_v_: compiledICHolderKlass.o;
+text: .text%__1cXvirtual_call_RelocationJfirst_oop6M_pC_;
+text: .text%__1cXvirtual_call_RelocationJoop_limit6M_pC_;
+text: .text%__1cNRelocIteratorJset_limit6MpC_v_;
+text: .text%__1cRInlineCacheBufferWcreate_transition_stub6FpnKCompiledIC_pnHoopDesc_pC_v_;
+text: .text%__1cGICStubIset_stub6MpnKCompiledIC_pnHoopDesc_pC_v_;
+text: .text%__1cRInlineCacheBufferXassemble_ic_buffer_code6FpCpnHoopDesc_1_v_;
+text: .text%__1cKCompiledICSset_ic_destination6MpC_v_;
+text: .text%__1cRInlineCacheBufferLnew_ic_stub6F_pnGICStub__;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: objArrayKlass.o;
+text: .text%JVM_NewArray;
+text: .text%__1cKReflectionRreflect_new_array6FpnHoopDesc_ipnGThread__pnMarrayOopDesc__;
+text: .text%__1cSInterpreterRuntimeOmultianewarray6FpnKJavaThread_pi_v_;
+text: .text%__1cNobjArrayKlassOmulti_allocate6MipiipnGThread__pnHoopDesc__;
+text: .text%__1cNinstanceKlassSlookup_osr_nmethod6kMkpnNmethodOopDesc_i_pnHnmethod__;
+text: .text%__1cQSimpleCompPolicyYmethod_back_branch_event6MnMmethodHandle_iipnGThread__v_;
+text: .text%__1cICompilerMsupports_osr6M_i_: c1_Compiler.o;
+text: .text%__1cHciKlassOis_subclass_of6Mp0_i_;
+text: .text%__1cMGraphBuilderQnew_object_array6M_v_;
+text: .text%__1cONewObjectArrayFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerRdo_NewObjectArray6MpnONewObjectArray__v_;
+text: .text%__1cPciObjArrayKlass2t6MnLKlassHandle__v_;
+text: .text%__1cPciObjArrayKlassGloader6M_pnHoopDesc__: ciObjArrayKlass.o;
+text: .text%__1cMGraphBuilderIshift_op6MpnJValueType_nJBytecodesECode__v_;
+text: .text%__1cHShiftOpFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerKdo_ShiftOp6MpnHShiftOp__v_;
+text: .text%__1cHShiftOpEhash6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cHShiftOpEname6kM_pkc_: c1_GraphBuilder.o;
+text: .text%__1cLLoadIndexedOas_LoadIndexed6M_p0_: c1_Instruction.o;
+text: .text%__1cMArithmeticOpIis_equal6kMpnLInstruction__i_: c1_Instruction.o;
+text: .text%__1cDOp2Gas_Op26M_p0_: c1_Instruction.o;
+text: .text%__1cLInstructionMas_LoadField6M_pnJLoadField__: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorRdo_NewObjectArray6MpnONewObjectArray__v_;
+text: .text%__1cDOp2Pinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorKdo_ShiftOp6MpnHShiftOp__v_;
+text: .text%__1cHciKlassMaccess_flags6M_i_;
+text: .text%__1cILIR_ListPallocate_object6MnFRInfo_111ii1pnICodeStub__v_;
+text: .text%__1cLLIR_EmitterOmembar_release6M_v_;
+text: .text%__1cLLIR_EmitterGmembar6M_v_;
+text: .text%__1cIValueGenRdo_NewObjectArray6MpnONewObjectArray__v_;
+text: .text%__1cLLIR_EmitterQnew_object_array6MnFRInfo_pnHciKlass_pnLLIR_OprDesc_11111pnMCodeEmitInfo_7_v_;
+text: .text%__1cSNewObjectArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_;
+text: .text%__1cPciObjArrayKlassEmake6FpnHciKlass__p0_;
+text: .text%__1cPciObjArrayKlassJmake_impl6FpnHciKlass__p0_;
+text: .text%__1cLLIR_EmitterOmembar_acquire6M_v_;
+text: .text%__1cIValueGenKdo_ShiftOp6MpnHShiftOp__v_;
+text: .text%__1cIValueGenPshiftCountRInfo6F_nFRInfo__;
+text: .text%__1cLLIR_EmitterIshift_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_53_v_;
+text: .text%__1cILIR_ListKshift_left6MnFRInfo_i1_v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListKlogical_or6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o;
+text: .text%__1cOLIR_OpAllocObjFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cSNewObjectArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cOLIR_OpAllocObjJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_;
+text: .text%__1cNLIR_AssemblerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_;
+text: .text%__1cRC1_MacroAssemblerPallocate_object6MpnMRegisterImpl_22ii2rnFLabel__v_;
+text: .text%__1cNLIR_AssemblerOmembar_release6M_v_;
+text: .text%__1cNLIR_AssemblerGmembar6M_v_;
+text: .text%__1cSNewObjectArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cNLIR_AssemblerOmembar_acquire6M_v_;
+text: .text%__1cEBaseHas_Base6M_p0_: c1_IR.o;
+text: .text%__1cNLIR_AssemblerOemit_osr_entry6MpnHIRScope_ipnFLabel_i_v_;
+text: .text%__1cSNewObjectArrayStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cNinstanceKlassPadd_osr_nmethod6MpnHnmethod__v_;
+text: .text%__1cUGenericGrowableArrayLraw_at_grow6MipknEGrET__pv_;
+text: .text%__1cNSignatureInfoIdo_short6M_v_: bytecode.o;
+text: .text%jni_MonitorEnter: jni.o;
+text: .text%__1cSObjectSynchronizerJjni_enter6FnGHandle_pnGThread__v_;
+text: .text%jni_MonitorExit: jni.o;
+text: .text%__1cSObjectSynchronizerIjni_exit6FpnHoopDesc_pnGThread__v_;
+text: .text%jni_CallVoidMethod: jni.o;
+text: .text%__1cXJNI_ArgumentPusherVaArgHget_int6M_v_: jni.o;
+text: .text%jni_CallStaticBooleanMethodV: jni.o;
+text: .text%JVM_GetStackTraceDepth;
+text: .text%__1cTjava_lang_ThrowableVget_stack_trace_depth6FpnHoopDesc_pnGThread__i_;
+text: .text%__1cTjava_lang_ThrowableJbacktrace6FpnHoopDesc__2_;
+text: .text%JVM_GetStackTraceElement;
+text: .text%__1cTjava_lang_ThrowableXget_stack_trace_element6FpnHoopDesc_ipnGThread__2_;
+text: .text%__1cbBjava_lang_StackTraceElementGcreate6FnMmethodHandle_ipnGThread__pnHoopDesc__;
+text: .text%__1cbBjava_lang_StackTraceElementNset_className6FpnHoopDesc_2_v_;
+text: .text%__1cbBjava_lang_StackTraceElementOset_methodName6FpnHoopDesc_2_v_;
+text: .text%__1cbBjava_lang_StackTraceElementMset_fileName6FpnHoopDesc_2_v_;
+text: .text%__1cNmethodOopDescUline_number_from_bci6kMi_i_;
+text: .text%__1cbECompressedLineNumberReadStream2t6MpC_v_;
+text: .text%__1cbECompressedLineNumberReadStreamJread_pair6M_i_;
+text: .text%__1cUCompressedReadStreamIread_int6M_i_: methodOop.o;
+text: .text%__1cbBjava_lang_StackTraceElementOset_lineNumber6FpnHoopDesc_i_v_;
+text: .text%__1cFKlassNoop_is_method6kM_i_: typeArrayKlass.o;
+text: .text%__1cFKlassRoop_is_methodData6kM_i_: typeArrayKlass.o;
+text: .text%__1cIciObjectOis_null_object6kM_i_: ciObjectFactory.o;
+text: .text%__1cNObjectMonitorJnotifyAll6MpnGThread__v_;
+text: .text%__1cNObjectMonitorREntryQdDueue_insert6MpnMObjectWaiter_i_v_;
+text: .text%__1cNObjectMonitorbAEntryQdDueue_SelectSuccessor6M_pnMObjectWaiter__;
+text: .text%__1cLServiceUtilLvisible_oop6FpnHoopDesc__i_: objectMonitor_solaris.o;
+text: .text%__1cNObjectMonitorGEnterI6MpnGThread__v_;
+text: .text%JVM_EnableCompiler;
+text: .text%__1cCosHSolarisFEventEpark6Mx_i_: objectMonitor_solaris.o;
+text: .text%__1cJStubQdDueueKremove_all6M_v_;
+text: .text%__1cJStubQdDueueMremove_first6Mi_v_;
+text: .text%__1cJStubQdDueueMremove_first6M_v_;
+text: .text%__1cPICStubInterfaceIfinalize6MpnEStub__v_: icBuffer.o;
+text: .text%__1cGICStubIfinalize6M_v_;
+text: .text%__1cGICStubKcached_oop6kM_pnHoopDesc__;
+text: .text%__1cRInlineCacheBufferUic_buffer_cached_oop6FpC_pnHoopDesc__;
+text: .text%__1cKCompiledICOset_cached_oop6MpnHoopDesc__v_;
+text: .text%__1cOoop_RelocationSfix_oop_relocation6M_v_;
+text: .text%__1cGICStubLdestination6kM_pC_;
+text: .text%__1cRInlineCacheBufferVic_buffer_entry_point6FpC_1_;
+text: .text%__1cPICStubInterfaceEsize6kMpnEStub__i_: icBuffer.o;
+text: .text%__1cUSafepointSynchronizeDend6F_v_;
+text: .text%__1cCosbAmake_polling_page_readable6F_v_;
+text: .text%__1cTAbstractInterpreterRignore_safepoints6F_v_;
+text: .text%__1cGThreadQunboost_priority6Fp0_v_;
+text: .text%__1cUThreadSafepointStateHrestart6M_v_;
+text: .text%__1cORuntimeServiceUrecord_safepoint_end6F_v_;
+text: .text%__1cKJavaThreadbScheck_safepoint_and_suspend_for_native_trans6Fp0_v_;
+# Test LoadFrame
+text: .text%__1cNObjectMonitorGenter26MpnGThread__v_;
+text: .text%__1cICompilerPsupports_native6M_i_: c1_Compiler.o;
+text: .text%__1cLCompilationVcompile_native_method6MpnLCodeOffsets__i_;
+text: .text%__1cIciMethodMnative_entry6M_pC_;
+text: .text%__1cLCompilationUemit_code_for_native6MpCpnLCodeOffsets__v_;
+text: .text%__1cLCompilationXemit_code_prolog_native6MpnIFrameMap__v_;
+text: .text%__1cNLIR_AssemblerRemit_method_entry6MpnLLIR_Emitter_pnHIRScope__v_;
+text: .text%__1cOMacroAssemblerHfat_nop6M_v_;
+text: .text%__1cNLIR_AssemblerQemit_native_call6MpCpnMCodeEmitInfo__v_;
+text: .text%__1cMCodeEmitInfobGcreate_oop_map_for_own_signature6M_pnGOopMap__;
+text: .text%__1cNLIR_AssemblerXemit_native_method_exit6MpnMCodeEmitInfo__v_;
+text: .text%__1cNSignatureInfoHdo_char6M_v_: reflection.o;
+text: .text%__1cNSignatureInfoHdo_bool6M_v_: reflection.o;
+text: .text%jni_CallObjectMethodV: jni.o;
+text: .text%jni_SetObjectField: jni.o;
+text: .text%jni_IsInstanceOf: jni.o;
+text: .text%jni_GetStaticObjectField: jni.o;
+text: .text%__1cbCTwoGenerationCollectorPolicybMshould_try_older_generation_allocation6kMI_i_;
+text: .text%__1cQGenCollectedHeapSattempt_allocation6MIiii_pnIHeapWord__;
+text: .text%__1cQDefNewGenerationIallocate6MIii_pnIHeapWord__: defNewGeneration.o;
+text: .text%__1cKGenerationInext_gen6kM_p0_;
+text: .text%__1cKGenerationYallocation_limit_reached6MpnFSpace_pnIHeapWord_I_4_: tenuredGeneration.o;
+text: .text%__1cQDefNewGenerationTallocate_from_space6MI_pnIHeapWord__;
+text: .text%__1cPVM_GC_OperationNdoit_prologue6M_i_;
+text: .text%__1cPVM_GC_OperationZacquire_pending_list_lock6M_v_;
+text: .text%__1cQinstanceRefKlassZacquire_pending_list_lock6FpnJBasicLock__v_;
+text: .text%__1cXjava_lang_ref_ReferenceWpending_list_lock_addr6F_ppnHoopDesc__;
+text: .text%__1cPVM_GC_OperationQgc_count_changed6kM_i_;
+text: .text%__1cbAVM_GenCollectForAllocationEname6kM_pkc_: vm_operations.o;
+text: .text%__1cbAVM_GenCollectForAllocationEdoit6M_v_;
+text: .text%__1cNJvmtiGCMarker2t6Mi_v_;
+text: .text%__1cQGenCollectedHeapZsatisfy_failed_allocation6MIiipi_pnIHeapWord__;
+text: .text%__1cbCTwoGenerationCollectorPolicyZsatisfy_failed_allocation6MIiipi_pnIHeapWord__;
+text: .text%__1cQGenCollectedHeapNdo_collection6MiiIiiipi_v_;
+text: .text%__1cXTraceMemoryManagerStats2t6Mi_v_;
+text: .text%__1cNMemoryServiceIgc_begin6Fi_v_;
+text: .text%__1cPGCMemoryManagerIgc_begin6M_v_;
+text: .text%__1cKManagementJtimestamp6F_x_;
+text: .text%__1cKGCStatInfoMset_gc_usage6MinLMemoryUsage_i_v_;
+text: .text%__1cTContiguousSpacePoolQget_memory_usage6M_nLMemoryUsage__;
+text: .text%__1cTContiguousSpacePoolNused_in_bytes6M_I_: memoryPool.o;
+text: .text%__1cbBSurvivorContiguousSpacePoolQget_memory_usage6M_nLMemoryUsage__;
+text: .text%__1cbBSurvivorContiguousSpacePoolNused_in_bytes6M_I_: memoryPool.o;
+text: .text%__1cOGenerationPoolQget_memory_usage6M_nLMemoryUsage__;
+text: .text%__1cOGenerationPoolNused_in_bytes6M_I_: memoryPool.o;
+text: .text%__1cQGenCollectedHeapLgc_prologue6Mi_v_;
+text: .text%__1cNCollectedHeapbFaccumulate_statistics_all_tlabs6M_v_;
+text: .text%__1cWThreadLocalAllocBufferbFaccumulate_statistics_before_gc6F_v_;
+text: .text%__1cWThreadLocalAllocBufferVaccumulate_statistics6MIi_v_;
+text: .text%__1cPGlobalTLABStatsHpublish6M_v_;
+text: .text%__1cQGenCollectedHeapTensure_parseability6M_v_;
+text: .text%__1cNCollectedHeapTensure_parseability6M_v_;
+text: .text%__1cNCollectedHeapOfill_all_tlabs6M_v_;
+text: .text%__1cQGenCollectedHeapSgeneration_iterate6Mpn0AKGenClosure_i_v_;
+text: .text%__1cbCGenEnsureParseabilityClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o;
+text: .text%__1cKGenerationTensure_parseability6M_v_: defNewGeneration.o;
+text: .text%__1cKGenerationTensure_parseability6M_v_: tenuredGeneration.o;
+text: .text%__1cKGenerationTensure_parseability6M_v_: compactingPermGenGen.o;
+text: .text%__1cSAllocationProfilerViterate_since_last_gc6F_v_;
+text: .text%__1cUGenGCPrologueClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o;
+text: .text%__1cQDefNewGenerationLgc_prologue6Mi_v_: defNewGeneration.o;
+text: .text%__1cRTenuredGenerationLgc_prologue6Mi_v_;
+text: .text%__1cKGenerationLgc_prologue6Mi_v_: compactingPermGenGen.o;
+text: .text%__1cKGenerationOshould_collect6MiIii_i_: defNewGeneration.o;
+text: .text%__1cQDefNewGenerationKshort_name6kM_pkc_: defNewGeneration.o;
+text: .text%__1cKGenerationIcounters6M_pnRCollectorCounters__: defNewGeneration.o;
+text: .text%__1cQGenCollectedHeapKsave_marks6M_v_;
+text: .text%__1cQDefNewGenerationKsave_marks6M_v_;
+text: .text%__1cbCOneContigSpaceCardGenerationKsave_marks6M_v_;
+text: .text%__1cQDefNewGenerationHcollect6MiiIii_v_;
+text: .text%__1cQDefNewGenerationbAcollection_attempt_is_safe6M_i_;
+text: .text%__1cRTenuredGenerationZpromotion_attempt_is_safe6kMIi_i_;
+text: .text%__1cKGenerationYmax_contiguous_available6kM_I_;
+text: .text%__1cbCOneContigSpaceCardGenerationUcontiguous_available6kM_I_;
+text: .text%__1cQDefNewGenerationbIinit_assuming_no_promotion_failure6M_v_;
+text: .text%__1cQDefNewGenerationOIsAliveClosure2t6MpnKGeneration__v_;
+text: .text%__1cSScanWeakRefClosure2t6MpnQDefNewGeneration__v_;
+text: .text%__1cLCardTableRSbGprepare_for_younger_refs_iterate6Mi_v_;
+text: .text%__1cULRUCurrentHeapPolicy2t6M_v_;
+text: .text%__1cPCollectorPolicyPis_train_policy6M_i_: collectorPolicy.o;
+text: .text%__1cPFastScanClosure2t6MpnQDefNewGeneration_i_v_;
+text: .text%__1cQDefNewGenerationbCFastEvacuateFollowersClosure2t6MpnQGenCollectedHeap_ip0pnPFastScanClosure_6_v_;
+text: .text%__1cQGenCollectedHeapUprocess_strong_roots6Miiin0ATClassScanningOption_pnQOopsInGenClosure_3_v_;
+text: .text%__1cKSharedHeapbAchange_strong_roots_parity6M_v_;
+text: .text%__1cMSubTasksDonePis_task_claimed6Mi_i_;
+text: .text%__1cIUniverseHoops_do6FpnKOopClosure_i_v_;
+text: .text%__1cPFastScanClosureGdo_oop6MppnHoopDesc__v_: defNewGeneration.o;
+text: .text%__1cQDefNewGenerationWcopy_to_survivor_space6MpnHoopDesc_p2_2_;
+text: .text%__1cKJNIHandlesHoops_do6FpnKOopClosure__v_;
+text: .text%__1cOJNIHandleBlockHoops_do6MpnKOopClosure__v_;
+text: .text%__1cHThreadsHoops_do6FpnKOopClosure__v_;
+text: .text%__1cKJavaThreadHoops_do6MpnKOopClosure__v_;
+text: .text%__1cGThreadHoops_do6MpnKOopClosure__v_;
+text: .text%__1cKHandleAreaHoops_do6MpnKOopClosure__v_;
+text: .text%__1cNchunk_oops_do6FpnKOopClosure_pnFChunk_pc_I_: handles.o;
+text: .text%__1cQStackFrameStream2t6MpnKJavaThread_i_v_;
+text: .text%__1cFframeQoops_do_internal6MpnKOopClosure_pnLRegisterMap_i_v_;
+text: .text%__1cFframeToops_interpreted_do6MpnKOopClosure_pknLRegisterMap_i_v_;
+text: .text%__1cFframeVinterpreter_frame_bci6kM_i_;
+text: .text%__1cFframebDinterpreter_frame_monitor_end6kM_pnPBasicObjectLock__;
+text: .text%__1cFframebFinterpreter_frame_monitor_begin6kM_pnPBasicObjectLock__;
+text: .text%__1cRInterpreterOopMap2t6M_v_;
+text: .text%__1cRInterpreterOopMapKinitialize6M_v_;
+text: .text%__1cNmethodOopDescImask_for6MipnRInterpreterOopMap__v_;
+text: .text%__1cNinstanceKlassImask_for6MnMmethodHandle_ipnRInterpreterOopMap__v_;
+text: .text%__1cLOopMapCache2t6M_v_;
+text: .text%__1cLOopMapCacheGlookup6MnMmethodHandle_ipnRInterpreterOopMap__v_;
+text: .text%__1cLOopMapCacheIentry_at6kMi_pnQOopMapCacheEntry__;
+text: .text%__1cRInterpreterOopMapIis_empty6M_i_;
+text: .text%__1cQOopMapCacheEntryEfill6MnMmethodHandle_i_v_;
+text: .text%__1cQOopMapCacheEntryFflush6M_v_;
+text: .text%__1cQOopMapCacheEntryTdeallocate_bit_mask6M_v_;
+text: .text%__1cQOopMapCacheEntryPfill_for_native6M_v_;
+text: .text%__1cQOopMapCacheEntryRallocate_bit_mask6M_v_;
+text: .text%__1cTMaskFillerForNative2t6MnMmethodHandle_pIi_v_: oopMapCache.o;
+text: .text%__1cNFingerprinterLfingerprint6M_X_: oopMapCache.o;
+text: .text%__1cTMaskFillerForNativeLpass_object6M_v_: oopMapCache.o;
+text: .text%__1cRInterpreterOopMapNresource_copy6MpnQOopMapCacheEntry__v_;
+text: .text%__1cRInterpreterOopMapLiterate_oop6MpnNOffsetClosure__v_;
+text: .text%__1cXInterpreterFrameClosureJoffset_do6Mi_v_: frame.o;
+text: .text%__1cRInterpreterOopMap2T6M_v_;
+text: .text%__1cTOopMapForCacheEntry2t6MnMmethodHandle_ipnQOopMapCacheEntry__v_;
+text: .text%__1cTOopMapForCacheEntryLcompute_map6MpnGThread__v_;
+text: .text%__1cTOopMapForCacheEntryRpossible_gc_point6MpnOBytecodeStream__i_;
+text: .text%__1cTOopMapForCacheEntryOreport_results6kM_i_: oopMapCache.o;
+text: .text%__1cOGenerateOopMapVresult_for_basicblock6Mi_v_;
+text: .text%__1cTOopMapForCacheEntryZfill_stackmap_for_opcodes6MpnOBytecodeStream_pnNCellTypeState_4i_v_;
+text: .text%__1cQOopMapCacheEntryIset_mask6MpnNCellTypeState_2i_v_;
+text: .text%__1cFframeNoops_entry_do6MpnKOopClosure_pknLRegisterMap__v_;
+text: .text%__1cPJavaCallWrapperHoops_do6MpnKOopClosure__v_;
+text: .text%__1cXNativeSignatureIteratorHdo_long6M_v_: oopMapCache.o;
+text: .text%__1cTMaskFillerForNativeJpass_long6M_v_: oopMapCache.o;
+text: .text%__1cFframebHnext_monitor_in_interpreter_frame6kMpnPBasicObjectLock__2_;
+text: .text%__1cOGenerateOopMapPdo_monitorenter6Mi_v_;
+text: .text%__1cOGenerateOopMapXreplace_all_CTS_matches6MnNCellTypeState_1_v_;
+text: .text%__1cOGenerateOopMapMmonitor_push6MnNCellTypeState__v_;
+text: .text%__1cQComputeCallStackHdo_bool6M_v_: generateOopMap.o;
+text: .text%__1cQComputeCallStackHdo_long6M_v_: generateOopMap.o;
+text: .text%__1cOGenerateOopMapOdo_monitorexit6Mi_v_;
+text: .text%__1cOGenerateOopMapLmonitor_pop6M_nNCellTypeState__;
+text: .text%__1cRComputeEntryStackHdo_long6M_v_: generateOopMap.o;
+text: .text%__1cPBytecode_invokeIis_valid6kM_i_: frame.o;
+text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: oopMapCache.o;
+text: .text%__1cFframebDoops_interpreted_arguments_do6MnMsymbolHandle_ipnKOopClosure__v_;
+text: .text%__1cRArgumentOopFinderDset6MinJBasicType__v_: frame.o;
+text: .text%__1cIVMThreadHoops_do6MpnKOopClosure__v_;
+text: .text%__1cQVMOperationQdDueueHoops_do6MpnKOopClosure__v_;
+text: .text%__1cQVMOperationQdDueueNqueue_oops_do6MipnKOopClosure__v_;
+text: .text%__1cSObjectSynchronizerHoops_do6FpnKOopClosure__v_;
+text: .text%__1cMFlatProfilerHoops_do6FpnKOopClosure__v_;
+text: .text%__1cKManagementHoops_do6FpnKOopClosure__v_;
+text: .text%__1cNMemoryServiceHoops_do6FpnKOopClosure__v_;
+text: .text%__1cKMemoryPoolHoops_do6MpnKOopClosure__v_;
+text: .text%__1cNMemoryManagerHoops_do6MpnKOopClosure__v_;
+text: .text%__1cNThreadServiceHoops_do6FpnKOopClosure__v_;
+text: .text%__1cLJvmtiExportHoops_do6FpnKOopClosure__v_;
+text: .text%__1cXJvmtiCurrentBreakpointsHoops_do6FpnKOopClosure__v_;
+text: .text%__1cbGJvmtiVMObjectAllocEventCollectorXoops_do_for_all_threads6FpnKOopClosure__v_;
+text: .text%__1cQSystemDictionaryHoops_do6FpnKOopClosure__v_;
+text: .text%__1cQSystemDictionaryRpreloaded_oops_do6FpnKOopClosure__v_;
+text: .text%__1cKDictionaryHoops_do6MpnKOopClosure__v_;
+text: .text%__1cPDictionaryEntrybDprotection_domain_set_oops_do6MpnKOopClosure__v_: dictionary.o;
+text: .text%__1cQPlaceholderTableHoops_do6MpnKOopClosure__v_;
+text: .text%__1cVLoaderConstraintTableHoops_do6MpnKOopClosure__v_;
+text: .text%__1cUCompactingPermGenGenUyounger_refs_iterate6MpnQOopsInGenClosure__v_;
+text: .text%__1cbCOneContigSpaceCardGenerationUyounger_refs_iterate6MpnQOopsInGenClosure__v_;
+text: .text%__1cKGenerationbDyounger_refs_in_space_iterate6MpnFSpace_pnQOopsInGenClosure__v_;
+text: .text%__1cLCardTableRSbDyounger_refs_in_space_iterate6MpnFSpace_pnQOopsInGenClosure__v_;
+text: .text%__1cPContiguousSpaceLnew_dcto_cl6MpnKOopClosure_nRCardTableModRefBSOPrecisionStyle_pnIHeapWord__pnVDirtyCardToOopClosure__;
+text: .text%__1cPContiguousSpaceZused_region_at_save_marks6kM_nJMemRegion__: space.o;
+text: .text%__1cRCardTableModRefBSWnon_clean_card_iterate6MpnFSpace_nJMemRegion_pnVDirtyCardToOopClosure_pnQMemRegionClosure_i_v_;
+text: .text%__1cRCardTableModRefBSbBnon_clean_card_iterate_work6MnJMemRegion_pnQMemRegionClosure_i_v_;
+text: .text%__1cJMemRegionMintersection6kMk0_0_;
+text: .text%__1cYClearNoncleanCardWrapperMdo_MemRegion6MnJMemRegion__v_: cardTableRS.o;
+text: .text%__1cYClearNoncleanCardWrapperKclear_card6MpW_i_: cardTableRS.o;
+text: .text%__1cVDirtyCardToOopClosureMdo_MemRegion6MnJMemRegion__v_;
+text: .text%__1cWOffsetTableContigSpaceLblock_start6kMpkv_pnIHeapWord__: space.o;
+text: .text%__1cbBBlockOffsetArrayContigSpaceSblock_start_unsafe6kMpkv_pnIHeapWord__;
+text: .text%__1cPContiguousSpaceKblock_size6kMpknIHeapWord__I_;
+text: .text%__1cUContiguousSpaceDCTOCOget_actual_top6MpnIHeapWord_2_2_;
+text: .text%__1cPContiguousSpaceRtoContiguousSpace6M_p0_: space.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceKlass.o;
+text: .text%__1cPFiltering_DCTOCPwalk_mem_region6MnJMemRegion_pnIHeapWord_3_v_;
+text: .text%__1cUContiguousSpaceDCTOCXwalk_mem_region_with_cl6MnJMemRegion_pnIHeapWord_3pnQFilteringClosure__v_;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: methodKlass.o;
+text: .text%__1cLmethodKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: instanceKlassKlass.o;
+text: .text%__1cSinstanceKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cNinstanceKlassViterate_static_fields6MpnKOopClosure__v_;
+text: .text%__1cLklassVtablePoop_oop_iterate6MpnKOopClosure__v_;
+text: .text%__1cQFilteringClosureGdo_oop6MppnHoopDesc__v_;
+text: .text%__1cLklassItablePoop_oop_iterate6MpnKOopClosure__v_;
+text: .text%__1cKklassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cKOopClosureXshould_remember_klasses6kM_ki_: space.o;
+text: .text%__1cNinstanceKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_;
+text: .text%__1cWconstantPoolCacheKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: cpCacheKlass.o;
+text: .text%__1cWconstantPoolCacheKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cWConstantPoolCacheEntryLoop_iterate6MpnKOopClosure__v_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: objArrayKlassKlass.o;
+text: .text%__1cSobjArrayKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cParrayKlassKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cNobjArrayKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_;
+text: .text%__1cNinstanceKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_;
+text: .text%__1cNobjArrayKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: typeArrayKlass.o;
+text: .text%__1cOtypeArrayKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: constMethodKlass.o;
+text: .text%__1cQconstMethodKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: methodKlass.o;
+text: .text%__1cLmethodKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: constantPoolKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constantPoolKlass.o;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: constMethodKlass.o;
+text: .text%__1cQconstMethodKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: constantPoolKlass.o;
+text: .text%__1cRconstantPoolKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: constMethodKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: constMethodKlass.o;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: symbolKlass.o;
+text: .text%__1cLsymbolKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: symbolKlass.o;
+text: .text%__1cLsymbolKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: constantPoolKlass.o;
+text: .text%__1cRconstantPoolKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: cpCacheKlass.o;
+text: .text%__1cWconstantPoolCacheKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cWConstantPoolCacheEntryNoop_iterate_m6MpnKOopClosure_nJMemRegion__v_;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: cpCacheKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: cpCacheKlass.o;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: typeArrayKlass.o;
+text: .text%__1cOtypeArrayKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: instanceKlassKlass.o;
+text: .text%__1cSinstanceKlassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cNinstanceKlassViterate_static_fields6MpnKOopClosure_nJMemRegion__v_;
+text: .text%__1cLklassVtableRoop_oop_iterate_m6MpnKOopClosure_nJMemRegion__v_;
+text: .text%__1cLklassItableRoop_oop_iterate_m6MpnKOopClosure_nJMemRegion__v_;
+text: .text%__1cKklassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure_nJMemRegion__v_;
+text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure_nJMemRegion__v_;
+text: .text%__1cKOopClosureIdo_oop_v6MppnHoopDesc__v_: space.o;
+text: .text%__1cLOopMapCacheLoop_iterate6MpnKOopClosure__v_;
+text: .text%__1cRInterpreterOopMapLoop_iterate6MpnKOopClosure__v_;
+text: .text%__1cVcompiledICHolderKlassIoop_size6kMpnHoopDesc__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: compiledICHolderKlass.o;
+text: .text%__1cVcompiledICHolderKlassPoop_oop_iterate6MpnHoopDesc_pnKOopClosure__i_;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: klassKlass.o;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: klassKlass.o;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: typeArrayKlassKlass.o;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_: arrayKlassKlass.o;
+text: .text%__1cLCardTableRSUyounger_refs_iterate6MpnKGeneration_pnQOopsInGenClosure__v_;
+text: .text%__1cMSubTasksDoneTall_tasks_completed6M_v_;
+text: .text%__1cQDefNewGenerationbCFastEvacuateFollowersClosureHdo_void6M_v_;
+text: .text%__1cQGenCollectedHeapbCoop_since_save_marks_iterate6MipnPFastScanClosure_2_v_;
+text: .text%__1cQDefNewGenerationbFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_;
+text: .text%__1cPContiguousSpacebFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_;
+text: .text%__1cNobjArrayKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_;
+text: .text%__1cNinstanceKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_;
+text: .text%__1cFKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_: typeArrayKlass.o;
+text: .text%__1cQinstanceRefKlassSoop_oop_iterate_nv6MpnHoopDesc_pnPFastScanClosure__i_;
+text: .text%__1cXjava_lang_ref_ReferenceNreferent_addr6FpnHoopDesc__p2_;
+text: .text%__1cSReferenceProcessorSdiscover_reference6MpnHoopDesc_nNReferenceType__i_;
+text: .text%__1cXjava_lang_ref_ReferenceJnext_addr6FpnHoopDesc__p2_;
+text: .text%__1cXjava_lang_ref_ReferencePdiscovered_addr6FpnHoopDesc__p2_;
+text: .text%__1cSReferenceProcessorTget_discovered_list6MnNReferenceType__ppnHoopDesc__;
+text: .text%__1cKGenerationHpromote6MpnHoopDesc_Ip2_2_;
+text: .text%__1cbCOneContigSpaceCardGenerationIallocate6MIii_pnIHeapWord__: tenuredGeneration.o;
+text: .text%__1cbCOneContigSpaceCardGenerationbFoop_since_save_marks_iterate_nv6MpnPFastScanClosure__v_;
+text: .text%__1cQGenCollectedHeapbAno_allocs_since_save_marks6Mi_i_;
+text: .text%__1cQDefNewGenerationbAno_allocs_since_save_marks6M_i_;
+text: .text%__1cbCOneContigSpaceCardGenerationbAno_allocs_since_save_marks6M_i_;
+text: .text%__1cQDefNewGenerationUFastKeepAliveClosure2t6Mp0pnSScanWeakRefClosure__v_;
+text: .text%__1cQDefNewGenerationQKeepAliveClosure2t6MpnSScanWeakRefClosure__v_;
+text: .text%__1cbDReferenceProcessorInitializerIis_clean6kM_v_: concurrentMarkSweepGeneration.o;
+text: .text%__1cSReferenceProcessorbDprocess_discovered_references6M_v_;
+text: .text%__1cSReferenceProcessorbAprocess_discovered_reflist6MppnHoopDesc_pnPReferencePolicy_i_v_;
+text: .text%__1cSReferenceProcessorOprocess_phase16MppnHoopDesc_pnPReferencePolicy_pnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_;
+text: .text%__1cQDefNewGenerationOIsAliveClosureLdo_object_b6MpnHoopDesc__i_;
+text: .text%__1cULRUCurrentHeapPolicyWshould_clear_reference6MpnHoopDesc__i_;
+text: .text%__1cbBjava_lang_ref_SoftReferenceFclock6F_x_;
+text: .text%__1cbBjava_lang_ref_SoftReferenceJtimestamp6FpnHoopDesc__x_;
+text: .text%__1cXjava_lang_ref_ReferenceIset_next6FpnHoopDesc_2_v_;
+text: .text%__1cQDefNewGenerationUFastKeepAliveClosureGdo_oop6MppnHoopDesc__v_;
+text: .text%__1cSReferenceProcessorOprocess_phase26MppnHoopDesc_pnRBoolObjectClosure_pnKOopClosure__v_;
+text: .text%__1cSReferenceProcessorOprocess_phase36MppnHoopDesc_ipnRBoolObjectClosure_pnKOopClosure_pnLVoidClosure__v_;
+text: .text%__1cSReferenceProcessorQprocess_phaseJNI6M_v_;
+text: .text%__1cKJNIHandlesMweak_oops_do6FpnRBoolObjectClosure_pnKOopClosure__v_;
+text: .text%__1cOJNIHandleBlockMweak_oops_do6MpnRBoolObjectClosure_pnKOopClosure__v_;
+text: .text%__1cQDefNewGenerationLswap_spaces6M_v_;
+text: .text%__1cIageTablebAcompute_tenuring_threshold6MI_i_;
+text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: defNewGeneration.o;
+text: .text%__1cSReferenceProcessorbDenqueue_discovered_references6M_i_;
+text: .text%__1cXjava_lang_ref_ReferenceRpending_list_addr6F_ppnHoopDesc__;
+text: .text%__1cSReferenceProcessorbBenqueue_discovered_reflists6MppnHoopDesc__v_;
+text: .text%__1cSReferenceProcessorbAenqueue_discovered_reflist6MpnHoopDesc_p2_v_;
+text: .text%__1cQGenCollectedHeapPupdate_gc_stats6Mii_v_: genCollectedHeap.o;
+text: .text%__1cKGenerationPupdate_gc_stats6Mii_v_: defNewGeneration.o;
+text: .text%__1cRTenuredGenerationPupdate_gc_stats6Mii_v_;
+text: .text%__1cVAdaptivePaddedAverageGsample6Mf_v_;
+text: .text%__1cKGenerationPupdate_gc_stats6Mii_v_: compactingPermGenGen.o;
+text: .text%__1cRTenuredGenerationOshould_collect6MiIii_i_;
+text: .text%__1cKGenerationPshould_allocate6MIii_i_: tenuredGeneration.o;
+text: .text%__1cbCOneContigSpaceCardGenerationEfree6kM_I_;
+text: .text%__1cQDefNewGenerationQcompute_new_size6M_v_;
+text: .text%__1cNMemoryServiceStrack_memory_usage6F_v_;
+text: .text%__1cRLowMemoryDetectorRdetect_low_memory6F_v_;
+text: .text%__1cQGenCollectedHeapLgc_epilogue6Mi_v_;
+text: .text%__1cNCollectedHeapQresize_all_tlabs6M_v_;
+text: .text%__1cWThreadLocalAllocBufferQresize_all_tlabs6F_v_;
+text: .text%__1cWThreadLocalAllocBufferGresize6M_v_;
+text: .text%__1cUGenGCEpilogueClosureNdo_generation6MpnKGeneration__v_: genCollectedHeap.o;
+text: .text%__1cQDefNewGenerationLgc_epilogue6Mi_v_;
+text: .text%__1cRTenuredGenerationLgc_epilogue6Mi_v_;
+text: .text%__1cbCOneContigSpaceCardGenerationLgc_epilogue6Mi_v_;
+text: .text%__1cRTenuredGenerationPupdate_counters6M_v_;
+text: .text%__1cUCompactingPermGenGenPupdate_counters6M_v_;
+text: .text%__1cXTraceMemoryManagerStats2T6M_v_;
+text: .text%__1cNMemoryServiceGgc_end6Fi_v_;
+text: .text%__1cPGCMemoryManagerGgc_end6M_v_;
+text: .text%__1cRLowMemoryDetectorWdetect_after_gc_memory6FpnKMemoryPool__v_;
+text: .text%__1cNJvmtiGCMarker2T6M_v_;
+text: .text%__1cPVM_GC_OperationNdoit_epilogue6M_v_;
+text: .text%__1cPVM_GC_OperationbKrelease_and_notify_pending_list_lock6M_v_;
+text: .text%__1cQinstanceRefKlassbKrelease_and_notify_pending_list_lock6FipnJBasicLock__v_;
+text: .text%__1cXJNI_ArgumentPusherVaArgIget_long6M_v_: jni.o;
+text: .text%jni_GetIntArrayRegion: jni.o;
+text: .text%jni_SetIntArrayRegion: jni.o;
+text: .text%jni_PushLocalFrame: jni.o;
+text: .text%jni_PopLocalFrame: jni.o;
+text: .text%__1cMGraphBuilderJnegate_op6MpnJValueType__v_;
+text: .text%__1cINegateOpFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerLdo_NegateOp6MpnINegateOp__v_;
+text: .text%__1cMLinkResolverbPlinktime_resolve_interface_method_or_null6FnLKlassHandle_nMsymbolHandle_21i_nMmethodHandle__;
+text: .text%__1cPciInstanceKlassLimplementor6M_p0_;
+text: .text%__1cINegateOpPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorLdo_NegateOp6MpnINegateOp__v_;
+text: .text%__1cIValueGenJspill_one6MpnJValueType__v_;
+text: .text%__1cIRegAllocbBget_smallest_value_to_spill6kMpnJValueType__pnLInstruction__;
+text: .text%__1cLLIR_EmitterRarray_store_check6MpnLLIR_OprDesc_2nFRInfo_33pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListLstore_check6MpnLLIR_OprDesc_2222pnMCodeEmitInfo__v_;
+text: .text%__1cPLIR_OpTypeCheck2t6MnILIR_Code_pnLLIR_OprDesc_3333pnMCodeEmitInfo__v_;
+text: .text%__1cXArrayStoreExceptionStub2t6MpnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListLshift_right6MnFRInfo_i1_v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListLshift_right6MpnLLIR_OprDesc_222_v_;
+text: .text%__1cIValueGenLdo_NegateOp6MpnINegateOp__v_;
+text: .text%__1cLLIR_EmitterGnegate6MnFRInfo_pnLLIR_OprDesc__v_;
+text: .text%__1cILIR_ListGnegate6MnFRInfo_1_v_: c1_LIREmitter.o;
+text: .text%__1cXArrayStoreExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cXArrayStoreExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cNLIR_AssemblerEleal6MpnLLIR_OprDesc_2_v_;
+text: .text%__1cNLIR_AssemblerGnegate6MpnLLIR_OprDesc_2_v_;
+text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler_x86.o;
+text: .text%__1cXArrayStoreExceptionStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cIRuntime1Tresolve_static_call6FpnKJavaThread_pnHoopDesc__pC_;
+text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_;
+text: .text%__1cSCompiledStaticCallIis_clean6kM_i_;
+text: .text%__1cSCompiledStaticCallDset6MrknOStaticCallInfo__v_;
+text: .text%__1cMLinkResolverYresolve_interface_method6FrnMmethodHandle_rnLKlassHandle_nSconstantPoolHandle_ipnGThread__v_;
+text: .text%__1cHciKlassSsuper_check_offset6M_I_;
+text: .text%__1cIRuntime1Thandle_wrong_method6FpnKJavaThread_pnHoopDesc__pC_;
+text: .text%__1cNSharedRuntimeTreresolve_call_site6FpnKJavaThread_pnGThread__nMmethodHandle__;
+text: .text%__1cFframeRis_compiled_frame6kMpi_i_;
+text: .text%__1cHnmethodOis_java_method6kM_i_: nmethod.o;
+text: .text%__1cGEventsDlog6FpkcE_v_: sharedRuntime.o;
+text: .text%__1cJCodeCacheMfind_nmethod6Fpv_pnHnmethod__;
+text: .text%__1cNRelocIteratorEnext6M_i_: sharedRuntime.o;
+text: .text%__1cKCompiledICMset_to_clean6M_v_;
+text: .text%__1cKCompiledICMstub_address6kM_pC_;
+text: .text%__1cGICStubFclear6M_v_;
+text: .text%__1cNSharedRuntimeSfind_callee_method6FpnKJavaThread_pnGThread__nMmethodHandle__;
+text: .text%__1cRInlineCacheBufferSic_destination_for6FpnKCompiledIC__pC_;
+text: .text%__1cIRuntime1Jarraycopy6FpnHoopDesc_i2ii_i_;
+text: .text%__1cMGraphBuilderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_;
+text: .text%__1cYDebugInformationRecorderNadd_dependent6MpnPciInstanceKlass_pnIciMethod__v_;
+text: .text%__1cNinstanceKlassVadd_dependent_nmethod6MpnHnmethod__v_;
+text: .text%__1cJCodeCacheXmark_for_deoptimization6FpnMklassOopDesc__i_;
+text: .text%__1cNinstanceKlassXmark_dependent_nmethods6MpnMklassOopDesc__i_;
+text: .text%jni_NewWeakGlobalRef: jni.o;
+text: .text%__1cKJNIHandlesQmake_weak_global6FnGHandle__pnI_jobject__;
+text: .text%__1cMLinkResolverbBlookup_method_in_interfaces6FrnMmethodHandle_nLKlassHandle_nMsymbolHandle_4pnGThread__v_;
+text: .text%jni_CallIntMethodV: jni.o;
+text: .text%Unsafe_GetObject;
+text: .text%jni_CallBooleanMethod: jni.o;
+text: .text%jni_CallVoidMethodV: jni.o;
+text: .text%JVM_GetClassDeclaredMethods;
+text: .text%JVM_InvokeMethod;
+text: .text%__1cKReflectionNinvoke_method6FpnHoopDesc_nGHandle_nOobjArrayHandle_pnGThread__2_;
+text: .text%__1cYjava_lang_reflect_MethodFclazz6FpnHoopDesc__2_;
+text: .text%__1cYjava_lang_reflect_MethodEslot6FpnHoopDesc__i_;
+text: .text%__1cYjava_lang_reflect_MethodPparameter_types6FpnHoopDesc__2_;
+text: .text%__1cYjava_lang_reflect_MethodLreturn_type6FpnHoopDesc__2_;
+text: .text%JVM_IsInterrupted;
+# Test LoadJFrame
+text: .text%__1cTresource_free_bytes6FpcI_v_;
+text: .text%__1cRComputeEntryStackHdo_bool6M_v_: generateOopMap.o;
+text: .text%__1cJFloatTypeDtag6kM_nIValueTag__: c1_ValueType.o;
+text: .text%__1cJFloatTypeEbase6kM_pnJValueType__: c1_ValueType.o;
+text: .text%__1cJFloatTypeMas_FloatType6M_p0_: c1_ValueType.o;
+text: .text%__1cIValueGenTdo_ArithmeticOp_FPU6MpnMArithmeticOp__v_;
+text: .text%__1cHLockRegIdo_float6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocOset_locked_fpu6MipnLInstruction_i_v_;
+text: .text%__1cIValueGenNis_32bit_mode6M_i_;
+text: .text%__1cLGetRefCountIdo_float6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cJFloatTypeEsize6kM_i_: c1_ValueType.o;
+text: .text%__1cHFreeRegIdo_float6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocMset_free_fpu6Mi_v_;
+text: .text%__1cQChangeSpillCountIdo_float6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cLLIR_EmitterRarithmetic_op_fpu6MnJBytecodesECode_pnLLIR_OprDesc_44i_v_;
+text: .text%__1cILIR_ListDmul6MpnLLIR_OprDesc_22_v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenKround_item6MpnEItem__v_;
+text: .text%__1cLLIR_EmitterFround6MipnLLIR_OprDesc__v_;
+text: .text%__1cILIR_ListKround32bit6MnFRInfo_i_v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenOspill_register6MnFRInfo__v_;
+text: .text%__1cIRegAllocTget_value_for_rinfo6kMnFRInfo__pnLInstruction__;
+text: .text%__1cLGetValueForGdo_cpu6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIValueGenKdivInRInfo6F_nFRInfo__;
+text: .text%__1cIValueGenLremOutRInfo6F_nFRInfo__;
+text: .text%__1cMArithmeticOpKlock_stack6kM_pnKValueStack__: c1_Instruction.o;
+text: .text%__1cLLIR_EmitterParithmetic_idiv6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo_pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListEirem6MnFRInfo_111pnMCodeEmitInfo__v_;
+text: .text%__1cHLIR_Op3Fvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cHLIR_Op3Jemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerIemit_op36MpnHLIR_Op3__v_;
+text: .text%__1cNLIR_AssemblerIfpu_push6MnFRInfo__v_;
+text: .text%__1cIFrameMapLFpuStackSimEpush6Mi_v_;
+text: .text%__1cNLIR_AssemblerKfpu_on_tos6MnFRInfo__v_;
+text: .text%__1cIFrameMapLFpuStackSimPoffset_from_tos6kMi_i_;
+text: .text%__1cIintArrayIindex_of6kMki_i_: c1_FrameMap_x86.o;
+text: .text%__1cNLIR_AssemblerHfpu_pop6MnFRInfo__v_;
+text: .text%__1cIFrameMapLFpuStackSimDpop6Mi_i_;
+text: .text%__1cNLIR_AssemblerKround32_op6MpnLLIR_OprDesc_2_v_;
+text: .text%__1cJAssemblerGfist_s6MnHAddress__v_;
+text: .text%__1cNLIR_AssemblerJreset_FPU6M_v_;
+text: .text%__1cNLIR_AssemblerIemit_op36MpnHLIR_Op3__v_;
+text: .text%__1cNLIR_AssemblerParithmetic_idiv6MnILIR_Code_pnLLIR_OprDesc_333pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerXadd_debug_info_for_div06MipnMCodeEmitInfo__v_;
+text: .text%__1cNDivByZeroStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cNDivByZeroStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o;
+text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_GraphBuilder.o;
+text: .text%__1cLInstructionKas_ShiftOp6M_pnHShiftOp__: c1_Instruction.o;
+text: .text%__1cILIR_ListLlogical_xor6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter.o;
+text: .text%__1cIRuntime1Ohandle_ic_miss6FpnKJavaThread_pnHoopDesc__pC_;
+text: .text%__1cNSharedRuntimeVhandle_ic_miss_helper6FpnKJavaThread_pnGThread__nMmethodHandle__;
+text: .text%__1cbEJvmtiDynamicCodeEventCollector2t6M_v_;
+text: .text%__1cKCompiledICOis_megamorphic6kM_i_;
+text: .text%__1cLVtableStubsOis_entry_point6FpC_i_;
+text: .text%__1cKCompiledICSset_to_megamorphic6MpnICallInfo_nJBytecodesECode_pnGThread__v_;
+text: .text%__1cLVtableStubsLcreate_stub6FiipnNmethodOopDesc__pC_;
+text: .text%__1cLVtableStubsGlookup6Fiii_pnKVtableStub__;
+text: .text%__1cLVtableStubsScreate_vtable_stub6Fii_pnKVtableStub__;
+text: .text%__1cKVtableStubSpd_code_size_limit6Fi_i_;
+text: .text%__1cKVtableStub2n6FIi_pv_;
+text: .text%__1cKVtableStubRpd_code_alignment6F_i_;
+text: .text%__1cLVtableStubsFenter6FiiipnKVtableStub__v_;
+text: .text%__1cGEventsDlog6FpkcE_v_: compiledIC.o;
+text: .text%__1cbEJvmtiDynamicCodeEventCollector2T6M_v_;
+text: .text%Unsafe_EnsureClassInitialized;
+text: .text%Unsafe_StaticFieldOffset;
+text: .text%Unsafe_StaticFieldBaseFromField;
+text: .text%Unsafe_GetIntVolatile;
+text: .text%__1cUBytecode_tableswitchGlength6M_i_: generateOopMap.o;
+text: .text%__1cUBytecode_tableswitchOdest_offset_at6kMi_i_;
+text: .text%__1cUBytecode_tableswitchGlength6M_i_: c1_GraphBuilder.o;
+text: .text%__1cLInstructionKas_ShiftOp6M_pnHShiftOp__: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderMtable_switch6M_v_;
+text: .text%__1cLTableSwitchFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerOdo_TableSwitch6MpnLTableSwitch__v_;
+text: .text%__1cLInstructionJas_Return6M_pnGReturn__: c1_GraphBuilder.o;
+text: .text%__1cGSwitchPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorOdo_TableSwitch6MpnLTableSwitch__v_;
+text: .text%__1cIValueGenOdo_TableSwitch6MpnLTableSwitch__v_;
+text: .text%__1cIValueGenVsetup_phis_for_switch6MpnEItem_pnKValueStack__v_;
+text: .text%__1cLLIR_EmitterOtableswitch_op6MpnLLIR_OprDesc_ipnKBlockBegin__v_;
+text: .text%__1cWstatic_call_RelocationLstatic_stub6M_pC_;
+text: .text%__1cSCompiledStaticCallMset_to_clean6M_v_;
+# Test JHello
+text: .text%__1cYjava_lang_reflect_MethodNset_signature6FpnHoopDesc_2_v_;
+text: .text%JVM_InitializeSocketLibrary;
+text: .text%__1cDhpiZinitialize_socket_library6F_i_;
+text: .text%JVM_Socket;
+text: .text%Unsafe_PageSize;
+text: .text%__1cNFingerprinterHdo_byte6M_v_: dump.o;
+text: .text%__1cXNativeSignatureIteratorHdo_byte6M_v_: interpreterRuntime.o;
+text: .text%Unsafe_SetMemory;
+text: .text%__1cECopyQpd_fill_to_words6FpnIHeapWord_II_v_: unsafe.o;
+text: .text%__1cNSharedRuntimeElrem6Fxx_x_;
+text: .text%Unsafe_DefineClass1;
+text: .text%__1cSUnsafe_DefineClass6FpnHJNIEnv__pnI_jstring_pnL_jbyteArray_iipnI_jobject_7_pnH_jclass__: unsafe.o;
+text: .text%JVM_DefineClass;
+text: .text%__1cPClassFileParserXverify_unqualified_name6MpcIi_i_;
+text: .text%__1cVLoaderConstraintTableYextend_loader_constraint6MpnVLoaderConstraintEntry_nGHandle_pnMklassOopDesc__v_;
+text: .text%__1cVLoaderConstraintTablebHensure_loader_constraint_capacity6MpnVLoaderConstraintEntry_i_v_;
+text: .text%__1cIciObjectIis_klass6M_i_: ciInstance.o;
+text: .text%__1cQInstanceConstantIencoding6kM_pnI_jobject__;
+text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_Instruction.o;
+text: .text%__1cILIR_ListQunwind_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o;
+text: .text%__1cIRuntime1Tprimitive_arraycopy6FpnIHeapWord_2i_v_;
+text: .text%__1cRComputeEntryStackHdo_char6M_v_: generateOopMap.o;
+text: .text%jni_NewDirectByteBuffer;
+text: .text%__1cbDinitializeDirectBufferSupport6FpnHJNIEnv___i_: jni.o;
+text: .text%lookupDirectBufferClasses: jni.o;
+text: .text%__1cJlookupOne6FpnHJNIEnv__pkcpnGThread__pnH_jclass__: jni.o;
+text: .text%__1cHJNIEnv_JNewObject6MpnH_jclass_pnK_jmethodID_E_pnI_jobject__: jni.o;
+text: .text%jni_GetDoubleArrayRegion: jni.o;
+text: .text%__1cNSignatureInfoJdo_double6M_v_: bytecode.o;
+text: .text%__1cXJNI_ArgumentPusherVaArgJget_float6M_v_: jni.o;
+text: .text%__1cQComputeCallStackHdo_byte6M_v_: generateOopMap.o;
+text: .text%__1cFKlassQup_cast_abstract6M_p0_;
+text: .text%__1cRComputeEntryStackHdo_byte6M_v_: generateOopMap.o;
+text: .text%__1cNSharedRuntimeDd2i6Fd_i_;
+text: .text%__1cSInterpreterRuntimeWslow_signature_handler6FpnKJavaThread_pnNmethodOopDesc_pi5_pC_;
+text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_x86.o;
+text: .text%__1cUSlowSignatureHandlerLpass_object6M_v_: interpreterRT_x86.o;
+text: .text%__1cXNativeSignatureIteratorIdo_array6Mii_v_: interpreterRT_x86.o;
+text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_x86.o;
+text: .text%__1cUSlowSignatureHandlerIpass_int6M_v_: interpreterRT_x86.o;
+text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_x86.o;
+text: .text%jni_GetFloatArrayRegion: jni.o;
+text: .text%jni_GetCharArrayRegion: jni.o;
+text: .text%jni_SetFloatField: jni.o;
+text: .text%jni_NewFloatArray: jni.o;
+text: .text%jni_SetFloatArrayRegion: jni.o;
+# SwingSet
+text: .text%JVM_GetFieldIxModifiers;
+text: .text%JVM_GetCPFieldClassNameUTF;
+text: .text%JVM_GetCPFieldModifiers;
+text: .text%__1cPClassFileParserUverify_constantvalue6MiinSconstantPoolHandle_pnGThread__v_;
+text: .text%__1cXjava_lang_ref_ReferenceOset_discovered6FpnHoopDesc_2_v_;
+text: .text%__1cMStoreIndexedPother_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%JVM_MonitorNotify;
+text: .text%__1cSObjectSynchronizerGnotify6FnGHandle_pnGThread__v_;
+text: .text%__1cKValueStackElock6MpnHIRScope_pnLInstruction__i_;
+text: .text%__1cKValueStackGunlock6M_i_;
+text: .text%__1cLLIR_EmitterVmonitorenter_at_entry6MnFRInfo_pnMCodeEmitInfo__v_;
+text: .text%__1cLLIR_EmitterNmonitor_enter6MnFRInfo_111ipnMCodeEmitInfo_3_v_;
+text: .text%__1cQMonitorEnterStub2t6MnFRInfo_1pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListbAload_stack_address_monitor6MinFRInfo__v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListLlock_object6MnFRInfo_111pnICodeStub_pnMCodeEmitInfo__v_;
+text: .text%__1cIValueGenNsyncTempRInfo6F_nFRInfo__;
+text: .text%__1cLLIR_EmitterQreturn_op_prolog6Mi_v_;
+text: .text%__1cLLIR_EmitterMmonitor_exit6MnFRInfo_11i_v_;
+text: .text%__1cILIR_ListNunlock_object6MnFRInfo_11pnICodeStub__v_;
+text: .text%__1cKLIR_OpLockFvisit6MpnQLIR_OpVisitState__v_;
+text: .text%__1cQMonitorEnterStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cRMonitorAccessStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cKLIR_OpLockJemit_code6MpnVLIR_AbstractAssembler__v_;
+text: .text%__1cNLIR_OptimizerJemit_lock6MpnKLIR_OpLock__v_;
+text: .text%__1cNLIR_AssemblerPmonitor_address6MinFRInfo__v_;
+text: .text%__1cIFrameMapbEaddress_for_monitor_lock_index6kMi_nHAddress__;
+text: .text%__1cIFrameMapbAfp_offset_for_monitor_lock6kMi_i_;
+text: .text%__1cNLIR_AssemblerJemit_lock6MpnKLIR_OpLock__v_;
+text: .text%__1cRC1_MacroAssemblerLlock_object6MpnMRegisterImpl_22rnFLabel__v_;
+text: .text%__1cQMonitorEnterStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cIFrameMapWmonitor_object_regname6kMi_nHOptoRegEName__;
+text: .text%__1cIFrameMapbCfp_offset_for_monitor_object6kMi_i_;
+text: .text%__1cMCodeEmitInfobHlocation_for_monitor_object_index6Mi_nILocation__;
+text: .text%__1cIFrameMapbHlocation_for_monitor_object_index6kMipnILocation__i_;
+text: .text%__1cMCodeEmitInfobFlocation_for_monitor_lock_index6Mi_nILocation__;
+text: .text%__1cIFrameMapbFlocation_for_monitor_lock_index6kMipnILocation__i_;
+text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_;
+text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_;
+text: .text%__1cRC1_MacroAssemblerNunlock_object6MpnMRegisterImpl_22rnFLabel__v_;
+text: .text%__1cPMonitorExitStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
+text: .text%__1cQMonitorEnterStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%__1cNLIR_AssemblerRload_receiver_reg6MpnMRegisterImpl__v_;
+text: .text%__1cNLIR_AssemblerLmonitorexit6MnFRInfo_1pnMRegisterImpl_i3_v_;
+text: .text%__1cPMonitorExitStubJemit_code6MpnNLIR_Assembler__v_;
+text: .text%jni_NewIntArray: jni.o;
+text: .text%__1cNCollectedHeapYlarge_typearray_allocate6FnLKlassHandle_iipnGThread__pnHoopDesc__: typeArrayKlass.o;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: objArrayKlassKlass.o;
+text: .text%__1cSobjArrayKlassKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cQinstanceRefKlassSoop_oop_iterate_nv6MpnHoopDesc_pnQFilteringClosure__i_;
+text: .text%__1cRTenuredGenerationKshort_name6kM_pkc_: tenuredGeneration.o;
+text: .text%__1cKGenerationIcounters6M_pnRCollectorCounters__: tenuredGeneration.o;
+text: .text%__1cRTenuredGenerationHcollect6MiiIii_v_;
+text: .text%__1cRTenuredGenerationbJretire_alloc_buffers_before_full_gc6M_v_;
+text: .text%__1cbCOneContigSpaceCardGenerationHcollect6MiiIii_v_;
+text: .text%__1cMGenMarkSweepTinvoke_at_safepoint6FipnSReferenceProcessor_i_v_;
+text: .text%__1cJCodeCacheLgc_prologue6F_v_;
+text: .text%__1cHThreadsLgc_prologue6F_v_;
+text: .text%__1cKJavaThreadLgc_prologue6M_v_;
+text: .text%__1cKJavaThreadJframes_do6MpFpnFframe_pknLRegisterMap__v_v_;
+text: .text%__1cRframe_gc_prologue6FpnFframe_pknLRegisterMap__v_: thread.o;
+text: .text%__1cFframeLgc_prologue6M_v_;
+text: .text%__1cQGenCollectedHeapRsave_used_regions6Mii_v_;
+text: .text%__1cKGenerationQsave_used_region6M_v_: tenuredGeneration.o;
+text: .text%__1cbCOneContigSpaceCardGenerationLused_region6kM_nJMemRegion__;
+text: .text%__1cPContiguousSpaceLused_region6kM_nJMemRegion__: space.o;
+text: .text%__1cKGenerationQsave_used_region6M_v_: defNewGeneration.o;
+text: .text%__1cKGenerationLused_region6kM_nJMemRegion__: defNewGeneration.o;
+text: .text%__1cKGenerationQsave_used_region6M_v_: compactingPermGenGen.o;
+text: .text%__1cMGenMarkSweepPallocate_stacks6F_v_;
+text: .text%__1cQGenCollectedHeapOgather_scratch6MpnKGeneration_I_pnMScratchBlock__;
+text: .text%__1cQDefNewGenerationScontribute_scratch6MrpnMScratchBlock_pnKGeneration_I_v_;
+text: .text%__1cKGenerationScontribute_scratch6MrpnMScratchBlock_p0I_v_: tenuredGeneration.o;
+text: .text%__1cRsort_scratch_list6FrpnMScratchBlock__v_: genCollectedHeap.o;
+text: .text%__1cVremoveSmallestScratch6FppnMScratchBlock__1_: genCollectedHeap.o;
+text: .text%__1cMGenMarkSweepRmark_sweep_phase16Firii_v_;
+text: .text%__1cJEventMark2t6MpkcE_v_: genMarkSweep.o;
+text: .text%__1cJMarkSweepRFollowRootClosureGdo_oop6MppnHoopDesc__v_: markSweep.o;
+text: .text%__1cJMarkSweepLfollow_root6FppnHoopDesc__v_;
+text: .text%__1cParrayKlassKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cLklassVtableToop_follow_contents6M_v_;
+text: .text%__1cJMarkSweepO_mark_and_push6FppnHoopDesc__v_;
+text: .text%__1cKklassKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cJMarkSweepXrevisit_weak_klass_link6FpnFKlass__v_;
+text: .text%__1cJMarkSweepMfollow_stack6F_v_;
+text: .text%__1cNinstanceKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cSinstanceKlassKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cNinstanceKlassUfollow_static_fields6M_v_;
+text: .text%__1cLklassItableToop_follow_contents6M_v_;
+text: .text%__1cJMarkSweepNpreserve_mark6FpnHoopDesc_pnLmarkOopDesc__v_;
+text: .text%__1cLsymbolKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cOtypeArrayKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cMjniIdMapBaseHoops_do6MpnKOopClosure__v_;
+text: .text%__1cIjniIdMapHoops_do6MpnKOopClosure__v_;
+text: .text%__1cJMarkSweepSMarkAndPushClosureGdo_oop6MppnHoopDesc__v_: markSweep.o;
+text: .text%__1cNobjArrayKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cJMarkSweepPmark_and_follow6FppnHoopDesc__v_;
+text: .text%__1cSobjArrayKlassKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cRconstantPoolKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cWconstantPoolCacheKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cWConstantPoolCacheEntryPfollow_contents6M_v_;
+text: .text%__1cLmethodKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cQconstMethodKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cQinstanceRefKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cFJNIidHoops_do6MpnKOopClosure__v_;
+text: .text%__1cQSystemDictionaryValways_strong_oops_do6FpnKOopClosure__v_;
+text: .text%__1cQSystemDictionaryYalways_strong_classes_do6FpnKOopClosure__v_;
+text: .text%__1cKDictionaryYalways_strong_classes_do6MpnKOopClosure__v_;
+text: .text%__1cQSystemDictionaryPplaceholders_do6FpnKOopClosure__v_;
+text: .text%__1cVLoaderConstraintTableYalways_strong_classes_do6MpnKOopClosure__v_;
+text: .text%__1cJvmSymbolsHoops_do6FpnKOopClosure_i_v_;
+text: .text%__1cJMarkSweepOIsAliveClosureLdo_object_b6MpnHoopDesc__i_: markSweep.o;
+text: .text%__1cJMarkSweepQKeepAliveClosureGdo_oop6MppnHoopDesc__v_;
+text: .text%__1cJMarkSweepSFollowStackClosureHdo_void6M_v_: markSweep.o;
+text: .text%__1cQSystemDictionaryMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure__i_;
+text: .text%__1cKDictionaryMdo_unloading6MpnRBoolObjectClosure_pnKOopClosure__i_;
+text: .text%__1cVLoaderConstraintTableYpurge_loader_constraints6MpnRBoolObjectClosure__v_;
+text: .text%__1cJCodeCacheMdo_unloading6FpnRBoolObjectClosure_pnKOopClosure_iri_v_;
+text: .text%__1cJCodeCacheFfirst6F_pnICodeBlob__;
+text: .text%__1cICodeHeapLfirst_block6kM_pnJHeapBlock__;
+text: .text%__1cICodeHeapJnext_free6kMpnJHeapBlock__pv_;
+text: .text%__1cJCodeCacheFalive6FpnICodeBlob__2_;
+text: .text%__1cKBufferBlobIis_alive6kM_i_: codeBlob.o;
+text: .text%__1cKBufferBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o;
+text: .text%__1cJCodeCacheEnext6FpnICodeBlob__2_;
+text: .text%__1cICodeHeapLblock_start6kMpv_pnJHeapBlock__;
+text: .text%__1cICodeHeapKnext_block6kMpnJHeapBlock__2_;
+text: .text%__1cNSingletonBlobIis_alive6kM_i_: codeBlob.o;
+text: .text%__1cNSingletonBlobbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o;
+text: .text%__1cLRuntimeStubIis_alive6kM_i_: codeBlob.o;
+text: .text%__1cLRuntimeStubbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_: codeBlob.o;
+text: .text%__1cHnmethodIis_alive6kM_i_: nmethod.o;
+text: .text%__1cHnmethodbIfollow_roots_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_iri_v_;
+text: .text%__1cHnmethodOis_not_entrant6kM_i_: nmethod.o;
+text: .text%__1cHnmethodbHfollow_root_or_mark_for_unloading6MpnRBoolObjectClosure_pnKOopClosure_ppnHoopDesc_iri_v_;
+text: .text%__1cOoop_RelocationJoop_value6M_pnHoopDesc__;
+text: .text%__1cVcompiledICHolderKlassSoop_being_unloaded6MpnRBoolObjectClosure_pnHoopDesc__i_;
+text: .text%__1cVcompiledICHolderKlassToop_follow_contents6MpnHoopDesc__v_;
+text: .text%__1cJMarkSweepXfollow_weak_klass_links6F_v_;
+text: .text%__1cFKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_;
+text: .text%__1cNinstanceKlassXfollow_weak_klass_links6MpnRBoolObjectClosure_pnKOopClosure__v_;
+text: .text%__1cJHashtableGunlink6MpnRBoolObjectClosure__v_;
+text: .text%__1cMGenMarkSweepRmark_sweep_phase26F_v_;
+text: .text%__1cQGenCollectedHeapWprepare_for_compaction6M_v_;
+text: .text%__1cKGenerationWprepare_for_compaction6MpnMCompactPoint__v_;
+text: .text%__1cbCOneContigSpaceCardGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: tenuredGeneration.o;
+text: .text%__1cPContiguousSpaceWprepare_for_compaction6MpnMCompactPoint__v_;
+text: .text%__1cWOffsetTableContigSpaceUinitialize_threshold6M_pnIHeapWord__;
+text: .text%__1cMTenuredSpaceSallowed_dead_ratio6kM_i_;
+text: .text%__1cQCompactibleSpaceHforward6MpnHoopDesc_IpnMCompactPoint_pnIHeapWord__6_;
+text: .text%__1cWOffsetTableContigSpacePcross_threshold6MpnIHeapWord_2_2_;
+text: .text%__1cQCompactibleSpaceQinsert_deadspace6MrIpnIHeapWord_I_i_;
+text: .text%__1cQCompactibleSpaceVnext_compaction_space6kM_p0_: space.o;
+text: .text%__1cQDefNewGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: defNewGeneration.o;
+text: .text%__1cQCompactibleSpaceSallowed_dead_ratio6kM_i_: space.o;
+text: .text%__1cbCOneContigSpaceCardGenerationWfirst_compaction_space6kM_pnQCompactibleSpace__: compactingPermGenGen.o;
+text: .text%__1cPContigPermSpaceSallowed_dead_ratio6kM_i_;
+text: .text%__1cMGenMarkSweepRmark_sweep_phase36Fi_v_;
+text: .text%__1cUCompactingPermGenGenTpre_adjust_pointers6M_v_;
+text: .text%__1cJMarkSweepUAdjustPointerClosureGdo_oop6MppnHoopDesc__v_: markSweep.o;
+text: .text%__1cQGenCollectedHeapSprocess_weak_roots6MpnKOopClosure_2_v_;
+text: .text%__1cJCodeCacheHoops_do6FpnKOopClosure__v_;
+text: .text%__1cKBufferBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cSDeoptimizationBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cLRuntimeStubHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cNSafepointBlobHoops_do6MpnKOopClosure__v_: codeBlob.o;
+text: .text%__1cHnmethodHoops_do6MpnKOopClosure__v_;
+text: .text%__1cJHashtableHoops_do6MpnKOopClosure__v_;
+text: .text%__1cSReferenceProcessorPoops_do_statics6FpnKOopClosure__v_;
+text: .text%__1cSReferenceProcessorHoops_do6MpnKOopClosure__v_;
+text: .text%__1cJMarkSweepMadjust_marks6F_v_;
+text: .text%__1cYGenAdjustPointersClosureNdo_generation6MpnKGeneration__v_: genMarkSweep.o;
+text: .text%__1cKGenerationPadjust_pointers6M_v_;
+text: .text%__1cbCOneContigSpaceCardGenerationNspace_iterate6MpnMSpaceClosure_i_v_;
+text: .text%__1cVAdjustPointersClosureIdo_space6MpnFSpace__v_: generation.o;
+text: .text%__1cQCompactibleSpacePadjust_pointers6M_v_;
+text: .text%__1cOtypeArrayKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cNobjArrayKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cQinstanceRefKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cNinstanceKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cQDefNewGenerationNspace_iterate6MpnMSpaceClosure_i_v_;
+text: .text%__1cUCompactingPermGenGenPadjust_pointers6M_v_;
+text: .text%__1cKklassKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cLsymbolKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cParrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cLklassVtableToop_adjust_pointers6M_v_;
+text: .text%__1cSobjArrayKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cRconstantPoolKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cQconstMethodKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cLmethodKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cSinstanceKlassKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cNinstanceKlassUadjust_static_fields6M_v_;
+text: .text%__1cLklassItableToop_adjust_pointers6M_v_;
+text: .text%__1cWconstantPoolCacheKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cWConstantPoolCacheEntryPadjust_pointers6M_v_;
+text: .text%__1cVcompiledICHolderKlassToop_adjust_pointers6MpnHoopDesc__i_;
+text: .text%__1cMGenMarkSweepRmark_sweep_phase46F_v_;
+text: .text%__1cUCompactingPermGenGenHcompact6M_v_;
+text: .text%__1cQCompactibleSpaceHcompact6M_v_;
+text: .text%__1cPContiguousSpaceWreset_after_compaction6M_v_: space.o;
+text: .text%__1cRGenCompactClosureNdo_generation6MpnKGeneration__v_: genMarkSweep.o;
+text: .text%__1cKGenerationHcompact6M_v_;
+text: .text%__1cUCompactingPermGenGenMpost_compact6M_v_;
+text: .text%__1cJMarkSweepNrestore_marks6F_v_;
+text: .text%__1cMGenMarkSweepRdeallocate_stacks6F_v_;
+text: .text%__1cLCardTableRSSclear_into_younger6MpnKGeneration_i_v_;
+text: .text%__1cLCardTableRSFclear6MnJMemRegion__v_: cardTableRS.o;
+text: .text%__1cRCardTableModRefBSFclear6MnJMemRegion__v_;
+text: .text%__1cRCardTableModRefBSPclear_MemRegion6MnJMemRegion__v_;
+text: .text%__1cHThreadsLgc_epilogue6F_v_;
+text: .text%__1cKJavaThreadLgc_epilogue6M_v_;
+text: .text%__1cRframe_gc_epilogue6FpnFframe_pknLRegisterMap__v_: thread.o;
+text: .text%__1cFframeLgc_epilogue6M_v_;
+text: .text%__1cFframeMpd_gc_epilog6M_v_;
+text: .text%__1cJCodeCacheLgc_epilogue6F_v_;
+text: .text%__1cICodeBlobTfix_oop_relocations6M_v_;
+text: .text%__1cICodeBlobTfix_oop_relocations6MpC1_v_;
+text: .text%__1cKRelocationSfix_oop_relocation6M_v_: codeBlob.o;
+text: .text%__1cKRelocationSfix_oop_relocation6M_v_: relocInfo.o;
+text: .text%__1cICodeBlobKis_nmethod6kM_i_: onStackReplacement.o;
+text: .text%__1cQGenCollectedHeapWupdate_time_of_last_gc6Mx_v_: genMarkSweep.o;
+text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: tenuredGeneration.o;
+text: .text%__1cKGenerationWupdate_time_of_last_gc6Mx_v_: compactingPermGenGen.o;
+text: .text%__1cbCOneContigSpaceCardGenerationVunsafe_max_alloc_nogc6kM_I_;
+text: .text%__1cRTenuredGenerationQcompute_new_size6M_v_;
+text: .text%__1cKGenerationEspec6M_pnOGenerationSpec__;
+text: .text%Unsafe_CompareAndSwapObject;
+text: .text%__1cLVtableStubsScreate_itable_stub6Fii_pnKVtableStub__;
+text: .text%__1cLLIR_EmitterDnop6M_v_;
+text: .text%__1cJAssemblerEmovl6MnHAddress_pnI_jobject__v_;
+text: .text%__1cMLinkResolverbEvtable_index_of_miranda_method6FnLKlassHandle_nMsymbolHandle_2pnGThread__i_;
+text: .text%__1cLklassVtableQindex_of_miranda6MpnNsymbolOopDesc_2_i_;
+text: .text%__1cLklassVtableTis_miranda_entry_at6Mi_i_;
+text: .text%__1cRPrivilegedElementHoops_do6MpnKOopClosure__v_;
+text: .text%__1cJCodeCacheIcontains6Fpv_i_;
+text: .text%__1cFframeRoops_code_blob_do6MpnKOopClosure_pknLRegisterMap__v_;
+text: .text%__1cJOopMapSetHoops_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure__v_;
+text: .text%__1cJOopMapSetGall_do6FpknFframe_pnICodeBlob_pknLRegisterMap_pnKOopClosure_pFppnHoopDesc_9E_v9B9B_v_;
+text: .text%__1cICodeBlobbAoop_map_for_return_address6MpCi_pnGOopMap__;
+text: .text%__1cJOopMapSetSfind_map_at_offset6kMii_pnGOopMap__;
+text: .text%__1cMOopMapStream2t6MpnGOopMap_i_v_;
+text: .text%__1cFframeVoopmapreg_to_location6kMnFVMRegEName_pknLRegisterMap__ppnHoopDesc__;
+text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: defNewGeneration.o;
+text: .text%__1cJOopMapSetTupdate_register_map6FpknFframe_pnICodeBlob_pnLRegisterMap__v_;
+text: .text%__1cICodeBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_: nmethod.o;
+text: .text%__1cQComputeCallStackIdo_float6M_v_: generateOopMap.o;
+text: .text%jni_DeleteWeakGlobalRef: jni.o;
+text: .text%__1cKJNIHandlesTdestroy_weak_global6FpnI_jobject__v_;
+text: .text%__1cILIR_ListJoop2stack6MpnI_jobject_i_v_: c1_LIREmitter.o;
+text: .text%__1cNObjectMonitorREntryQdDueue_unlink6MpnMObjectWaiter__v_;
+text: .text%JVM_IsSameClassPackage;
+text: .text%__1cTGeneratePairingInfoRpossible_gc_point6MpnOBytecodeStream__i_: ciMethod.o;
+text: .text%__1cTGeneratePairingInfoOreport_results6kM_i_: ciMethod.o;
+text: .text%__1cMGraphBuilderMmonitorenter6MpnLInstruction__v_;
+text: .text%__1cMMonitorEnterFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerPdo_MonitorEnter6MpnMMonitorEnter__v_;
+text: .text%__1cNAccessMonitorIcan_trap6kM_i_: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderLmonitorexit6MpnLInstruction__v_;
+text: .text%__1cLMonitorExitFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerOdo_MonitorExit6MpnLMonitorExit__v_;
+text: .text%__1cILongTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o;
+text: .text%__1cILongTypeEsize6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cNAccessMonitorPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorPdo_MonitorEnter6MpnMMonitorEnter__v_;
+text: .text%__1cTNullCheckEliminatorUhandle_AccessMonitor6MpnNAccessMonitor__v_;
+text: .text%__1cQNullCheckVisitorOdo_MonitorExit6MpnLMonitorExit__v_;
+text: .text%__1cIValueGenPdo_MonitorEnter6MpnMMonitorEnter__v_;
+text: .text%__1cNc1_AllocTableMhas_two_free6kM_i_;
+text: .text%__1cMLongConstantPas_LongConstant6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cFRInfoLas_rinfo_lo6kM_0_;
+text: .text%__1cLLIR_EmitterJopr2intLo6MpnLLIR_OprDesc__i_;
+text: .text%__1cFRInfoLas_rinfo_hi6kM_0_;
+text: .text%__1cLLIR_EmitterJopr2intHi6MpnLLIR_OprDesc__i_;
+text: .text%__1cIValueGenOdo_MonitorExit6MpnLMonitorExit__v_;
+text: .text%__1cNAccessMonitorQas_AccessMonitor6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cJAssemblerFpushl6MpnI_jobject__v_;
+text: .text%__1cNLIR_AssemblerNas_Address_hi6MpnLLIR_Address__nHAddress__;
+text: .text%__1cFRInfoOas_register_hi6kM_pnMRegisterImpl__;
+text: .text%__1cNLIR_AssemblerNas_Address_lo6MpnLLIR_Address__nHAddress__;
+text: .text%__1cFRInfoOas_register_lo6kM_pnMRegisterImpl__;
+text: .text%__1cCosHrealloc6FpvI_1_;
+text: .text%Unsafe_GetNativeFloat;
+text: .text%__1cIValueGenQdo_currentThread6MpnJIntrinsic__v_;
+text: .text%__1cILIR_ListKget_thread6MnFRInfo__v_: c1_CodeGenerator_x86.o;
+text: .text%__1cNLIR_AssemblerKget_thread6MpnLLIR_OprDesc__v_;
+text: .text%__1cIValueGenSload_item_patching6MpnHIRScope_ipnEItem_pnKValueStack_pnOExceptionScope__v_;
+text: .text%__1cEItemUget_jobject_constant6kM_pnIciObject__;
+text: .text%__1cJValueTypeTas_InstanceConstant6M_pnQInstanceConstant__: c1_ValueType.o;
+text: .text%__1cIintArrayIindex_of6kMki_i_: c1_CodeGenerator.o;
+text: .text%__1cMLinkResolverbEresolve_interface_call_or_null6FnLKlassHandle_1nMsymbolHandle_21_nMmethodHandle__;
+text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciInstanceKlass.o;
+text: .text%__1cGciTypeNis_subtype_of6Mp0_i_;
+text: .text%__1cIValueGenOload_byte_item6MpnEItem__v_;
+text: .text%__1cIValueGenPlock_free_rinfo6MpnLInstruction_nKc1_RegMask__nFRInfo__;
+text: .text%__1cIRegAllocNget_lock_temp6MpnLInstruction_nKc1_RegMask__nFRInfo__;
+text: .text%__1cQComputeCallStackIdo_short6M_v_: generateOopMap.o;
+text: .text%__1cRComputeEntryStackIdo_short6M_v_: generateOopMap.o;
+text: .text%__1cIFrameMapNis_byte_rinfo6FnFRInfo__i_;
+text: .text%Unsafe_AllocateInstance;
+text: .text%jni_AllocObject: jni.o;
+text: .text%__1cQinstanceRefKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_;
+text: .text%__1cNCanonicalizerMset_constant6Mi_v_: c1_Canonicalizer.o;
+text: .text%__1cJTypeCheckPother_values_do6MpFppnLInstruction__v_v_;
+text: .text%__1cNLIR_AssemblerMcheck_icache6M_i_;
+text: .text%__1cRC1_MacroAssemblerTfast_ObjectHashCode6MpnMRegisterImpl_2_v_;
+text: .text%__1cNLIR_AssemblerZjobject2reg_with_patching6MpnMRegisterImpl_pnMCodeEmitInfo__v_;
+text: .text%__1cHLogicOpIis_equal6kMpnLInstruction__i_: c1_Instruction.o;
+text: .text%__1cLAccessFieldKlock_stack6kM_pnKValueStack__: c1_GraphBuilder.o;
+text: .text%__1cIRuntime1Mnew_instance6FpnKJavaThread_pnMklassOopDesc__v_;
+text: .text%__1cQGenCollectedHeapXhandle_failed_promotion6MpnKGeneration_pnHoopDesc_Ip4_4_;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: instanceRefKlass.o;
+text: .text%__1cbCOneContigSpaceCardGenerationTexpand_and_allocate6MIiii_pnIHeapWord__;
+text: .text%__1cbCOneContigSpaceCardGenerationGexpand6MII_v_;
+text: .text%__1cNGCMutexLocker2t6MpnFMutex__v_;
+text: .text%__1cbCOneContigSpaceCardGenerationHgrow_by6MI_i_;
+text: .text%__1cPContiguousSpaceNmangle_region6MnJMemRegion__v_;
+text: .text%__1cJMarkSweepRFollowRootClosureLdo_nmethods6kM_ki_: markSweep.o;
+text: .text%__1cQCompactibleSpaceUinitialize_threshold6M_pnIHeapWord__: space.o;
+text: .text%__1cKOopClosureLdo_nmethods6kM_ki_: markSweep.o;
+text: .text%__1cRAlwaysTrueClosureLdo_object_b6MpnHoopDesc__i_: genCollectedHeap.o;
+text: .text%__1cLCardTableRSTinvalidate_or_clear6MpnKGeneration_ii_v_;
+text: .text%__1cJMemRegionFminus6kMk0_0_;
+text: .text%__1cLCardTableRSKinvalidate6MnJMemRegion__v_: cardTableRS.o;
+text: .text%__1cRCardTableModRefBSKinvalidate6MnJMemRegion__v_;
+text: .text%__1cIRuntime1Onew_type_array6FpnKJavaThread_pnMklassOopDesc_i_v_;
+text: .text%__1cJFloatTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o;
+text: .text%__1cNFloatConstantQas_FloatConstant6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cILIR_ListNstore_mem_oop6MpnI_jobject_nFRInfo_inJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
+text: .text%__1cJFloatTypeMas_FloatType6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cNConstantTableMappend_float6Mf_v_;
+text: .text%__1cRAbstractAssemblerGa_long6Mi_v_;
+text: .text%__1cNObjectMonitorGnotify6MpnGThread__v_;
+text: .text%__1cDCHARprocess_interface6FnTinstanceKlassHandle_pnNGrowableArray4nLKlassHandle___pnNGrowableArray4nMmethodHandle___nMsymbolHandle_6_v_;
+text: .text%__1cINewArrayPother_values_do6MpFppnLInstruction__v_v_;
+text: .text%__1cLLIR_EmitterQfield_store_byte6MpnLLIR_OprDesc_i2nFRInfo_ipnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerIshift_op6MnILIR_Code_nFRInfo_222_v_;
+text: .text%__1cIRuntime1Mmonitorenter6FpnKJavaThread_pnHoopDesc_pnPBasicObjectLock__v_;
+text: .text%__1cIRuntime1Lmonitorexit6FpnKJavaThread_pnPBasicObjectLock__v_;
+text: .text%__1cHnmethodPis_dependent_on6MpnMklassOopDesc__i_;
+text: .text%__1cHnmethodVis_dependent_on_entry6MpnMklassOopDesc_2pnNmethodOopDesc__i_;
+text: .text%__1cNVM_DeoptimizeEname6kM_pkc_: vm_operations.o;
+text: .text%__1cNVM_DeoptimizeEdoit6M_v_;
+text: .text%__1cODeoptimizationVdeoptimize_dependents6F_i_;
+text: .text%__1cHThreadsbFdeoptimized_wrt_marked_nmethods6F_v_;
+text: .text%__1cKJavaThreadbFdeoptimized_wrt_marked_nmethods6M_v_;
+text: .text%__1cFframeVshould_be_deoptimized6kM_i_;
+text: .text%__1cICodeBlobOis_java_method6kM_i_: codeBlob.o;
+text: .text%__1cJCodeCachebGmake_marked_nmethods_not_entrant6F_v_;
+text: .text%__1cJCodeCacheNalive_nmethod6FpnICodeBlob__pnHnmethod__;
+text: .text%__1cHnmethodbAmake_not_entrant_or_zombie6Mi_v_;
+text: .text%__1cHnmethodNis_osr_method6kM_i_: nmethod.o;
+text: .text%__1cKNativeJumpUpatch_verified_entry6FpC11_v_;
+text: .text%__1cHnmethodVmark_as_seen_on_stack6M_v_;
+text: .text%__1cTinc_decompile_count6FpnHnmethod__v_: nmethod.o;
+text: .text%__1cMVM_OperationNdoit_epilogue6M_v_: vm_operations.o;
+text: .text%__1cHThreadsLnmethods_do6F_v_;
+text: .text%__1cKJavaThreadLnmethods_do6M_v_;
+text: .text%__1cGThreadLnmethods_do6M_v_;
+text: .text%__1cFframeLnmethods_do6M_v_;
+text: .text%__1cFframeVnmethods_code_blob_do6M_v_;
+text: .text%__1cILIR_ListEidiv6MnFRInfo_i11pnMCodeEmitInfo__v_;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_LIRAssembler_x86.o;
+text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_;
+text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o;
+text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o;
+text: .text%__1cHnmethodVcleanup_inline_caches6M_v_;
+text: .text%__1cKCompiledIC2t6MpnKRelocation__v_;
+text: .text%__1cILongTypeDtag6kM_nIValueTag__: c1_ValueType.o;
+text: .text%__1cILongTypeEsize6kM_i_: c1_ValueType.o;
+text: .text%JVM_HoldsLock;
+text: .text%__1cSObjectSynchronizerZcurrent_thread_holds_lock6FpnKJavaThread_nGHandle__i_;
+text: .text%__1cIciObjectRis_instance_klass6M_i_: ciObjArrayKlass.o;
+text: .text%__1cLLoadIndexedIis_equal6kMpnLInstruction__i_: c1_Instruction.o;
+text: .text%__1cFciEnvWis_dependence_violated6FpnMklassOopDesc_pnNmethodOopDesc__i_;
+text: .text%__1cFciEnvZcall_has_multiple_targets6FpnNinstanceKlass_nMsymbolHandle_3ri_i_;
+text: .text%__1cFMutexbLwait_for_lock_blocking_implementation6MpnKJavaThread__v_;
+text: .text%__1cHnmethodbCcan_not_entrant_be_converted6M_i_;
+text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: oopMapCache.o;
+text: .text%__1cTMaskFillerForNativeIpass_int6M_v_: oopMapCache.o;
+text: .text%__1cGThreadOis_Java_thread6kM_i_: vmThread.o;
+text: .text%__1cMLocalMappingDadd6MinFRInfo__v_;
+text: .text%__1cILongTypeEbase6kM_pnJValueType__: c1_ValueType.o;
+text: .text%__1cLLIR_EmitterQfield_store_long6MpnLLIR_OprDesc_i2ipnMCodeEmitInfo__v_;
+text: .text%__1cKScanBlocksMis_long_only6kMi_i_;
+text: .text%__1cRLIR_PeepholeStateLreg2indexLo6MpnLLIR_OprDesc__i_;
+text: .text%__1cRLIR_PeepholeStateLreg2indexHi6MpnLLIR_OprDesc__i_;
+text: .text%__1cNSharedRuntimeDf2l6Ff_x_;
+text: .text%__1cIValueGenLdo_getClass6MpnJIntrinsic__v_;
+text: .text%__1cLLIR_EmitterIgetClass6MnFRInfo_1pnMCodeEmitInfo__v_;
+text: .text%__1cMGraphBuilderKcompare_op6MpnJValueType_nJBytecodesECode__v_;
+text: .text%__1cJCompareOpFvisit6MpnSInstructionVisitor__v_: c1_Instruction.o;
+text: .text%__1cNCanonicalizerMdo_CompareOp6MpnJCompareOp__v_;
+text: .text%__1cJCompareOpEhash6kM_i_: c1_Instruction.o;
+text: .text%__1cJCompareOpEname6kM_pkc_: c1_Instruction.o;
+text: .text%__1cJCompareOpMas_CompareOp6M_p0_: c1_Instruction.o;
+text: .text%__1cCIf2t6MpnLInstruction_n0BJCondition_i2pnKBlockBegin_5pnKValueStack_i_v_: c1_Canonicalizer.o;
+text: .text%__1cGSetRegIdo_float6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocLset_fpu_reg6MiipnLInstruction__v_;
+text: .text%__1cJIsFreeRegIdo_float6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cILIR_ListJfloat2reg6MfnFRInfo__v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListMbranch_float6MnMLIR_OpBranchNLIR_Condition_pnFLabel_4_v_;
+text: .text%__1cIValueGenNreturnF0RInfo6F_nFRInfo__;
+text: .text%__1cLLIR_EmitterOset_fpu_result6MnFRInfo__v_;
+text: .text%__1cILIR_ListIpush_fpu6MnFRInfo__v_: c1_LIREmitter.o;
+text: .text%__1cNConstantTableZaddress_of_float_constant6Mf_pC_;
+text: .text%__1cNLIR_AssemblerOfpu_two_on_tos6MnFRInfo_1i_v_;
+text: .text%__1cIFrameMapLFpuStackSimEswap6M_v_;
+text: .text%__1cIFrameMapLFpuStackSimRexchange_with_tos6Mi_v_;
+text: .text%__1cHnmethodSflush_dependencies6MpnRBoolObjectClosure__v_;
+text: .text%__1cNinstanceKlassYremove_dependent_nmethod6MpnHnmethod__v_;
+text: .text%__1cFVTuneOdelete_nmethod6FpnHnmethod__v_;
+text: .text%__1cQPlaceholderEntryHoops_do6MpnKOopClosure__v_;
+text: .text%__1cHnmethodFflush6M_v_;
+text: .text%__1cJEventMark2t6MpkcE_v_: nmethod.o;
+text: .text%__1cICodeBlobFflush6M_v_;
+text: .text%__1cJCodeCacheEfree6FpnICodeBlob__v_;
+text: .text%__1cICodeHeapKdeallocate6Mpv_v_;
+text: .text%__1cICodeHeapPadd_to_freelist6MpnJHeapBlock__v_;
+text: .text%__1cICodeHeapPfollowing_block6MpnJFreeBlock__2_;
+text: .text%__1cRComputeEntryStackIdo_float6M_v_: generateOopMap.o;
+text: .text%__1cICodeHeapMinsert_after6MpnJFreeBlock_2_v_;
+text: .text%__1cICodeHeapLmerge_right6MpnJFreeBlock__v_;
+text: .text%__1cHnmethodbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_;
+text: .text%__1cUCompressedReadStreamMraw_read_int6FrpC_i_: nmethod.o;
+text: .text%__1cFframebAoops_compiled_arguments_do6MnMsymbolHandle_ipknLRegisterMap_pnKOopClosure__v_;
+text: .text%__1cQComputeCallStackJdo_double6M_v_: generateOopMap.o;
+text: .text%__1cbCOneContigSpaceCardGenerationGshrink6MI_v_;
+text: .text%__1cbCOneContigSpaceCardGenerationJshrink_by6MI_v_;
+text: .text%__1cMVirtualSpaceJshrink_by6MI_v_;
+text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: oopMapCache.o;
+text: .text%__1cRComputeEntryStackJdo_double6M_v_: generateOopMap.o;
+text: .text%__1cKDoubleTypeDtag6kM_nIValueTag__: c1_ValueType.o;
+text: .text%__1cKDoubleTypeDtag6kM_nIValueTag__: c1_Canonicalizer.o;
+text: .text%__1cODoubleConstantRas_DoubleConstant6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cKDoubleTypeEbase6kM_pnJValueType__: c1_ValueType.o;
+text: .text%__1cODoubleConstantLis_constant6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cKDoubleTypeEsize6kM_i_: c1_ValueType.o;
+text: .text%__1cHLockRegJdo_double6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocRset_locked_double6MipnLInstruction_i_v_;
+text: .text%__1cKDoubleTypeNas_DoubleType6M_p0_: c1_ValueType.o;
+text: .text%__1cIFrameMapUare_adjacent_indeces6kMii_i_;
+text: .text%__1cQChangeSpillCountJdo_double6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocZchange_double_spill_count6Mii_v_;
+text: .text%__1cILIR_ListKdouble2reg6MdnFRInfo__v_: c1_LIREmitter.o;
+text: .text%__1cHFreeRegJdo_double6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocPset_free_double6Mi_v_;
+text: .text%__1cILIR_ListDrem6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o;
+text: .text%__1cLGetRefCountJdo_double6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocNget_double_rc6kMi_i_;
+text: .text%__1cLLIR_EmitterUcheck_double_address6Mi_v_;
+text: .text%__1cILIR_ListQreg2double_stack6MnFRInfo_inJBasicType__v_: c1_LIREmitter.o;
+text: .text%__1cRLIR_PeepholeStateNstack2indexHi6MpnLLIR_OprDesc__i_;
+text: .text%__1cRLIR_PeepholeStateNstack2indexLo6MpnLLIR_OprDesc__i_;
+text: .text%__1cKDoubleTypeNas_DoubleType6M_p0_: c1_Canonicalizer.o;
+text: .text%__1cNConstantTableNappend_double6Md_v_;
+text: .text%__1cNConstantTablebAaddress_of_double_constant6Md_pC_;
+text: .text%__1cQGenCollectedHeapHcollect6MnHGCCauseFCause_i_v_;
+text: .text%__1cQGenCollectedHeapOcollect_locked6MnHGCCauseFCause_i_v_;
+text: .text%__1cRVM_GenCollectFullEname6kM_pkc_: vm_operations.o;
+text: .text%__1cRVM_GenCollectFullEdoit6M_v_;
+text: .text%__1cQGenCollectedHeapYmust_clear_all_soft_refs6M_i_;
+text: .text%__1cQGenCollectedHeapSdo_full_collection6Miipi_v_;
+text: .text%__1cKGenerationbHfull_collects_younger_generations6kM_i_: defNewGeneration.o;
+text: .text%__1cKDoubleTypeEsize6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cKDoubleTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o;
+text: .text%__1cIValueMapNresize_bucket6MpnGBucket__v_;
+text: .text%__1cNFloatConstantLis_constant6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cJNullCheckMas_NullCheck6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cLLIR_EmitterIopr2long6MpnLLIR_OprDesc__x_;
+text: .text%__1cILIR_ListKlong2stack6Mxi_v_: c1_LIREmitter.o;
+text: .text%__1cIValueGenNreturnD0RInfo6F_nFRInfo__;
+text: .text%__1cJIsFreeRegJdo_double6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocOis_free_double6kMi_i_;
+text: .text%__1cGSetRegJdo_double6Mi_v_: c1_RegAlloc.o;
+text: .text%__1cIRegAllocOset_double_reg6MiipnLInstruction__v_;
+text: .text%__1cLLIR_EmitterNcopy_fpu_item6MnFRInfo_pnLLIR_OprDesc__v_;
+text: .text%__1cILIR_ListHdup_fpu6MnFRInfo_1_v_: c1_LIREmitter.o;
+text: .text%__1cILIR_ListDdiv6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter.o;
+text: .text%__1cJAssemblerFfsubp6Mi_v_;
+text: .text%__1cNLIR_AssemblerHdup_fpu6MnFRInfo_1_v_;
+text: .text%__1cIFrameMapLFpuStackSimLmove_on_tos6Mi_i_;
+text: .text%__1cJAssemblerGfdiv_d6MnHAddress__v_;
+text: .text%__1cJAssemblerFfdivp6Mi_v_;
+text: .text%__1cIValueGenMreturn2RInfo6F_nFRInfo__;
+text: .text%__1cJValueTypeQas_FloatConstant6M_pnNFloatConstant__: c1_Canonicalizer.o;
+text: .text%__1cIRuntime1Qnew_object_array6FpnKJavaThread_pnMklassOopDesc_i_v_;
+text: .text%__1cIValueGenLdivOutRInfo6F_nFRInfo__;
+text: .text%__1cILIR_ListEidiv6MnFRInfo_111pnMCodeEmitInfo__v_;
+text: .text%__1cILIR_ListVvolatile_load_mem_reg6MnFRInfo_i1nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
+text: .text%__1cEItemSget_jlong_constant6kM_x_;
+text: .text%__1cNLIR_AssemblerQvolatile_move_op6MpnLLIR_OprDesc_2nJBasicType_nHLIR_Op1NLIR_PatchCode_pnMCodeEmitInfo__v_;
+text: .text%__1cFKlassNoop_is_method6kM_i_: objArrayKlass.o;
+text: .text%__1cFKlassRoop_is_methodData6kM_i_: objArrayKlass.o;
+text: .text%__1cIciObjectTis_type_array_klass6M_i_: ciObjArrayKlass.o;
+text: .text%__1cMciArrayKlassOis_array_klass6M_i_: ciObjArrayKlass.o;
+text: .text%__1cONewObjectArrayKexact_type6kM_pnGciType__;
+text: .text%__1cPciObjArrayKlassSis_obj_array_klass6M_i_: ciObjArrayKlass.o;
+text: .text%__1cPciObjArrayKlassNelement_klass6M_pnHciKlass__;
+text: .text%__1cIRuntime1Noop_arraycopy6FpnIHeapWord_2i_v_;
+text: .text%__1cILongTypeEbase6kM_pnJValueType__: c1_Canonicalizer.o;
+text: .text%__1cMLongConstantLis_constant6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cIValueGenUdo_ArithmeticOp_Long6MpnMArithmeticOp__v_;
+text: .text%__1cLLIR_EmitterSarithmetic_op_long6MnJBytecodesECode_pnLLIR_OprDesc_44pnMCodeEmitInfo__v_;
+text: .text%__1cFKlassUoop_oop_iterate_nv_m6MpnHoopDesc_pnQFilteringClosure_nJMemRegion__i_: compiledICHolderKlass.o;
+text: .text%__1cVcompiledICHolderKlassRoop_oop_iterate_m6MpnHoopDesc_pnKOopClosure_nJMemRegion__i_;
+text: .text%__1cIciObjectRis_instance_klass6M_i_: ciTypeArrayKlass.o;
+text: .text%__1cLArrayLengthIis_equal6kMpnLInstruction__i_: c1_GraphBuilder.o;
+text: .text%__1cFKlassPoop_is_objArray6kM_i_: compiledICHolderKlass.o;
+text: .text%__1cFKlassQoop_is_typeArray6kM_i_: compiledICHolderKlass.o;
+text: .text%__1cTunsafe_intrinsic_id6FpnNsymbolOopDesc_1_nNmethodOopDescLIntrinsicId__;
+text: .text%__1cMGraphBuilderVappend_unsafe_put_raw6MpnIciMethod_nJBasicType__i_;
+text: .text%__1cMUnsafePutRawFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_;
+text: .text%__1cNCanonicalizerOdo_UnsafeRawOp6MpnLUnsafeRawOp__v_;
+text: .text%__1cFmatch6FpnLUnsafeRawOp_ppnLInstruction_4pi_i_: c1_Canonicalizer.o;
+text: .text%__1cLInstructionPas_ArithmeticOp6M_pnMArithmeticOp__: c1_Instruction.o;
+text: .text%__1cIUnsafeOpLas_UnsafeOp6M_p0_: c1_GraphBuilder.o;
+text: .text%__1cMGraphBuilderVappend_unsafe_get_raw6MpnIciMethod_nJBasicType__i_;
+text: .text%__1cMUnsafeGetRawFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_;
+text: .text%__1cMGraphBuilderNlookup_switch6M_v_;
+text: .text%__1cIintArray2t6Mki1_v_: c1_GraphBuilder.o;
+text: .text%__1cMLookupSwitchFvisit6MpnSInstructionVisitor__v_: c1_GraphBuilder.o;
+text: .text%__1cNCanonicalizerPdo_LookupSwitch6MpnMLookupSwitch__v_;
+text: .text%__1cMUnsafePutRawPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_;
+text: .text%__1cTNullCheckEliminatorPhandle_UnsafeOp6MpnIUnsafeOp__v_;
+text: .text%__1cLUnsafeRawOpPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBuilder.o;
+text: .text%__1cQNullCheckVisitorPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_;
+text: .text%__1cQNullCheckVisitorPdo_LookupSwitch6MpnMLookupSwitch__v_;
+text: .text%__1cIValueGenPdo_UnsafePutRaw6MpnMUnsafePutRaw__v_;
+text: .text%__1cLLIR_EmitterOput_raw_unsafe6MpnLLIR_OprDesc_2i2nJBasicType__v_;
+text: .text%__1cLLIR_EmitterMlong2address6MpnLLIR_OprDesc__nFRInfo__;
+text: .text%__1cILIR_ListNstore_mem_reg6MnFRInfo_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
+text: .text%__1cIValueGenPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_;
+text: .text%__1cLLIR_EmitterOget_raw_unsafe6MnFRInfo_pnLLIR_OprDesc_3inJBasicType__v_;
+text: .text%__1cILIR_ListMload_mem_reg6MpnLLIR_Address_nFRInfo_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
+text: .text%__1cIValueGenPdo_LookupSwitch6MpnMLookupSwitch__v_;
+text: .text%__1cUcreate_lookup_ranges6FpnMLookupSwitch__pnQLookupRangeArray__: c1_CodeGenerator_x86.o;
+text: .text%__1cLLIR_EmitterVlookupswitch_range_op6MpnLLIR_OprDesc_iipnKBlockBegin__v_;
+text: .text%__1cNSharedRuntimeEldiv6Fxx_x_;
+text: .text%Unsafe_GetObjectVolatile;
+text: .text%signalHandler;
+text: .text%JVM_handle_solaris_signal;
+text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_x86.o;
+text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_;
+text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_;
+text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_;
+text: .text%__1cFframebDsender_for_raw_compiled_frame6kMpnLRegisterMap__0_;
+text: .text%__1cNSafepointBlobYcaller_must_gc_arguments6kMpnKJavaThread__i_;
+text: .text%__1cUThreadSafepointStateYcaller_must_gc_arguments6kM_i_;
+text: .text%__1cbCCompiledCodeSafepointHandlerYcaller_must_gc_arguments6kM_i_: safepoint.o;
+text: .text%__1cFframeIpatch_pc6MpnGThread_pC_v_;
+text: .text%__1cNSafepointBlobbDpreserve_callee_argument_oops6MnFframe_pknLRegisterMap_pnKOopClosure__v_: codeBlob.o;
+text: .text%__1cSvframeStreamCommonbFfill_in_compiled_inlined_sender6M_i_;
+text: .text%__1cJFloatTypeEsize6kM_i_: c1_Canonicalizer.o;
+text: .text%__1cIValueGenNrelease_roots6MpnKValueStack__v_;
+text: .text%__1cSciExceptionHandlerLcatch_klass6M_pnPciInstanceKlass__;
+text: .text%__1cHciKlassNis_subtype_of6Mp0_i_;
+text: .text%__1cNSharedRuntimeDd2l6Fd_x_;
+text: .text%__1cOObjectConstantLis_constant6kM_i_: c1_ValueType.o;
+text: .text%__1cILIR_ListLstore_array6MipnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_;
+text: .text%__1cNLIR_AssemblerLconst2array6MpnJLIR_Const_pnLLIR_Address_nJBasicType_pnMCodeEmitInfo__v_;
+text: .text%__1cQInstanceConstantLis_constant6kM_i_: c1_ValueType.o;
diff --git a/make/solaris/makefiles/reorder_COMPILER1_i486 b/make/solaris/makefiles/reorder_COMPILER1_i486
index bab5b288d..caf8c0298 100644
--- a/make/solaris/makefiles/reorder_COMPILER1_i486
+++ b/make/solaris/makefiles/reorder_COMPILER1_i486
@@ -8,20 +8,20 @@ text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: arguments.o;
text: .text%__1cQAgentLibraryList2t6M_v_: arguments.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable.o;
text: .text%__1cFRInfo2t6M_v_: c1_AllocTable.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_AllocTable_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_AllocTable_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_AllocTable_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals.o;
text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CacheLocals_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CacheLocals_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Canonicalizer.o;
text: .text%__1cFRInfo2t6M_v_: c1_Canonicalizer.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator.o;
text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator_i486.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeStubs_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_CodeStubs_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeGenerator_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CodeGenerator_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_CodeStubs_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_CodeStubs_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compilation.o;
text: .text%__1cFRInfo2t6M_v_: c1_Compilation.o;
text: .text%__1cMelapsedTimer2t6M_v_: c1_Compilation.o;
@@ -29,9 +29,9 @@ text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Compiler.o;
text: .text%__1cFRInfo2t6M_v_: c1_Compiler.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap.o;
text: .text%__1cFRInfo2t6M_v_: c1_FrameMap.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_FrameMap_i486.o;
-text: .text%__1cKc1_RegMask2t6M_v_: c1_FrameMap_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_FrameMap_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_FrameMap_x86.o;
+text: .text%__1cKc1_RegMask2t6M_v_: c1_FrameMap_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_GraphBuilder.o;
text: .text%__1cFRInfo2t6M_v_: c1_GraphBuilder.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_IR.o;
@@ -43,41 +43,41 @@ text: .text%__1cFRInfo2t6M_v_: c1_InstructionPrinter.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items.o;
text: .text%__1cFRInfo2t6M_v_: c1_Items.o;
text: .text%__1cIHintItem2t6MpnJValueType_i_v_: c1_Items.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_Items_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Items_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Items_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIR.o;
text: .text%__1cFRInfo2t6M_v_: c1_LIR.o;
text: .text%__1cLLIR_OprFactHillegal6F_pnLLIR_OprDesc__: c1_LIR.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler.o;
text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIRAssembler_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIRAssembler_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter.o;
text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIREmitter_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIREmitter_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer.o;
text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Loops.o;
text: .text%__1cFRInfo2t6M_v_: c1_Loops.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_MacroAssembler_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_MacroAssembler_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_MacroAssembler_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_MacroAssembler_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Optimizer.o;
text: .text%__1cFRInfo2t6M_v_: c1_Optimizer.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo.o;
text: .text%__1cFRInfo2t6M_v_: c1_RInfo.o;
text: .text%__1cKc1_RegMask2t6M_v_: c1_RInfo.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_RInfo_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RInfo_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_RInfo_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc.o;
text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_RegAlloc_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_RegAlloc_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1.o;
text: .text%__1cFRInfo2t6M_v_: c1_Runtime1.o;
text: .text%__1cIiEntries2t6M_v_;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_Runtime1_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_Runtime1_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_Runtime1_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ScanBlocks.o;
text: .text%__1cFRInfo2t6M_v_: c1_ScanBlocks.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_ValueMap.o;
@@ -105,8 +105,8 @@ text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: fprofiler.o;
text: .text%__1cMelapsedTimer2t6M_v_: fprofiler.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame.o;
text: .text%__1cFRInfo2t6M_v_: frame.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame_i486.o;
-text: .text%__1cFRInfo2t6M_v_: frame_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: frame_x86.o;
+text: .text%__1cFRInfo2t6M_v_: frame_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: genCollectedHeap.o;
text: .text%__1cTAssertIsPermClosure2t6M_v_: genCollectedHeap.o;
text: .text%__1cRAlwaysTrueClosure2t6M_v_: genCollectedHeap.o;
@@ -117,8 +117,8 @@ text: .text%__1cNCellTypeStateImake_top6F_0_: generateOopMap.o;
text: .text%__1cMelapsedTimer2t6M_v_: generateOopMap.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter.o;
text: .text%__1cKEntryPoint2t6M_v_;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter_i486.o;
-text: .text%__1cFRInfo2t6M_v_: interpreter_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: interpreter_x86.o;
+text: .text%__1cFRInfo2t6M_v_: interpreter_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: java.o;
text: .text%__1cFRInfo2t6M_v_: java.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: jvmtiEnvBase.o;
@@ -151,16 +151,16 @@ text: .text%__1cNGrowableArray4CpnKMemoryPool__2t6Mii_v_: memoryService.o;
text: .text%__1cNGrowableArray4CpnNMemoryManager__2t6Mii_v_: memoryService.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: methodOop.o;
text: .text%__1cFRInfo2t6M_v_: methodOop.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nativeInst_i486.o;
-text: .text%__1cFRInfo2t6M_v_: nativeInst_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nativeInst_x86.o;
+text: .text%__1cFRInfo2t6M_v_: nativeInst_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: nmethod.o;
text: .text%__1cFRInfo2t6M_v_: nmethod.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: oopMap.o;
text: .text%__1cQDoNothingClosure2t6M_v_: oopMap.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris.o;
text: .text%__1cFRInfo2t6M_v_: os_solaris.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris_i486.o;
-text: .text%__1cFRInfo2t6M_v_: os_solaris_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: os_solaris_x86.o;
+text: .text%__1cFRInfo2t6M_v_: os_solaris_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: parGCAllocBuffer.o;
text: .text%__1cMarrayOopDescLheader_size6FnJBasicType__i_: parGCAllocBuffer.o;
text: .text%__1cRalign_object_size6Fi_i_: parGCAllocBuffer.o;
@@ -181,8 +181,8 @@ text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: runtimeService.o;
text: .text%__1cJTimeStamp2t6M_v_: runtimeService.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint.o;
text: .text%__1cFRInfo2t6M_v_: safepoint.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint_solaris_i486.o;
-text: .text%__1cFRInfo2t6M_v_: safepoint_solaris_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: safepoint_solaris_x86.o;
+text: .text%__1cFRInfo2t6M_v_: safepoint_solaris_x86.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedHeap.o;
text: .text%__1cTAssertIsPermClosure2t6M_v_: sharedHeap.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: sharedRuntime.o;
@@ -197,10 +197,10 @@ text: .text%__1cFRInfo2t6M_v_: vmStructs.o;
text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vm_version.o;
text: .text%__1cTAbstract_VM_VersionKvm_release6F_pkc_;
text: .text%__1cTAbstract_VM_VersionXinternal_vm_info_string6F_pkc_;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vtableStubs_i486.o;
-text: .text%__1cFRInfo2t6M_v_: vtableStubs_i486.o;
-text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer_i486.o;
-text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer_i486.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: vtableStubs_x86.o;
+text: .text%__1cFRInfo2t6M_v_: vtableStubs_x86.o;
+text: .text%__1cU__STATIC_CONSTRUCTOR6F_v_: c1_LIROptimizer_x86.o;
+text: .text%__1cFRInfo2t6M_v_: c1_LIROptimizer_x86.o;
text: .text%JNI_CreateJavaVM;
text: .text%__1cCosVatomic_xchg_bootstrap6Fipoi_i_;
text: .text%__1cHThreadsJcreate_vm6FpnOJavaVMInitArgs_pi_i_;
@@ -279,7 +279,7 @@ text: .text%__1cSThreadLocalStorageEinit6F_v_;
text: .text%__1cSThreadLocalStorageHpd_init6F_v_;
text: .text%__1cCosbDallocate_thread_local_storage6F_i_;
text: .text%__1cSThreadLocalStoragebCgenerate_code_for_get_thread6F_v_;
-text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_i486.o;
+text: .text%__1cRAllocateTLSOffset6F_v_: threadLS_solaris_x86.o;
text: .text%__1cPvm_init_globals6F_v_;
text: .text%__1cScheck_ThreadShadow6F_v_;
text: .text%__1cRcheck_basic_types6F_v_;
@@ -463,7 +463,7 @@ text: .text%__1cKMemoryPoolImax_size6kM_I_: memoryPool.o;
text: .text%__1cXresource_allocate_bytes6FI_pc_;
text: .text%__1cKCodeBuffer2t6MpCi_v_;
text: .text%__1cRAbstractAssembler2t6MpnKCodeBuffer__v_;
-text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_i486.o;
+text: .text%__1cYVM_Version_StubGeneratorTgenerate_getPsrInfo6M_pC_: vm_version_x86.o;
text: .text%__1cMStubCodeMark2t6MpnRStubCodeGenerator_pkc4_v_;
text: .text%__1cRStubCodeGeneratorLstub_prolog6MpnMStubCodeDesc__v_;
text: .text%__1cJAssemblerFpushl6MpnMRegisterImpl__v_;
@@ -497,14 +497,14 @@ text: .text%__1cFVTuneNregister_stub6FpkcpC3_v_;
text: .text%__1cFForteNregister_stub6FpkcpC3_v_;
text: .text%__1cKVM_VersionWget_processor_features6F_v_;
text: .text%__1cCosMsupports_sse6F_i_;
-text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_i486.o;
+text: .text%__1cVcheck_for_sse_support6F_v_: os_solaris_x86.o;
text: .text%jio_snprintf;
text: .text%jio_vsnprintf;
text: .text%__1cPlocal_vsnprintf6FpcIpkcpv_i_;
text: .text%__1cSstubRoutines_init16F_v_;
text: .text%__1cMStubRoutinesLinitialize16F_v_;
text: .text%__1cWStubGenerator_generate6FpnKCodeBuffer_i_v_;
-text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorbAgenerate_forward_exception6M_pC_: stubGenerator_x86.o;
text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCpnMRegisterImpl__v_;
text: .text%__1cOMacroAssemblerMcall_VM_leaf6MpCi_v_;
text: .text%__1cOMacroAssemblerRcall_VM_leaf_base6MpCi_v_;
@@ -525,7 +525,7 @@ text: .text%__1cJAssemblerEleal6MpnMRegisterImpl_nHAddress__v_;
text: .text%__1cJAssemblerEmovl6MnHAddress_i_v_;
text: .text%__1cOMacroAssemblerKverify_oop6MpnMRegisterImpl_pkc_v_;
text: .text%__1cJAssemblerDjmp6MpnMRegisterImpl_nJrelocInfoJrelocType__v_;
-text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorSgenerate_call_stub6MrpC_1_: stubGenerator_x86.o;
text: .text%__1cOMacroAssemblerFenter6M_v_;
text: .text%__1cJAssemblerEsubl6MpnMRegisterImpl_i_v_;
text: .text%__1cJAssemblerFtestl6MpnMRegisterImpl_2_v_;
@@ -534,14 +534,14 @@ text: .text%__1cJAssemblerEcall6MpnMRegisterImpl_nJrelocInfoJrelocType__v_;
text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_i_v_;
text: .text%__1cJAssemblerGfstp_s6MnHAddress__v_;
text: .text%__1cJAssemblerGfstp_d6MnHAddress__v_;
-text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorYgenerate_catch_exception6M_pC_: stubGenerator_x86.o;
text: .text%__1cJAssemblerDjmp6MpCnJrelocInfoJrelocType__v_;
-text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorUgenerate_atomic_xchg6M_pC_: stubGenerator_x86.o;
text: .text%__1cJAssemblerExchg6MpnMRegisterImpl_nHAddress__v_;
text: .text%__1cJAssemblerGpushad6M_v_;
text: .text%__1cJAssemblerFpopad6M_v_;
-text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_i486.o;
-text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorYgenerate_get_previous_fp6M_pC_: stubGenerator_x86.o;
+text: .text%__1cNStubGeneratorUgenerate_d2i_wrapper6MpC_1_: stubGenerator_x86.o;
text: .text%__1cOMacroAssemblerOpush_FPU_state6M_v_;
text: .text%__1cJAssemblerGfnsave6MnHAddress__v_;
text: .text%__1cJAssemblerFfwait6M_v_;
@@ -552,7 +552,7 @@ text: .text%__1cJAssemblerFffree6Mi_v_;
text: .text%__1cJAssemblerLemit_farith6Miii_v_;
text: .text%__1cOMacroAssemblerNpop_FPU_state6M_v_;
text: .text%__1cJAssemblerGfrstor6MnHAddress__v_;
-text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorUcreate_control_words6M_v_: stubGenerator_x86.o;
text: .text%__1cJTraceTime2T6M_v_;
text: .text%__1cNcarSpace_init6F_v_;
text: .text%__1cICarSpaceEinit6F_v_;
@@ -773,7 +773,7 @@ text: .text%__1cUInterpreterGeneratorVgenerate_counter_incr6MpnFLabel_22_v_;
text: .text%__1cJAssemblerEaddl6MpnMRegisterImpl_2_v_;
text: .text%__1cJAssemblerEcmpl6MpnMRegisterImpl_nHAddress__v_;
text: .text%__1cbCAbstractInterpreterGeneratorXbang_stack_shadow_pages6Mi_v_;
-text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_i486.o;
+text: .text%__1cOMacroAssemblerWbang_stack_with_offset6Mi_v_: interp_masm_x86.o;
text: .text%__1cZInterpreterMacroAssemblerTnotify_method_entry6M_v_;
text: .text%__1cUInterpreterGeneratorZgenerate_counter_overflow6MpC_v_;
text: .text%__1cJAssemblerEnegl6MpnMRegisterImpl__v_;
@@ -785,7 +785,7 @@ text: .text%__1cJAssemblerDorl6MpnMRegisterImpl_nHAddress__v_;
text: .text%__1cUInterpreterGeneratorUgenerate_empty_entry6M_pC_;
text: .text%__1cUInterpreterGeneratorXgenerate_accessor_entry6M_pC_;
text: .text%__1cJAssemblerEshrl6MpnMRegisterImpl_i_v_;
-text: .text%__1cLlog2_intptr6Fi_i_: interpreter_i486.o;
+text: .text%__1cLlog2_intptr6Fi_i_: interpreter_x86.o;
text: .text%__1cOMacroAssemblerQload_signed_byte6MpnMRegisterImpl_nHAddress__i_;
text: .text%__1cJAssemblerGmovsxb6MpnMRegisterImpl_nHAddress__v_;
text: .text%__1cOMacroAssemblerQload_signed_word6MpnMRegisterImpl_nHAddress__i_;
@@ -982,7 +982,7 @@ text: .text%__1cNTemplateTableJfloat_cmp6Fii_v_;
text: .text%__1cOMacroAssemblerIfcmp2int6MpnMRegisterImpl_i_v_;
text: .text%__1cNTemplateTableKdouble_cmp6Fi_v_;
text: .text%__1cNTemplateTableHif_0cmp6Fn0AJCondition__v_;
-text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_i486.o;
+text: .text%__1cFj_not6FnNTemplateTableJCondition__nJAssemblerJCondition__: templateTable_x86.o;
text: .text%__1cNTemplateTableGbranch6Fii_v_;
text: .text%__1cZInterpreterMacroAssemblerUprofile_taken_branch6MpnMRegisterImpl_2_v_;
text: .text%__1cZInterpreterMacroAssemblerNdispatch_only6MnITosState__v_;
@@ -1488,7 +1488,7 @@ text: .text%__1cKSharedInfoLset_regName6F_v_;
text: .text%__1cIRegAllocYinit_register_allocation6F_v_;
text: .text%__1cIFrameMapEinit6F_v_;
text: .text%__1cKc1_RegMaskKinit_masks6Fi_v_;
-text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_FrameMap_i486.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_FrameMap_x86.o;
text: .text%__1cNc1_AllocTableLinit_tables6F_v_;
text: .text%__1cIFrameMapOfirst_register6F_pnMRegisterImpl__;
text: .text%__1cIFrameMapLcpu_reg2rnr6FpnMRegisterImpl__i_;
@@ -1502,7 +1502,7 @@ text: .text%__1cKCodeBuffer2t6MiiiiiipnKBufferBlob_pnJrelocInfo_pnORelocateBuffe
text: .text%__1cKCodeBufferQalloc_relocation6MI_v_;
text: .text%__1cJOopMapSet2t6M_v_;
text: .text%__1cJAssemblerEsubl6MnHAddress_i_v_;
-text: .text%__1cTsave_live_registers6FpnOMacroAssembler_i_pnGOopMap__: c1_Runtime1_i486.o;
+text: .text%__1cTsave_live_registers6FpnOMacroAssembler_i_pnGOopMap__: c1_Runtime1_x86.o;
text: .text%__1cJAssemblerGfldenv6MnHAddress__v_;
text: .text%__1cGOopMap2t6Mii_v_;
text: .text%__1cGOopMapQset_callee_saved6MnHOptoRegEName_ii2_v_;
@@ -1564,10 +1564,10 @@ text: .text%__1cNStubAssemblerHcall_RT6MpnMRegisterImpl_2pCi_i_;
text: .text%__1cJStubFrame2T6M_v_;
text: .text%__1cIRuntime1Ygenerate_exception_throw6FpnNStubAssembler_pCpnMRegisterImpl__pnJOopMapSet__;
text: .text%__1cOMacroAssemblerLtlab_refill6MrnFLabel_22_v_;
-text: .text%__1cLlog2_intptr6Fi_i_: assembler_i486.o;
+text: .text%__1cLlog2_intptr6Fi_i_: assembler_x86.o;
text: .text%__1cOMacroAssemblerNeden_allocate6MpnMRegisterImpl_2i2rnFLabel__v_;
text: .text%__1cOMacroAssemblerLverify_tlab6M_v_;
-text: .text%__1cLlog2_intptr6Fi_i_: c1_Runtime1_i486.o;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_Runtime1_x86.o;
text: .text%__1cOMacroAssemblerNtlab_allocate6MpnMRegisterImpl_2i22rnFLabel__v_;
text: .text%__1cRC1_MacroAssemblerRinitialize_object6MpnMRegisterImpl_22i22_v_;
text: .text%__1cRC1_MacroAssemblerRinitialize_header6MpnMRegisterImpl_22_v_;
@@ -1581,7 +1581,7 @@ text: .text%__1cIiEntries2t6Miiii_v_;
text: .text%__1cRNativeGeneralJumpQjump_destination6kM_pC_;
text: .text%__1cJAssemblerOlocate_operand6FpCn0AMWhichOperand__1_;
text: .text%__1cIRuntime1Rgenerate_patching6FpnNStubAssembler_pC_pnJOopMapSet__;
-text: .text%__1cWrestore_live_registers6FpnOMacroAssembler__v_: c1_Runtime1_i486.o;
+text: .text%__1cWrestore_live_registers6FpnOMacroAssembler__v_: c1_Runtime1_x86.o;
text: .text%__1cNSafepointBlobGcreate6FpnKCodeBuffer_pnJOopMapSet_i_p0_;
text: .text%__1cNSafepointBlob2n6FII_pv_;
text: .text%__1cNSafepointBlob2t6MpnKCodeBuffer_ipnJOopMapSet_i_v_;
@@ -1778,8 +1778,8 @@ text: .text%__1cYsun_reflect_ConstantPoolPcompute_offsets6F_v_;
text: .text%__1cZsun_misc_AtomicLongCSImplPcompute_offsets6F_v_;
text: .text%__1cSstubRoutines_init26F_v_;
text: .text%__1cMStubRoutinesLinitialize26F_v_;
-text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_i486.o;
-text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_i486.o;
+text: .text%__1cNStubGeneratorYgenerate_throw_exception6MpkcpCi_3_: stubGenerator_x86.o;
+text: .text%__1cNStubGeneratorTgenerate_verify_oop6M_pC_: stubGenerator_x86.o;
text: .text%__1cJAssemblerEincl6MnHAddress__v_;
text: .text%__1cHThreadsDadd6FpnKJavaThread_i_v_;
text: .text%__1cNThreadServiceKadd_thread6FpnKJavaThread_i_v_;
@@ -3074,11 +3074,11 @@ text: .text%__1cEItemRget_jint_constant6kM_i_;
text: .text%__1cLLIR_EmitterRarithmetic_op_int6MnJBytecodesECode_pnLLIR_OprDesc_44nFRInfo__v_;
text: .text%__1cLLIR_EmitterNarithmetic_op6MnJBytecodesECode_pnLLIR_OprDesc_44inFRInfo_pnMCodeEmitInfo__v_;
text: .text%__1cLLIR_EmitterYstrength_reduce_multiply6MpnLLIR_OprDesc_i22_i_;
-text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter_i486.o;
-text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter_i486.o;
-text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter_i486.o;
+text: .text%__1cILIR_ListHreg2reg6MnFRInfo_1nJBasicType__v_: c1_LIREmitter_x86.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIREmitter_x86.o;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_LIREmitter_x86.o;
text: .text%__1cILIR_ListKshift_left6MpnLLIR_OprDesc_222_v_;
-text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter_i486.o;
+text: .text%__1cILIR_ListDsub6MpnLLIR_OprDesc_22pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o;
text: .text%__1cIValueGenWcan_inline_as_constant6MpnEItem__i_;
text: .text%__1cIRegAllocPget_register_rc6kMnFRInfo__i_;
text: .text%__1cLGetRefCountGdo_cpu6Mi_v_: c1_RegAlloc.o;
@@ -3098,7 +3098,7 @@ text: .text%__1cMLIR_OpBranch2t6Mn0ANLIR_Condition_pnICodeStub_pnMCodeEmitInfo__
text: .text%__1cLLIR_EmitterMindexed_load6MnFRInfo_nJBasicType_pnLLIR_OprDesc_4pnMCodeEmitInfo__v_;
text: .text%__1cLLIR_EmitterNarray_address6MpnLLIR_OprDesc_2inJBasicType__pnLLIR_Address__;
text: .text%__1cLLIR_AddressFscale6FnJBasicType__n0AFScale__;
-text: .text%__1cILIR_ListEmove6MpnLLIR_Address_pnLLIR_OprDesc_pnMCodeEmitInfo__v_: c1_LIREmitter_i486.o;
+text: .text%__1cILIR_ListEmove6MpnLLIR_Address_pnLLIR_OprDesc_pnMCodeEmitInfo__v_: c1_LIREmitter_x86.o;
text: .text%__1cIRegAllocNoops_in_spill6kM_pnIintStack__;
text: .text%__1cIRegAllocRoops_in_registers6kM_pnPRInfoCollection__;
text: .text%__1cIValueGenbDsafepoint_poll_needs_register6F_i_;
@@ -3137,9 +3137,9 @@ text: .text%__1cHLIR_Op1Fvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cPRegisterManagerElock6MnFRInfo__v_;
text: .text%__1cHLIR_Op2Fvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cMLIR_OpBranchFvisit6MpnQLIR_OpVisitState__v_;
-text: .text%__1cORangeCheckStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
-text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_CodeStubs_i486.o;
-text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeStubs_i486.o;
+text: .text%__1cORangeCheckStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_CodeStubs_x86.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeStubs_x86.o;
text: .text%__1cNc1_AllocTableFmerge6Mp0_v_;
text: .text%__1cGLIR_OpFvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cQLIR_LocalCachingXcache_locals_for_blocks6MpnJBlockList_pnPRegisterManager_i_pnMLocalMapping__;
@@ -3201,7 +3201,7 @@ text: .text%__1cJLabelListIindex_of6kMkpnFLabel__i_: c1_LIROptimizer.o;
text: .text%__1cRLIR_PeepholeStateYset_disable_optimization6Mi_v_;
text: .text%__1cLLIR_OpLabelJemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerMemit_opLabel6MpnLLIR_OpLabel__v_;
-text: .text%__1cNLIR_OptimizerFvisit6M_v_: c1_LIROptimizer_i486.o;
+text: .text%__1cNLIR_OptimizerFvisit6M_v_: c1_LIROptimizer_x86.o;
text: .text%__1cHLIR_Op0Jemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerIemit_op06MpnHLIR_Op0__v_;
text: .text%__1cHLIR_Op2Jemit_code6MpnVLIR_AbstractAssembler__v_;
@@ -3225,7 +3225,7 @@ text: .text%__1cNLIR_OptimizerRreplace_stack_opr6MpnLLIR_OprDesc__2_;
text: .text%__1cNLIR_OptimizerNoptimize_move6MpnHLIR_Op1_rpnLLIR_OprDesc_5_i_;
text: .text%__1cRLIR_PeepholeStatebFequivalent_register_or_constant6MpnLLIR_OprDesc__2_;
text: .text%__1cRLIR_PeepholeStateOequivalent_opr6MpnLLIR_OprDesc__2_;
-text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer_i486.o;
+text: .text%__1cNLIR_OptimizerKmaybe_opto6MpnLLIR_OprDesc_2_2_: c1_LIROptimizer_x86.o;
text: .text%__1cNLIR_OptimizerMis_cache_reg6MpnLLIR_OprDesc__i_;
text: .text%__1cMLocalMappingMis_cache_reg6kMpnLLIR_OprDesc__i_;
text: .text%__1cMLocalMappingMis_cache_reg6kMnFRInfo__i_;
@@ -3294,13 +3294,13 @@ text: .text%__1cNLIR_AssemblerVsetup_locals_at_entry6M_v_;
text: .text%__1cIFrameMapYsignature_type_array_for6FpknIciMethod__pnNBasicTypeList__;
text: .text%__1cIFrameMapScalling_convention6FpknIciMethod_pnIintArray__pnRCallingConvention__;
text: .text%__1cIFrameMapScalling_convention6FirknOBasicTypeArray_pnIintArray__pnRCallingConvention__;
-text: .text%__1cIintArray2t6Mki1_v_: c1_FrameMap_i486.o;
+text: .text%__1cIintArray2t6Mki1_v_: c1_FrameMap_x86.o;
text: .text%__1cIFrameMapRname_for_argument6Fi_i_;
text: .text%__1cIFrameMapSfp_offset_for_name6kMiii_i_;
text: .text%__1cIFrameMapPnum_local_names6kM_i_;
text: .text%__1cIFrameMapNlocal_to_slot6kMii_i_;
text: .text%__1cIFrameMapSfp_offset_for_slot6kMi_i_;
-text: .text%__1cQArgumentLocation2t6Mci_v_: c1_FrameMap_i486.o;
+text: .text%__1cQArgumentLocation2t6Mci_v_: c1_FrameMap_x86.o;
text: .text%__1cQArgumentLocationSset_stack_location6Mi_v_;
text: .text%__1cIFrameMapQaddress_for_name6kMiii_nHAddress__;
text: .text%__1cIFrameMapQmake_new_address6kMi_nHAddress__;
@@ -3321,12 +3321,12 @@ text: .text%__1cNLIR_AssemblerbIadd_debug_info_for_null_check_here6MpnMCodeEmitI
text: .text%__1cNLIR_AssemblerLcode_offset6kM_i_;
text: .text%__1cNLIR_AssemblerbDadd_debug_info_for_null_check6MipnMCodeEmitInfo__v_;
text: .text%__1cNLIR_AssemblerOemit_code_stub6MpnICodeStub__v_;
-text: .text%__1cVImplicitNullCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cVImplicitNullCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerCpc6kM_pC_;
-text: .text%__1cICodeStubLset_code_pc6MpC_v_: c1_CodeStubs_i486.o;
-text: .text%__1cICodeStubMis_call_stub6kM_i_: c1_CodeStubs_i486.o;
+text: .text%__1cICodeStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o;
+text: .text%__1cICodeStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler.o;
-text: .text%__1cORangeCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cORangeCheckStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerOsafepoint_poll6MnFRInfo_pnMCodeEmitInfo__v_;
text: .text%__1cNLIR_AssemblerZadd_debug_info_for_branch6MpnMCodeEmitInfo__v_;
text: .text%__1cPpoll_RelocationEtype6M_nJrelocInfoJrelocType__: codeBlob.o;
@@ -3396,7 +3396,7 @@ text: .text%__1cNLIR_AssemblerWemit_exception_handler6M_i_;
text: .text%__1cRC1_MacroAssemblerRexception_handler6Mii_v_;
text: .text%__1cNLIR_AssemblerPemit_call_stubs6M_v_;
text: .text%__1cNLIR_AssemblerbCmaybe_adjust_stack_alignment6MpnIciMethod__v_;
-text: .text%__1cKreal_index6FpnIFrameMap_i_i_: c1_LIRAssembler_i486.o;
+text: .text%__1cKreal_index6FpnIFrameMap_i_i_: c1_LIRAssembler_x86.o;
text: .text%__1cLCompilationbEgenerate_exception_range_table6M_v_;
text: .text%__1cOExceptionScopeGequals6kMp0_i_;
text: .text%__1cLCompilationbBadd_exception_range_entries6MiipnOExceptionScope_ip2pi_v_;
@@ -3582,10 +3582,10 @@ text: .text%__1cLNewInstanceOas_NewInstance6M_p0_: c1_Instruction.o;
text: .text%__1cIValueGenQexceptionPcRInfo6F_nFRInfo__;
text: .text%__1cILIR_ListPthrow_exception6MnFRInfo_1pnMCodeEmitInfo__v_: c1_CodeGenerator.o;
text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator.o;
-text: .text%__1cPNewInstanceStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cPNewInstanceStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cOLIR_OpJavaCallFvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cQLIR_OpVisitStateGappend6MnFRInfo__v_: c1_LIR.o;
-text: .text%__1cOStaticCallStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cOStaticCallStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cIFrameMapWcaller_save_cpu_reg_at6Fi_pnLLIR_OprDesc__;
text: .text%__1cLInstructionLas_NewArray6M_pnINewArray__: c1_Instruction.o;
text: .text%__1cIVoidTypeDtag6kM_nIValueTag__: c1_ValueType.o;
@@ -3604,12 +3604,12 @@ text: .text%__1cOoop_RelocationJpack_data6M_i_;
text: .text%__1cNLIR_AssemblerPpatching_epilog6MpnMPatchingStub_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_;
text: .text%__1cMPatchingStubHinstall6MpnOMacroAssembler_nHLIR_Op1NLIR_PatchCode_pnMRegisterImpl_pnMCodeEmitInfo__v_: c1_LIRAssembler.o;
text: .text%__1cNLIR_AssemblerUappend_patching_stub6MpnMPatchingStub__v_;
-text: .text%__1cPNewInstanceStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cPNewInstanceStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerJemit_call6MpnOLIR_OpJavaCall__v_;
text: .text%__1cNLIR_AssemblerKalign_call6MnILIR_Code__v_;
-text: .text%__1cICodeStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
-text: .text%__1cOStaticCallStubLset_code_pc6MpC_v_: c1_CodeStubs_i486.o;
-text: .text%__1cOStaticCallStubMis_call_stub6kM_i_: c1_CodeStubs_i486.o;
+text: .text%__1cICodeStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
+text: .text%__1cOStaticCallStubLset_code_pc6MpC_v_: c1_CodeStubs_x86.o;
+text: .text%__1cOStaticCallStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerEcall6MpCnJrelocInfoJrelocType_pnMCodeEmitInfo__v_;
text: .text%__1cbBopt_virtual_call_RelocationEtype6M_nJrelocInfoJrelocType__: relocInfo.o;
text: .text%__1cKRelocationJpack_data6M_i_: relocInfo.o;
@@ -4010,15 +4010,15 @@ text: .text%__1cJTypeCheckPinput_values_do6MpFppnLInstruction__v_v_: c1_GraphBui
text: .text%__1cQNullCheckVisitorNdo_InstanceOf6MpnKInstanceOf__v_;
text: .text%__1cQNullCheckVisitorMdo_CheckCast6MpnJCheckCast__v_;
text: .text%__1cIValueGenNdo_InstanceOf6MpnKInstanceOf__v_;
-text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator_i486.o;
+text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_CodeGenerator_x86.o;
text: .text%__1cLLIR_EmitterNinstanceof_op6MpnLLIR_OprDesc_2pnHciKlass_nFRInfo_5ipnMCodeEmitInfo__v_;
text: .text%__1cILIR_ListKinstanceof6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo__v_;
text: .text%__1cPLIR_OpTypeCheck2t6MnILIR_Code_pnLLIR_OprDesc_3pnHciKlass_33ipnMCodeEmitInfo_7pnICodeStub__v_;
text: .text%__1cIValueGenMdo_CheckCast6MpnJCheckCast__v_;
text: .text%__1cILIR_ListJcheckcast6MpnLLIR_OprDesc_2pnHciKlass_22ipnMCodeEmitInfo_6pnICodeStub__v_;
-text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator_i486.o;
+text: .text%__1cILIR_ListJsafepoint6MnFRInfo_pnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o;
text: .text%__1cPLIR_OpTypeCheckFvisit6MpnQLIR_OpVisitState__v_;
-text: .text%__1cTSimpleExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cTSimpleExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cPLIR_OpTypeCheckJemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_;
text: .text%__1cLLIR_OprDescIsize_for6FnJBasicType__n0AHOprSize__: c1_LIROptimizer.o;
@@ -4026,7 +4026,7 @@ text: .text%__1cIintArrayIindex_of6kMki_i_: c1_LIROptimizer.o;
text: .text%__1cNLIR_AssemblerQemit_opTypeCheck6MpnPLIR_OpTypeCheck__v_;
text: .text%__1cIciObjectIencoding6M_pnI_jobject__;
text: .text%__1cJAssemblerEcmpl6MnHAddress_pnI_jobject__v_;
-text: .text%__1cTSimpleExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cTSimpleExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cTSimpleExceptionStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cJLoadFieldIis_equal6kMpnLInstruction__i_: c1_Instruction.o;
text: .text%__1cJLoadFieldMas_LoadField6M_p0_: c1_Instruction.o;
@@ -4194,7 +4194,7 @@ text: .text%__1cLLIR_EmitterOnew_type_array6MnFRInfo_nJBasicType_pnLLIR_OprDesc_
text: .text%__1cQNewTypeArrayStub2t6MnFRInfo_11pnMCodeEmitInfo__v_;
text: .text%__1cQciTypeArrayKlassEmake6FnJBasicType__p0_;
text: .text%__1cQciTypeArrayKlassJmake_impl6FnJBasicType__p0_;
-text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter_i486.o;
+text: .text%__1cILIR_ListHoop2reg6MpnI_jobject_nFRInfo__v_: c1_LIREmitter_x86.o;
text: .text%__1cILIR_ListOallocate_array6MnFRInfo_11111nJBasicType_1pnICodeStub__v_;
text: .text%__1cIValueGenMdo_Intrinsic6MpnJIntrinsic__v_;
text: .text%__1cIValueGenMdo_ArrayCopy6MpnJIntrinsic__v_;
@@ -4209,12 +4209,12 @@ text: .text%__1cLInstructionNdeclared_type6kM_pnGciType__: c1_Instruction.o;
text: .text%__1cRpositive_constant6FpnLInstruction__i_: c1_CodeGenerator.o;
text: .text%__1cLArrayLengthOas_ArrayLength6M_p0_: c1_GraphBuilder.o;
text: .text%__1cQis_constant_zero6FpnLInstruction__i_: c1_CodeGenerator.o;
-text: .text%__1cILIR_ListJarraycopy6MpnLLIR_OprDesc_22222pnMciArrayKlass_ipnMCodeEmitInfo__v_: c1_CodeGenerator_i486.o;
+text: .text%__1cILIR_ListJarraycopy6MpnLLIR_OprDesc_22222pnMciArrayKlass_ipnMCodeEmitInfo__v_: c1_CodeGenerator_x86.o;
text: .text%__1cLLIR_EmitterNwrite_barrier6MpnLLIR_OprDesc_2_v_;
-text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter_i486.o;
+text: .text%__1cILIR_ListUunsigned_shift_right6MnFRInfo_i1_v_: c1_LIREmitter_x86.o;
text: .text%__1cILIR_ListUunsigned_shift_right6MpnLLIR_OprDesc_222_v_;
text: .text%__1cQLIR_OpAllocArrayFvisit6MpnQLIR_OpVisitState__v_;
-text: .text%__1cQNewTypeArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cQNewTypeArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cPLIR_OpArrayCopyFvisit6MpnQLIR_OpVisitState__v_;
text: .text%__1cQLIR_OpAllocArrayJemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerQemit_alloc_array6MpnQLIR_OpAllocArray__v_;
@@ -4229,12 +4229,12 @@ text: .text%__1cNLIR_AssemblerQemit_alloc_array6MpnQLIR_OpAllocArray__v_;
text: .text%__1cNLIR_AssemblerSarray_element_size6kMnJBasicType__nHAddressLScaleFactor__;
text: .text%__1cRC1_MacroAssemblerOallocate_array6MpnMRegisterImpl_222inHAddressLScaleFactor_2rnFLabel__v_;
text: .text%__1cRC1_MacroAssemblerMtry_allocate6MpnMRegisterImpl_2i22rnFLabel__v_;
-text: .text%__1cQNewTypeArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cQNewTypeArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerOemit_arraycopy6MpnPLIR_OpArrayCopy__v_;
text: .text%__1cMciArrayKlassMelement_type6M_pnGciType__;
text: .text%__1cNArrayCopyStub2t6MpnMCodeEmitInfo_pnOStaticCallStub__v_;
text: .text%__1cFRInfoMset_word_reg6MkpnMRegisterImpl__v_;
-text: .text%__1cNArrayCopyStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cNArrayCopyStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerOpush_parameter6MpnMRegisterImpl_i_v_;
text: .text%__1cQNewTypeArrayStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cNArrayCopyStubJemit_code6MpnNLIR_Assembler__v_;
@@ -4295,14 +4295,14 @@ text: .text%__1cLLIR_EmitterIshift_op6MnJBytecodesECode_nFRInfo_pnLLIR_OprDesc_5
text: .text%__1cILIR_ListKshift_left6MnFRInfo_i1_v_: c1_LIREmitter.o;
text: .text%__1cILIR_ListKlogical_or6MnFRInfo_pnLLIR_OprDesc_1_v_: c1_LIREmitter.o;
text: .text%__1cOLIR_OpAllocObjFvisit6MpnQLIR_OpVisitState__v_;
-text: .text%__1cSNewObjectArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cSNewObjectArrayStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cOLIR_OpAllocObjJemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_;
text: .text%__1cNLIR_AssemblerOemit_alloc_obj6MpnOLIR_OpAllocObj__v_;
text: .text%__1cRC1_MacroAssemblerPallocate_object6MpnMRegisterImpl_22ii2rnFLabel__v_;
text: .text%__1cNLIR_AssemblerOmembar_release6M_v_;
text: .text%__1cNLIR_AssemblerGmembar6M_v_;
-text: .text%__1cSNewObjectArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cSNewObjectArrayStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerOmembar_acquire6M_v_;
text: .text%__1cEBaseHas_Base6M_p0_: c1_IR.o;
text: .text%__1cNLIR_AssemblerOemit_osr_entry6MpnHIRScope_ipnFLabel_i_v_;
@@ -4708,11 +4708,11 @@ text: .text%__1cILIR_ListLshift_right6MpnLLIR_OprDesc_222_v_;
text: .text%__1cIValueGenLdo_NegateOp6MpnINegateOp__v_;
text: .text%__1cLLIR_EmitterGnegate6MnFRInfo_pnLLIR_OprDesc__v_;
text: .text%__1cILIR_ListGnegate6MnFRInfo_1_v_: c1_LIREmitter.o;
-text: .text%__1cXArrayStoreExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
-text: .text%__1cXArrayStoreExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cXArrayStoreExceptionStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cXArrayStoreExceptionStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNLIR_AssemblerEleal6MpnLLIR_OprDesc_2_v_;
text: .text%__1cNLIR_AssemblerGnegate6MpnLLIR_OprDesc_2_v_;
-text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler_i486.o;
+text: .text%__1cNCodeStubArrayIindex_of6kMkpnICodeStub__i_: c1_LIRAssembler_x86.o;
text: .text%__1cXArrayStoreExceptionStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cIRuntime1Tresolve_static_call6FpnKJavaThread_pnHoopDesc__pC_;
text: .text%__1cSCompiledStaticCallNcompute_entry6FnMmethodHandle_rnOStaticCallInfo__v_;
@@ -4788,7 +4788,7 @@ text: .text%__1cNLIR_AssemblerIfpu_push6MnFRInfo__v_;
text: .text%__1cIFrameMapLFpuStackSimEpush6Mi_v_;
text: .text%__1cNLIR_AssemblerKfpu_on_tos6MnFRInfo__v_;
text: .text%__1cIFrameMapLFpuStackSimPoffset_from_tos6kMi_i_;
-text: .text%__1cIintArrayIindex_of6kMki_i_: c1_FrameMap_i486.o;
+text: .text%__1cIintArrayIindex_of6kMki_i_: c1_FrameMap_x86.o;
text: .text%__1cNLIR_AssemblerHfpu_pop6MnFRInfo__v_;
text: .text%__1cIFrameMapLFpuStackSimDpop6Mi_i_;
text: .text%__1cNLIR_AssemblerKround32_op6MpnLLIR_OprDesc_2_v_;
@@ -4797,7 +4797,7 @@ text: .text%__1cNLIR_AssemblerJreset_FPU6M_v_;
text: .text%__1cNLIR_AssemblerIemit_op36MpnHLIR_Op3__v_;
text: .text%__1cNLIR_AssemblerParithmetic_idiv6MnILIR_Code_pnLLIR_OprDesc_333pnMCodeEmitInfo__v_;
text: .text%__1cNLIR_AssemblerXadd_debug_info_for_div06MipnMCodeEmitInfo__v_;
-text: .text%__1cNDivByZeroStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cNDivByZeroStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cNDivByZeroStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cIciObjectSis_obj_array_klass6M_i_: ciTypeArrayKlass.o;
text: .text%__1cLInstructionOas_ArrayLength6M_pnLArrayLength__: c1_GraphBuilder.o;
@@ -4874,12 +4874,12 @@ text: .text%__1cFKlassQup_cast_abstract6M_p0_;
text: .text%__1cRComputeEntryStackHdo_byte6M_v_: generateOopMap.o;
text: .text%__1cNSharedRuntimeDd2i6Fd_i_;
text: .text%__1cSInterpreterRuntimeWslow_signature_handler6FpnKJavaThread_pnNmethodOopDesc_pi5_pC_;
-text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_i486.o;
-text: .text%__1cUSlowSignatureHandlerLpass_object6M_v_: interpreterRT_i486.o;
-text: .text%__1cXNativeSignatureIteratorIdo_array6Mii_v_: interpreterRT_i486.o;
-text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_i486.o;
-text: .text%__1cUSlowSignatureHandlerIpass_int6M_v_: interpreterRT_i486.o;
-text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_i486.o;
+text: .text%__1cXNativeSignatureIteratorJdo_object6Mii_v_: interpreterRT_x86.o;
+text: .text%__1cUSlowSignatureHandlerLpass_object6M_v_: interpreterRT_x86.o;
+text: .text%__1cXNativeSignatureIteratorIdo_array6Mii_v_: interpreterRT_x86.o;
+text: .text%__1cXNativeSignatureIteratorGdo_int6M_v_: interpreterRT_x86.o;
+text: .text%__1cUSlowSignatureHandlerIpass_int6M_v_: interpreterRT_x86.o;
+text: .text%__1cXNativeSignatureIteratorHdo_bool6M_v_: interpreterRT_x86.o;
text: .text%jni_GetFloatArrayRegion: jni.o;
text: .text%jni_GetCharArrayRegion: jni.o;
text: .text%jni_SetFloatField: jni.o;
@@ -4906,8 +4906,8 @@ text: .text%__1cLLIR_EmitterQreturn_op_prolog6Mi_v_;
text: .text%__1cLLIR_EmitterMmonitor_exit6MnFRInfo_11i_v_;
text: .text%__1cILIR_ListNunlock_object6MnFRInfo_11pnICodeStub__v_;
text: .text%__1cKLIR_OpLockFvisit6MpnQLIR_OpVisitState__v_;
-text: .text%__1cQMonitorEnterStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
-text: .text%__1cRMonitorAccessStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_i486.o;
+text: .text%__1cQMonitorEnterStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
+text: .text%__1cRMonitorAccessStubFvisit6MpnQLIR_OpVisitState__v_: c1_CodeStubs_x86.o;
text: .text%__1cKLIR_OpLockJemit_code6MpnVLIR_AbstractAssembler__v_;
text: .text%__1cNLIR_OptimizerJemit_lock6MpnKLIR_OpLock__v_;
text: .text%__1cNLIR_AssemblerPmonitor_address6MinFRInfo__v_;
@@ -4915,7 +4915,7 @@ text: .text%__1cIFrameMapbEaddress_for_monitor_lock_index6kMi_nHAddress__;
text: .text%__1cIFrameMapbAfp_offset_for_monitor_lock6kMi_i_;
text: .text%__1cNLIR_AssemblerJemit_lock6MpnKLIR_OpLock__v_;
text: .text%__1cRC1_MacroAssemblerLlock_object6MpnMRegisterImpl_22rnFLabel__v_;
-text: .text%__1cQMonitorEnterStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_i486.o;
+text: .text%__1cQMonitorEnterStubEinfo6kM_pnMCodeEmitInfo__: c1_CodeStubs_x86.o;
text: .text%__1cIFrameMapWmonitor_object_regname6kMi_nHOptoRegEName__;
text: .text%__1cIFrameMapbCfp_offset_for_monitor_object6kMi_i_;
text: .text%__1cMCodeEmitInfobHlocation_for_monitor_object_index6Mi_nILocation__;
@@ -4925,7 +4925,7 @@ text: .text%__1cIFrameMapbFlocation_for_monitor_lock_index6kMipnILocation__i_;
text: .text%__1cMMonitorValue2t6MpnKScopeValue_nILocation__v_;
text: .text%__1cMMonitorValueIwrite_on6MpnUDebugInfoWriteStream__v_;
text: .text%__1cRC1_MacroAssemblerNunlock_object6MpnMRegisterImpl_22rnFLabel__v_;
-text: .text%__1cPMonitorExitStubMis_call_stub6kM_i_: c1_CodeStubs_i486.o;
+text: .text%__1cPMonitorExitStubMis_call_stub6kM_i_: c1_CodeStubs_x86.o;
text: .text%__1cQMonitorEnterStubJemit_code6MpnNLIR_Assembler__v_;
text: .text%__1cNLIR_AssemblerRload_receiver_reg6MpnMRegisterImpl__v_;
text: .text%__1cNLIR_AssemblerLmonitorexit6MnFRInfo_1pnMRegisterImpl_i3_v_;
@@ -5168,7 +5168,7 @@ text: .text%__1cFRInfoOas_register_lo6kM_pnMRegisterImpl__;
text: .text%__1cCosHrealloc6FpvI_1_;
text: .text%Unsafe_GetNativeFloat;
text: .text%__1cIValueGenQdo_currentThread6MpnJIntrinsic__v_;
-text: .text%__1cILIR_ListKget_thread6MnFRInfo__v_: c1_CodeGenerator_i486.o;
+text: .text%__1cILIR_ListKget_thread6MnFRInfo__v_: c1_CodeGenerator_x86.o;
text: .text%__1cNLIR_AssemblerKget_thread6MpnLLIR_OprDesc__v_;
text: .text%__1cIValueGenSload_item_patching6MpnHIRScope_ipnEItem_pnKValueStack_pnOExceptionScope__v_;
text: .text%__1cEItemUget_jobject_constant6kM_pnIciObject__;
@@ -5246,7 +5246,7 @@ text: .text%__1cGThreadLnmethods_do6M_v_;
text: .text%__1cFframeLnmethods_do6M_v_;
text: .text%__1cFframeVnmethods_code_blob_do6M_v_;
text: .text%__1cILIR_ListEidiv6MnFRInfo_i11pnMCodeEmitInfo__v_;
-text: .text%__1cLlog2_intptr6Fi_i_: c1_LIRAssembler_i486.o;
+text: .text%__1cLlog2_intptr6Fi_i_: c1_LIRAssembler_x86.o;
text: .text%__1cONMethodSweeperPprocess_nmethod6FpnHnmethod__v_;
text: .text%__1cHnmethodPis_locked_by_vm6kM_i_: nmethod.o;
text: .text%__1cHnmethodLis_unloaded6kM_i_: nmethod.o;
@@ -5423,13 +5423,13 @@ text: .text%__1cIValueGenPdo_UnsafeGetRaw6MpnMUnsafeGetRaw__v_;
text: .text%__1cLLIR_EmitterOget_raw_unsafe6MnFRInfo_pnLLIR_OprDesc_3inJBasicType__v_;
text: .text%__1cILIR_ListMload_mem_reg6MpnLLIR_Address_nFRInfo_nJBasicType_pnMCodeEmitInfo_nHLIR_Op1NLIR_PatchCode__v_;
text: .text%__1cIValueGenPdo_LookupSwitch6MpnMLookupSwitch__v_;
-text: .text%__1cUcreate_lookup_ranges6FpnMLookupSwitch__pnQLookupRangeArray__: c1_CodeGenerator_i486.o;
+text: .text%__1cUcreate_lookup_ranges6FpnMLookupSwitch__pnQLookupRangeArray__: c1_CodeGenerator_x86.o;
text: .text%__1cLLIR_EmitterVlookupswitch_range_op6MpnLLIR_OprDesc_iipnKBlockBegin__v_;
text: .text%__1cNSharedRuntimeEldiv6Fxx_x_;
text: .text%Unsafe_GetObjectVolatile;
text: .text%signalHandler;
text: .text%JVM_handle_solaris_signal;
-text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_i486.o;
+text: .text%__1cKJavaThreadUin_stack_yellow_zone6MpC_i_: os_solaris_x86.o;
text: .text%__1cICodeBlobRis_at_poll_return6MpC_i_;
text: .text%__1cUSafepointSynchronizebDhandle_polling_page_exception6FpnKJavaThread__pC_;
text: .text%__1cbCCompiledCodeSafepointHandlerbDhandle_polling_page_exception6M_pC_;
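Note: the reorder entries above are Sun Studio mangled symbol names; this change only retargets them from the old *_i486.o translation units to the merged *_x86.o ones. Several of them are per-object instantiations of the inlined log2_intptr helper, which computes the floor of a base-2 logarithm. A minimal sketch of that computation, offered as an illustration rather than the HotSpot implementation:

    #include <stdint.h>

    // floor(log2(x)) for x > 0; for example 8 yields 3 and 12 also yields 3.
    static int log2_floor(intptr_t x) {
      int result = -1;
      while (x > 0) {
        x >>= 1;     // shift out one bit per iteration
        result++;
      }
      return result;
    }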
diff --git a/make/solaris/makefiles/sa.make b/make/solaris/makefiles/sa.make
index 6d700b234..f8c1bf416 100644
--- a/make/solaris/makefiles/sa.make
+++ b/make/solaris/makefiles/sa.make
@@ -37,8 +37,9 @@ GENERATED = ../generated
SA_CLASSPATH = $(BOOT_JAVA_HOME)/lib/tools.jar
# gnumake 3.78.1 does not accept the *s that
-# are in AGENT_ALLFILES, so use the shell to expand them
-AGENT_ALLFILES := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_ALLFILES))
+# are in AGENT_FILES1 and AGENT_FILES2, so use the shell to expand them
+AGENT_FILES1 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES1))
+AGENT_FILES2 := $(shell /usr/bin/test -d $(AGENT_DIR) && /bin/ls $(AGENT_FILES2))
SA_CLASSDIR = $(GENERATED)/saclasses
@@ -52,7 +53,7 @@ all:
$(MAKE) -f sa.make $(GENERATED)/sa-jdi.jar; \
fi
-$(GENERATED)/sa-jdi.jar: $(AGENT_ALLFILES)
+$(GENERATED)/sa-jdi.jar: $(AGENT_FILES1) $(AGENT_FILES2)
$(QUIETLY) echo "Making $@";
$(QUIETLY) if [ "$(BOOT_JAVA_HOME)" = "" ]; then \
echo "ALT_BOOTDIR, BOOTDIR or JAVA_HOME needs to be defined to build SA"; \
@@ -66,9 +67,17 @@ $(GENERATED)/sa-jdi.jar: $(AGENT_ALLFILES)
$(QUIETLY) if [ ! -d $(SA_CLASSDIR) ] ; then \
mkdir -p $(SA_CLASSDIR); \
fi
- $(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) $(AGENT_ALLFILES)
+ $(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1)
+ $(QUIETLY) $(COMPILE.JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2)
+
$(QUIETLY) $(COMPILE.RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo "$(SA_BUILD_VERSION_PROP)" > $(SA_PROPERTIES)
+ $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
+ $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
+ $(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+ $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
+ $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
+ $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
$(QUIETLY) $(RUN.JAR) cf $@ -C $(SA_CLASSDIR)/ .
$(QUIETLY) $(RUN.JAR) uf $@ -C $(AGENT_SRC_DIR) META-INF/services/com.sun.jdi.connect.Connector
$(QUIETLY) $(RUN.JAVAH) -classpath $(SA_CLASSDIR) -d $(GENERATED) -jni sun.jvm.hotspot.debugger.proc.ProcDebuggerLocal
diff --git a/make/windows/makefiles/sa.make b/make/windows/makefiles/sa.make
index 481b5149d..57fd493f8 100644
--- a/make/windows/makefiles/sa.make
+++ b/make/windows/makefiles/sa.make
@@ -49,15 +49,22 @@ SA_PROPERTIES = $(SA_CLASSDIR)\sa.properties
default:: $(GENERATED)\sa-jdi.jar
-$(GENERATED)\sa-jdi.jar: $(AGENT_ALLFILES:/=\)
+$(GENERATED)\sa-jdi.jar: $(AGENT_FILES1:/=\) $(AGENT_FILES2:/=\)
@if not exist $(SA_CLASSDIR) mkdir $(SA_CLASSDIR)
@echo ...Building sa-jdi.jar
@echo ...$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) ....
- @$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -g -d $(SA_CLASSDIR) $(AGENT_ALLFILES:/=\)
+ @$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES1:/=\)
+ @$(COMPILE_JAVAC) -source 1.4 -classpath $(SA_CLASSPATH) -sourcepath $(AGENT_SRC_DIR) -g -d $(SA_CLASSDIR) $(AGENT_FILES2:/=\)
$(COMPILE_RMIC) -classpath $(SA_CLASSDIR) -d $(SA_CLASSDIR) sun.jvm.hotspot.debugger.remote.RemoteDebuggerServer
$(QUIETLY) echo $(SA_BUILD_VERSION_PROP) > $(SA_PROPERTIES)
$(RUN_JAR) cf $@ -C saclasses .
$(RUN_JAR) uf $@ -C $(AGENT_SRC_DIR:/=\) META-INF\services\com.sun.jdi.connect.Connector
+ $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql/sa.js
+ $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/utilities/soql/sa.js $(SA_CLASSDIR)/sun/jvm/hotspot/utilities/soql
+ $(QUIETLY) mkdir -p $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources
+ $(QUIETLY) rm -f $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/*
+ $(QUIETLY) cp $(AGENT_SRC_DIR)/sun/jvm/hotspot/ui/resources/*.png $(SA_CLASSDIR)/sun/jvm/hotspot/ui/resources/
+ $(QUIETLY) cp -r $(AGENT_SRC_DIR)/images/* $(SA_CLASSDIR)/
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.windbg.WindbgDebuggerLocal
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.x86.X86ThreadContext
$(RUN_JAVAH) -classpath $(SA_CLASSDIR) -jni sun.jvm.hotspot.debugger.ia64.IA64ThreadContext
diff --git a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
index d5696d442..6d941c368 100644
--- a/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
+++ b/src/cpu/sparc/vm/c1_LIRGenerator_sparc.cpp
@@ -956,7 +956,8 @@ void LIRGenerator::do_NewMultiArray(NewMultiArray* x) {
size->load_item();
store_stack_parameter (size->result(),
in_ByteSize(STACK_BIAS +
- (i + frame::memory_parameter_word_sp_offset) * wordSize));
+ frame::memory_parameter_word_sp_offset * wordSize +
+ i * sizeof(jint)));
}
// This instruction can be deoptimized in the slow path : use
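The offset fix above stops scaling the loop index by wordSize: each array dimension is stored as a 32-bit jint, so successive stack parameters should advance by sizeof(jint) rather than by a full word. A small arithmetic sketch of the two formulas, using placeholder constants since the real values of frame::memory_parameter_word_sp_offset and wordSize are not shown in this hunk:

    #include <stddef.h>

    // Placeholder values, for illustration only.
    static const int kMemParamWordSpOffset = 23;  // stands in for frame::memory_parameter_word_sp_offset
    static const int kWordSize             = 8;   // 64-bit SPARC word size

    static size_t old_offset(int i) { return (i + kMemParamWordSpOffset) * kWordSize; }
    static size_t new_offset(int i) { return kMemParamWordSpOffset * kWordSize + i * sizeof(int); }

    // old_offset(1) == 192 while new_offset(1) == 188: the second dimension now
    // lands one jint past the first instead of one full word past it.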
diff --git a/src/cpu/sparc/vm/relocInfo_sparc.cpp b/src/cpu/sparc/vm/relocInfo_sparc.cpp
index 795e1831e..ab3655582 100644
--- a/src/cpu/sparc/vm/relocInfo_sparc.cpp
+++ b/src/cpu/sparc/vm/relocInfo_sparc.cpp
@@ -204,3 +204,9 @@ void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen)
NativeInstruction* ni = nativeInstruction_at(x);
ni->set_long_at(0, u.l);
}
+
+void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+}
+
+void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+}
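The two definitions added above are intentionally empty: they satisfy the per-platform relocation hook, and leaving the bodies empty means a SPARC poll or poll_return site needs no patching when the containing code buffer is moved. A hedged sketch of the same pattern with invented names, not the HotSpot classes:

    // Illustration only; the class and method names are made up.
    class RelocBase {
     public:
      virtual ~RelocBase() {}
      virtual void fix_after_move() = 0;  // most relocations rewrite an embedded address here
    };

    class PollReloc : public RelocBase {
     public:
      virtual void fix_after_move() {}    // nothing is embedded in the instruction, so this is a no-op
    };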
diff --git a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
index 3812e2eae..6fed65b3d 100644
--- a/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
+++ b/src/cpu/sparc/vm/sharedRuntime_sparc.cpp
@@ -465,9 +465,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
case T_LONG:
assert(sig_bt[i+1] == T_VOID, "expecting VOID in other half");
-#ifdef COMPILER2
#ifdef _LP64
- // Can't be tiered (yet)
if (int_reg < int_reg_max) {
Register r = is_outgoing ? as_oRegister(int_reg++) : as_iRegister(int_reg++);
regs[i].set2(r->as_VMReg());
@@ -476,11 +474,12 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
stk_reg_pairs += 2;
}
#else
+#ifdef COMPILER2
// For 32-bit build, can't pass longs in O-regs because they become
// I-regs and get trashed. Use G-regs instead. G1 and G4 are almost
// spare and available. This convention isn't used by the Sparc ABI or
// anywhere else. If we're tiered then we don't use G-regs because c1
- // can't deal with them as a "pair".
+ // can't deal with them as a "pair". (Tiered makes this code think g's are filled)
// G0: zero
// G1: 1st Long arg
// G2: global allocated to TLS
@@ -500,7 +499,6 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
regs[i].set2(VMRegImpl::stack2reg(stk_reg_pairs));
stk_reg_pairs += 2;
}
-#endif // _LP64
#else // COMPILER2
if (int_reg_pairs + 1 < int_reg_max) {
if (is_outgoing) {
@@ -514,6 +512,7 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
stk_reg_pairs += 2;
}
#endif // COMPILER2
+#endif // _LP64
break;
case T_FLOAT:
@@ -699,17 +698,16 @@ Register AdapterGenerator::next_arg_slot(const int st_off){
// Stores long into offset pointed to by base
void AdapterGenerator::store_c2i_long(Register r, Register base,
const int st_off, bool is_stack) {
-#ifdef COMPILER2
#ifdef _LP64
// In V9, longs are given 2 64-bit slots in the interpreter, but the
// data is passed in only 1 slot.
__ stx(r, base, next_arg_slot(st_off));
#else
+#ifdef COMPILER2
// Misaligned store of 64-bit data
__ stw(r, base, arg_slot(st_off)); // lo bits
__ srlx(r, 32, r);
__ stw(r, base, next_arg_slot(st_off)); // hi bits
-#endif // _LP64
#else
if (is_stack) {
// Misaligned store of 64-bit data
@@ -721,6 +719,7 @@ void AdapterGenerator::store_c2i_long(Register r, Register base,
__ stw(r , base, next_arg_slot(st_off)); // hi bits
}
#endif // COMPILER2
+#endif // _LP64
tag_c2i_arg(frame::TagCategory2, base, st_off, r);
}
@@ -1637,7 +1636,7 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
}
} else if (dst.is_single_phys_reg()) {
if (src.is_adjacent_aligned_on_stack(2)) {
- __ ld_long(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
+ __ ldx(FP, reg2offset(src.first()) + STACK_BIAS, dst.first()->as_Register());
} else {
// dst is a single reg.
// Remember lo is low address not msb for stack slots
@@ -1811,7 +1810,6 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler* masm,
VMRegPair *in_regs,
BasicType ret_type) {
-
// Native nmethod wrappers never take possesion of the oop arguments.
// So the caller will gc the arguments. The only thing we need an
// oopMap for is if the call is static
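The sharedRuntime_sparc.cpp hunks above re-nest the conditional compilation so that _LP64 is tested before COMPILER2: the 64-bit long-passing path no longer depends on the server compiler being built, and COMPILER2 only selects between the two 32-bit conventions. A minimal sketch of the resulting flag structure, using a placeholder function instead of the real calling-convention code:

    // Placeholder only; the strings just label which branch would be compiled.
    static const char* long_arg_path() {
    #ifdef _LP64
      return "64-bit: pass the long in a single register or stack slot";
    #else
    #ifdef COMPILER2
      return "32-bit C2: pass the long in a G-register pair";
    #else
      return "32-bit C1: pass the long as a register or stack pair";
    #endif // COMPILER2
    #endif // _LP64
    }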
diff --git a/src/cpu/x86/vm/assembler_x86_64.cpp b/src/cpu/x86/vm/assembler_x86.cpp
index 431c233df..d8bc4948c 100644
--- a/src/cpu/x86/vm/assembler_x86_64.cpp
+++ b/src/cpu/x86/vm/assembler_x86.cpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -23,7 +23,7 @@
*/
#include "incls/_precompiled.incl"
-#include "incls/_assembler_x86_64.cpp.incl"
+#include "incls/_assembler_x86.cpp.incl"
// Implementation of AddressLiteral
@@ -52,6 +52,10 @@ AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
case relocInfo::runtime_call_type:
_rspec = runtime_call_Relocation::spec();
break;
+ case relocInfo::poll_type:
+ case relocInfo::poll_return_type:
+ _rspec = Relocation::spec_simple(rtype);
+ break;
case relocInfo::none:
break;
default:
@@ -62,20 +66,13 @@ AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
// Implementation of Address
-Address Address::make_array(ArrayAddress adr) {
#ifdef _LP64
+
+Address Address::make_array(ArrayAddress adr) {
// Not implementable on 64bit machines
// Should have been handled higher up the call chain.
ShouldNotReachHere();
return Address();
-#else
- AddressLiteral base = adr.base();
- Address index = adr.index();
- assert(index._disp == 0, "must not have disp"); // maybe it can?
- Address array(index._base, index._index, index._scale, (intptr_t) base.target());
- array._rspec = base._rspec;
- return array;
-#endif // _LP64
}
// exceedingly dangerous constructor
@@ -95,12 +92,39 @@ Address::Address(int disp, address loc, relocInfo::relocType rtype) {
// HMM
_rspec = runtime_call_Relocation::spec();
break;
+ case relocInfo::poll_type:
+ case relocInfo::poll_return_type:
+ _rspec = Relocation::spec_simple(rtype);
+ break;
case relocInfo::none:
break;
default:
ShouldNotReachHere();
}
}
+#else // LP64
+
+Address Address::make_array(ArrayAddress adr) {
+ AddressLiteral base = adr.base();
+ Address index = adr.index();
+ assert(index._disp == 0, "must not have disp"); // maybe it can?
+ Address array(index._base, index._index, index._scale, (intptr_t) base.target());
+ array._rspec = base._rspec;
+ return array;
+}
+
+// exceedingly dangerous constructor
+Address::Address(address loc, RelocationHolder spec) {
+ _base = noreg;
+ _index = noreg;
+ _scale = no_scale;
+ _disp = (intptr_t) loc;
+ _rspec = spec;
+}
+
+#endif // _LP64
+
+
// Convert the raw encoding form into the form expected by the constructor for
// Address. An index of 4 (rsp) corresponds to having no index, so convert
@@ -116,89 +140,21 @@ Address Address::make_raw(int base, int index, int scale, int disp) {
}
}
-
// Implementation of Assembler
+
int AbstractAssembler::code_fill_byte() {
return (u_char)'\xF4'; // hlt
}
-// This should only be used by 64bit instructions that can use rip-relative
-// it cannot be used by instructions that want an immediate value.
-
-bool Assembler::reachable(AddressLiteral adr) {
- int64_t disp;
-
- // None will force a 64bit literal to the code stream. Likely a placeholder
- // for something that will be patched later and we need to certain it will
- // always be reachable.
- if (adr.reloc() == relocInfo::none) {
- return false;
- }
- if (adr.reloc() == relocInfo::internal_word_type) {
- // This should be rip relative and easily reachable.
- return true;
- }
- if (adr.reloc() != relocInfo::external_word_type &&
- adr.reloc() != relocInfo::runtime_call_type ) {
- return false;
- }
-
- // Stress the correction code
- if (ForceUnreachable) {
- // Must be runtimecall reloc, see if it is in the codecache
- // Flipping stuff in the codecache to be unreachable causes issues
- // with things like inline caches where the additional instructions
- // are not handled.
- if (CodeCache::find_blob(adr._target) == NULL) {
- return false;
- }
- }
- // For external_word_type/runtime_call_type if it is reachable from where we
- // are now (possibly a temp buffer) and where we might end up
- // anywhere in the codeCache then we are always reachable.
- // This would have to change if we ever save/restore shared code
- // to be more pessimistic.
-
- disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
- if (!is_simm32(disp)) return false;
- disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
- if (!is_simm32(disp)) return false;
-
- disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));
-
- // Because rip relative is a disp + address_of_next_instruction and we
- // don't know the value of address_of_next_instruction we apply a fudge factor
- // to make sure we will be ok no matter the size of the instruction we get placed into.
- // We don't have to fudge the checks above here because they are already worst case.
-
- // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp , 4-byte literal
- // + 4 because better safe than sorry.
- const int fudge = 12 + 4;
- if (disp < 0) {
- disp -= fudge;
- } else {
- disp += fudge;
- }
- return is_simm32(disp);
+// make this go away someday
+void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
+ if (rtype == relocInfo::none)
+ emit_long(data);
+ else emit_data(data, Relocation::spec_simple(rtype), format);
}
-
-// make this go away eventually
-void Assembler::emit_data(jint data,
- relocInfo::relocType rtype,
- int format) {
- if (rtype == relocInfo::none) {
- emit_long(data);
- } else {
- emit_data(data, Relocation::spec_simple(rtype), format);
- }
-}
-
-void Assembler::emit_data(jint data,
- RelocationHolder const& rspec,
- int format) {
- assert(imm64_operand == 0, "default format must be imm64 in this file");
- assert(imm64_operand != format, "must not be imm64");
+void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
+ assert(imm_operand == 0, "default format must be immediate in this file");
assert(inst_mark() != NULL, "must be inside InstructionMark");
if (rspec.type() != relocInfo::none) {
#ifdef ASSERT
@@ -216,67 +172,50 @@ void Assembler::emit_data(jint data,
emit_long(data);
}
-void Assembler::emit_data64(jlong data,
- relocInfo::relocType rtype,
- int format) {
- if (rtype == relocInfo::none) {
- emit_long64(data);
- } else {
- emit_data64(data, Relocation::spec_simple(rtype), format);
+static int encode(Register r) {
+ int enc = r->encoding();
+ if (enc >= 8) {
+ enc -= 8;
}
+ return enc;
}
-void Assembler::emit_data64(jlong data,
- RelocationHolder const& rspec,
- int format) {
- assert(imm64_operand == 0, "default format must be imm64 in this file");
- assert(imm64_operand == format, "must be imm64");
- assert(inst_mark() != NULL, "must be inside InstructionMark");
- // Do not use AbstractAssembler::relocate, which is not intended for
- // embedded words. Instead, relocate to the enclosing instruction.
- code_section()->relocate(inst_mark(), rspec, format);
-#ifdef ASSERT
- check_relocation(rspec, format);
-#endif
- emit_long64(data);
+static int encode(XMMRegister r) {
+ int enc = r->encoding();
+ if (enc >= 8) {
+ enc -= 8;
+ }
+ return enc;
}
void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
+ assert(dst->has_byte_register(), "must have byte register");
assert(isByte(op1) && isByte(op2), "wrong opcode");
assert(isByte(imm8), "not a byte");
assert((op1 & 0x01) == 0, "should be 8bit operation");
- int dstenc = dst->encoding();
- if (dstenc >= 8) {
- dstenc -= 8;
- }
emit_byte(op1);
- emit_byte(op2 | dstenc);
+ emit_byte(op2 | encode(dst));
emit_byte(imm8);
}
-void Assembler::emit_arith(int op1, int op2, Register dst, int imm32) {
+
+void Assembler::emit_arith(int op1, int op2, Register dst, int32_t imm32) {
assert(isByte(op1) && isByte(op2), "wrong opcode");
assert((op1 & 0x01) == 1, "should be 32bit operation");
assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
- int dstenc = dst->encoding();
- if (dstenc >= 8) {
- dstenc -= 8;
- }
if (is8bit(imm32)) {
emit_byte(op1 | 0x02); // set sign bit
- emit_byte(op2 | dstenc);
+ emit_byte(op2 | encode(dst));
emit_byte(imm32 & 0xFF);
} else {
emit_byte(op1);
- emit_byte(op2 | dstenc);
+ emit_byte(op2 | encode(dst));
emit_long(imm32);
}
}
// immediate-to-memory forms
-void Assembler::emit_arith_operand(int op1,
- Register rm, Address adr,
- int imm32) {
+void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32) {
assert((op1 & 0x01) == 1, "should be 32bit operation");
assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
if (is8bit(imm32)) {
@@ -290,127 +229,117 @@ void Assembler::emit_arith_operand(int op1,
}
}
+void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
+ LP64_ONLY(ShouldNotReachHere());
+ assert(isByte(op1) && isByte(op2), "wrong opcode");
+ assert((op1 & 0x01) == 1, "should be 32bit operation");
+ assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
+ InstructionMark im(this);
+ emit_byte(op1);
+ emit_byte(op2 | encode(dst));
+ emit_data((intptr_t)obj, relocInfo::oop_type, 0);
+}
+
void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
assert(isByte(op1) && isByte(op2), "wrong opcode");
- int dstenc = dst->encoding();
- int srcenc = src->encoding();
- if (dstenc >= 8) {
- dstenc -= 8;
- }
- if (srcenc >= 8) {
- srcenc -= 8;
- }
emit_byte(op1);
- emit_byte(op2 | dstenc << 3 | srcenc);
+ emit_byte(op2 | encode(dst) << 3 | encode(src));
}
+
void Assembler::emit_operand(Register reg, Register base, Register index,
Address::ScaleFactor scale, int disp,
RelocationHolder const& rspec,
int rip_relative_correction) {
relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
- int regenc = reg->encoding();
- if (regenc >= 8) {
- regenc -= 8;
- }
+
+ // Encode the registers as needed in the fields they are used in
+
+ int regenc = encode(reg) << 3;
+ int indexenc = index->is_valid() ? encode(index) << 3 : 0;
+ int baseenc = base->is_valid() ? encode(base) : 0;
+
if (base->is_valid()) {
if (index->is_valid()) {
assert(scale != Address::no_scale, "inconsistent address");
- int indexenc = index->encoding();
- if (indexenc >= 8) {
- indexenc -= 8;
- }
- int baseenc = base->encoding();
- if (baseenc >= 8) {
- baseenc -= 8;
- }
// [base + index*scale + disp]
if (disp == 0 && rtype == relocInfo::none &&
- base != rbp && base != r13) {
+ base != rbp LP64_ONLY(&& base != r13)) {
// [base + index*scale]
// [00 reg 100][ss index base]
assert(index != rsp, "illegal addressing mode");
- emit_byte(0x04 | regenc << 3);
- emit_byte(scale << 6 | indexenc << 3 | baseenc);
+ emit_byte(0x04 | regenc);
+ emit_byte(scale << 6 | indexenc | baseenc);
} else if (is8bit(disp) && rtype == relocInfo::none) {
// [base + index*scale + imm8]
// [01 reg 100][ss index base] imm8
assert(index != rsp, "illegal addressing mode");
- emit_byte(0x44 | regenc << 3);
- emit_byte(scale << 6 | indexenc << 3 | baseenc);
+ emit_byte(0x44 | regenc);
+ emit_byte(scale << 6 | indexenc | baseenc);
emit_byte(disp & 0xFF);
} else {
// [base + index*scale + disp32]
// [10 reg 100][ss index base] disp32
assert(index != rsp, "illegal addressing mode");
- emit_byte(0x84 | regenc << 3);
- emit_byte(scale << 6 | indexenc << 3 | baseenc);
+ emit_byte(0x84 | regenc);
+ emit_byte(scale << 6 | indexenc | baseenc);
emit_data(disp, rspec, disp32_operand);
}
- } else if (base == rsp || base == r12) {
+ } else if (base == rsp LP64_ONLY(|| base == r12)) {
// [rsp + disp]
if (disp == 0 && rtype == relocInfo::none) {
// [rsp]
// [00 reg 100][00 100 100]
- emit_byte(0x04 | regenc << 3);
+ emit_byte(0x04 | regenc);
emit_byte(0x24);
} else if (is8bit(disp) && rtype == relocInfo::none) {
// [rsp + imm8]
// [01 reg 100][00 100 100] disp8
- emit_byte(0x44 | regenc << 3);
+ emit_byte(0x44 | regenc);
emit_byte(0x24);
emit_byte(disp & 0xFF);
} else {
// [rsp + imm32]
// [10 reg 100][00 100 100] disp32
- emit_byte(0x84 | regenc << 3);
+ emit_byte(0x84 | regenc);
emit_byte(0x24);
emit_data(disp, rspec, disp32_operand);
}
} else {
// [base + disp]
- assert(base != rsp && base != r12, "illegal addressing mode");
- int baseenc = base->encoding();
- if (baseenc >= 8) {
- baseenc -= 8;
- }
+ assert(base != rsp LP64_ONLY(&& base != r12), "illegal addressing mode");
if (disp == 0 && rtype == relocInfo::none &&
- base != rbp && base != r13) {
+ base != rbp LP64_ONLY(&& base != r13)) {
// [base]
// [00 reg base]
- emit_byte(0x00 | regenc << 3 | baseenc);
+ emit_byte(0x00 | regenc | baseenc);
} else if (is8bit(disp) && rtype == relocInfo::none) {
// [base + disp8]
// [01 reg base] disp8
- emit_byte(0x40 | regenc << 3 | baseenc);
+ emit_byte(0x40 | regenc | baseenc);
emit_byte(disp & 0xFF);
} else {
// [base + disp32]
// [10 reg base] disp32
- emit_byte(0x80 | regenc << 3 | baseenc);
+ emit_byte(0x80 | regenc | baseenc);
emit_data(disp, rspec, disp32_operand);
}
}
} else {
if (index->is_valid()) {
assert(scale != Address::no_scale, "inconsistent address");
- int indexenc = index->encoding();
- if (indexenc >= 8) {
- indexenc -= 8;
- }
// [index*scale + disp]
// [00 reg 100][ss index 101] disp32
assert(index != rsp, "illegal addressing mode");
- emit_byte(0x04 | regenc << 3);
- emit_byte(scale << 6 | indexenc << 3 | 0x05);
+ emit_byte(0x04 | regenc);
+ emit_byte(scale << 6 | indexenc | 0x05);
emit_data(disp, rspec, disp32_operand);
-#ifdef _LP64
} else if (rtype != relocInfo::none ) {
- // [disp] RIP-RELATIVE
+ // [disp] (64bit) RIP-RELATIVE (32bit) abs
// [00 000 101] disp32
- emit_byte(0x05 | regenc << 3);
+ emit_byte(0x05 | regenc);
// Note that the RIP-rel. correction applies to the generated
// disp field, but _not_ to the target address in the rspec.
@@ -419,16 +348,18 @@ void Assembler::emit_operand(Register reg, Register base, Register index,
// intptr_t disp = target - next_ip;
assert(inst_mark() != NULL, "must be inside InstructionMark");
address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
- int64_t adjusted = (int64_t) disp - (next_ip - inst_mark());
+ int64_t adjusted = disp;
+ // Do rip-rel adjustment for 64bit
+ LP64_ONLY(adjusted -= (next_ip - inst_mark()));
assert(is_simm32(adjusted),
"must be 32bit offset (RIP relative address)");
- emit_data((int) adjusted, rspec, disp32_operand);
+ emit_data((int32_t) adjusted, rspec, disp32_operand);
-#endif // _LP64
} else {
+ // 32bit never did this, did everything as the rip-rel/disp code above
// [disp] ABSOLUTE
// [00 reg 100][00 100 101] disp32
- emit_byte(0x04 | regenc << 3);
+ emit_byte(0x04 | regenc);
emit_byte(0x25);
emit_data(disp, rspec, disp32_operand);
}
@@ -437,132 +368,8 @@ void Assembler::emit_operand(Register reg, Register base, Register index,
void Assembler::emit_operand(XMMRegister reg, Register base, Register index,
Address::ScaleFactor scale, int disp,
- RelocationHolder const& rspec,
- int rip_relative_correction) {
- relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
- int regenc = reg->encoding();
- if (regenc >= 8) {
- regenc -= 8;
- }
- if (base->is_valid()) {
- if (index->is_valid()) {
- assert(scale != Address::no_scale, "inconsistent address");
- int indexenc = index->encoding();
- if (indexenc >= 8) {
- indexenc -= 8;
- }
- int baseenc = base->encoding();
- if (baseenc >= 8) {
- baseenc -= 8;
- }
- // [base + index*scale + disp]
- if (disp == 0 && rtype == relocInfo::none &&
- base != rbp && base != r13) {
- // [base + index*scale]
- // [00 reg 100][ss index base]
- assert(index != rsp, "illegal addressing mode");
- emit_byte(0x04 | regenc << 3);
- emit_byte(scale << 6 | indexenc << 3 | baseenc);
- } else if (is8bit(disp) && rtype == relocInfo::none) {
- // [base + index*scale + disp8]
- // [01 reg 100][ss index base] disp8
- assert(index != rsp, "illegal addressing mode");
- emit_byte(0x44 | regenc << 3);
- emit_byte(scale << 6 | indexenc << 3 | baseenc);
- emit_byte(disp & 0xFF);
- } else {
- // [base + index*scale + disp32]
- // [10 reg 100][ss index base] disp32
- assert(index != rsp, "illegal addressing mode");
- emit_byte(0x84 | regenc << 3);
- emit_byte(scale << 6 | indexenc << 3 | baseenc);
- emit_data(disp, rspec, disp32_operand);
- }
- } else if (base == rsp || base == r12) {
- // [rsp + disp]
- if (disp == 0 && rtype == relocInfo::none) {
- // [rsp]
- // [00 reg 100][00 100 100]
- emit_byte(0x04 | regenc << 3);
- emit_byte(0x24);
- } else if (is8bit(disp) && rtype == relocInfo::none) {
- // [rsp + imm8]
- // [01 reg 100][00 100 100] disp8
- emit_byte(0x44 | regenc << 3);
- emit_byte(0x24);
- emit_byte(disp & 0xFF);
- } else {
- // [rsp + imm32]
- // [10 reg 100][00 100 100] disp32
- emit_byte(0x84 | regenc << 3);
- emit_byte(0x24);
- emit_data(disp, rspec, disp32_operand);
- }
- } else {
- // [base + disp]
- assert(base != rsp && base != r12, "illegal addressing mode");
- int baseenc = base->encoding();
- if (baseenc >= 8) {
- baseenc -= 8;
- }
- if (disp == 0 && rtype == relocInfo::none &&
- base != rbp && base != r13) {
- // [base]
- // [00 reg base]
- emit_byte(0x00 | regenc << 3 | baseenc);
- } else if (is8bit(disp) && rtype == relocInfo::none) {
- // [base + imm8]
- // [01 reg base] disp8
- emit_byte(0x40 | regenc << 3 | baseenc);
- emit_byte(disp & 0xFF);
- } else {
- // [base + imm32]
- // [10 reg base] disp32
- emit_byte(0x80 | regenc << 3 | baseenc);
- emit_data(disp, rspec, disp32_operand);
- }
- }
- } else {
- if (index->is_valid()) {
- assert(scale != Address::no_scale, "inconsistent address");
- int indexenc = index->encoding();
- if (indexenc >= 8) {
- indexenc -= 8;
- }
- // [index*scale + disp]
- // [00 reg 100][ss index 101] disp32
- assert(index != rsp, "illegal addressing mode");
- emit_byte(0x04 | regenc << 3);
- emit_byte(scale << 6 | indexenc << 3 | 0x05);
- emit_data(disp, rspec, disp32_operand);
-#ifdef _LP64
- } else if ( rtype != relocInfo::none ) {
- // [disp] RIP-RELATIVE
- // [00 reg 101] disp32
- emit_byte(0x05 | regenc << 3);
- // Note that the RIP-rel. correction applies to the generated
- // disp field, but _not_ to the target address in the rspec.
-
- // disp was created by converting the target address minus the pc
- // at the start of the instruction. That needs more correction here.
- // intptr_t disp = target - next_ip;
-
- assert(inst_mark() != NULL, "must be inside InstructionMark");
- address next_ip = pc() + sizeof(int32_t) + rip_relative_correction;
-
- int64_t adjusted = (int64_t) disp - (next_ip - inst_mark());
- assert(is_simm32(adjusted),
- "must be 32bit offset (RIP relative address)");
- emit_data((int) adjusted, rspec, disp32_operand);
-#endif // _LP64
- } else {
- // [disp] ABSOLUTE
- // [00 reg 100][00 100 101] disp32
- emit_byte(0x04 | regenc << 3);
- emit_byte(0x25);
- emit_data(disp, rspec, disp32_operand);
- }
- }
+ RelocationHolder const& rspec) {
+ emit_operand((Register)reg, base, index, scale, disp, rspec);
}
// Secret local extension to Assembler::WhichOperand:
@@ -603,8 +410,9 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case ES_segment:
case FS_segment:
case GS_segment:
- assert(0, "shouldn't have that prefix");
- assert(ip == inst + 1 || ip == inst + 2, "only two prefixes allowed");
+ // Seems dubious
+ LP64_ONLY(assert(false, "shouldn't have that prefix"));
+ assert(ip == inst+1, "only one prefix allowed");
goto again_after_prefix;
case 0x67:
@@ -616,7 +424,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case REX_RB:
case REX_RX:
case REX_RXB:
-// assert(ip == inst + 1, "only one prefix allowed");
+ NOT_LP64(assert(false, "64bit prefixes"));
goto again_after_prefix;
case REX_W:
@@ -627,8 +435,8 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case REX_WRB:
case REX_WRX:
case REX_WRXB:
+ NOT_LP64(assert(false, "64bit prefixes"));
is_64bit = true;
-// assert(ip == inst + 1, "only one prefix allowed");
goto again_after_prefix;
case 0xFF: // pushq a; decl a; incl a; call a; jmp a
@@ -637,15 +445,15 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x8A: // movb r, a
case 0x8B: // movl r, a
case 0x8F: // popl a
- debug_only(has_disp32 = true;)
+ debug_only(has_disp32 = true);
break;
case 0x68: // pushq #32
if (which == end_pc_operand) {
return ip + 4;
}
- assert(0, "pushq has no disp32 or imm64");
- ShouldNotReachHere();
+ assert(which == imm_operand && !is_64bit, "pushl has no disp32 or 64bit immediate");
+ return ip; // not produced by emit_operand
case 0x66: // movw ... (size prefix)
again_after_size_prefix2:
@@ -666,11 +474,14 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case REX_WRB:
case REX_WRX:
case REX_WRXB:
+ NOT_LP64(assert(false, "64bit prefix found"));
goto again_after_size_prefix2;
case 0x8B: // movw r, a
case 0x89: // movw a, r
+ debug_only(has_disp32 = true);
break;
case 0xC7: // movw a, #16
+ debug_only(has_disp32 = true);
tail_size = 2; // the imm16
break;
case 0x0F: // several SSE/SSE2 variants
@@ -683,8 +494,13 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case REP8(0xB8): // movl/q r, #32/#64(oop?)
if (which == end_pc_operand) return ip + (is_64bit ? 8 : 4);
- assert((which == call32_operand || which == imm64_operand) && is_64bit ||
+ // these asserts are somewhat nonsensical
+#ifndef _LP64
+ assert(which == imm_operand || which == disp32_operand, "");
+#else
+ assert((which == call32_operand || which == imm_operand) && is_64bit ||
which == narrow_oop_operand && !is_64bit, "");
+#endif // _LP64
return ip;
case 0x69: // imul r, a, #32
@@ -700,18 +516,23 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x2E: // ucomiss
case 0x2F: // comiss
case 0x54: // andps
+ case 0x55: // andnps
+ case 0x56: // orps
case 0x57: // xorps
case 0x6E: // movd
case 0x7E: // movd
case 0xAE: // ldmxcsr a
- debug_only(has_disp32 = true); // has both kinds of operands!
+      // The 64bit side says these have both kinds of operands, but that doesn't
+      // appear to be true
+ debug_only(has_disp32 = true);
break;
+
case 0xAD: // shrd r, a, %cl
case 0xAF: // imul r, a
- case 0xBE: // movsbl r, a
- case 0xBF: // movswl r, a
- case 0xB6: // movzbl r, a
- case 0xB7: // movzwl r, a
+ case 0xBE: // movsbl r, a (movsxb)
+ case 0xBF: // movswl r, a (movsxw)
+ case 0xB6: // movzbl r, a (movzxb)
+ case 0xB7: // movzwl r, a (movzxw)
case REP16(0x40): // cmovl cc, r, a
case 0xB0: // cmpxchgb
case 0xB1: // cmpxchg
@@ -721,13 +542,15 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
debug_only(has_disp32 = true);
// fall out of the switch to decode the address
break;
+
case 0xAC: // shrd r, a, #8
debug_only(has_disp32 = true);
tail_size = 1; // the imm8
break;
+
case REP16(0x80): // jcc rdisp32
if (which == end_pc_operand) return ip + 4;
- assert(which == call32_operand, "jcc has no disp32 or imm64");
+ assert(which == call32_operand, "jcc has no disp32 or imm");
return ip;
default:
ShouldNotReachHere();
@@ -736,6 +559,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0x81: // addl a, #32; addl r, #32
// also: orl, adcl, sbbl, andl, subl, xorl, cmpl
+ // on 32bit in the case of cmpl, the imm might be an oop
tail_size = 4;
debug_only(has_disp32 = true); // has both kinds of operands!
break;
@@ -764,11 +588,9 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case REP4(0x18): // sbb...
case REP4(0x28): // sub...
case 0xF7: // mull a
+ case 0x8D: // lea r, a
case 0x87: // xchg r, a
- debug_only(has_disp32 = true);
- break;
case REP4(0x38): // cmp...
- case 0x8D: // lea r, a
case 0x85: // test r, a
debug_only(has_disp32 = true); // has both kinds of operands!
break;
@@ -784,7 +606,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case 0xE8: // call rdisp32
case 0xE9: // jmp rdisp32
if (which == end_pc_operand) return ip + 4;
- assert(which == call32_operand, "call has no disp32 or imm32");
+ assert(which == call32_operand, "call has no disp32 or imm");
return ip;
case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
@@ -818,6 +640,7 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
case REX_WRB:
case REX_WRX:
case REX_WRXB:
+ NOT_LP64(assert(false, "found 64bit prefix"));
ip++;
default:
ip++;
@@ -833,7 +656,12 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
}
assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
- assert(which != imm64_operand, "instruction is not a movq reg, imm64");
+#ifdef _LP64
+ assert(which != imm_operand, "instruction is not a movq reg, imm64");
+#else
+ // assert(which != imm_operand || has_imm32, "instruction has no imm32 field");
+ assert(which != imm_operand || has_disp32, "instruction has no imm32 field");
+#endif // LP64
assert(which != disp32_operand || has_disp32, "instruction has no disp32 field");
// parse the output of emit_operand
@@ -888,7 +716,11 @@ address Assembler::locate_operand(address inst, WhichOperand which) {
return ip + tail_size;
}
- assert(0, "fix locate_operand");
+#ifdef _LP64
+ assert(false, "fix locate_operand");
+#else
+ assert(which == imm_operand, "instruction has only an imm field");
+#endif // LP64
return ip;
}
@@ -897,597 +729,733 @@ address Assembler::locate_next_instruction(address inst) {
return locate_operand(inst, end_pc_operand);
}
+
#ifdef ASSERT
void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
address inst = inst_mark();
- assert(inst != NULL && inst < pc(),
- "must point to beginning of instruction");
+ assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
address opnd;
Relocation* r = rspec.reloc();
if (r->type() == relocInfo::none) {
return;
} else if (r->is_call() || format == call32_operand) {
+ // assert(format == imm32_operand, "cannot specify a nonzero format");
opnd = locate_operand(inst, call32_operand);
} else if (r->is_data()) {
- assert(format == imm64_operand || format == disp32_operand ||
- format == narrow_oop_operand, "format ok");
- opnd = locate_operand(inst, (WhichOperand) format);
+ assert(format == imm_operand || format == disp32_operand
+ LP64_ONLY(|| format == narrow_oop_operand), "format ok");
+ opnd = locate_operand(inst, (WhichOperand)format);
} else {
- assert(format == 0, "cannot specify a format");
+ assert(format == imm_operand, "cannot specify a format");
return;
}
assert(opnd == pc(), "must put operand where relocs can find it");
}
-#endif
+#endif // ASSERT
-int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
- if (reg_enc >= 8) {
- prefix(REX_B);
- reg_enc -= 8;
- } else if (byteinst && reg_enc >= 4) {
- prefix(REX);
- }
- return reg_enc;
+void Assembler::emit_operand32(Register reg, Address adr) {
+ assert(reg->encoding() < 8, "no extended registers");
+ assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
+ emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
+ adr._rspec);
}
-int Assembler::prefixq_and_encode(int reg_enc) {
- if (reg_enc < 8) {
- prefix(REX_W);
- } else {
- prefix(REX_WB);
- reg_enc -= 8;
- }
- return reg_enc;
+void Assembler::emit_operand(Register reg, Address adr,
+ int rip_relative_correction) {
+ emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
+ adr._rspec,
+ rip_relative_correction);
}
-int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
- if (dst_enc < 8) {
- if (src_enc >= 8) {
- prefix(REX_B);
- src_enc -= 8;
- } else if (byteinst && src_enc >= 4) {
- prefix(REX);
- }
- } else {
- if (src_enc < 8) {
- prefix(REX_R);
- } else {
- prefix(REX_RB);
- src_enc -= 8;
- }
- dst_enc -= 8;
- }
- return dst_enc << 3 | src_enc;
+void Assembler::emit_operand(XMMRegister reg, Address adr) {
+ emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
+ adr._rspec);
}
-int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
- if (dst_enc < 8) {
- if (src_enc < 8) {
- prefix(REX_W);
- } else {
- prefix(REX_WB);
- src_enc -= 8;
- }
- } else {
- if (src_enc < 8) {
- prefix(REX_WR);
- } else {
- prefix(REX_WRB);
- src_enc -= 8;
- }
- dst_enc -= 8;
- }
- return dst_enc << 3 | src_enc;
+// MMX operations
+void Assembler::emit_operand(MMXRegister reg, Address adr) {
+ assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
+ emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
-void Assembler::prefix(Register reg) {
- if (reg->encoding() >= 8) {
- prefix(REX_B);
- }
+// work around gcc (3.2.1-7a) bug
+void Assembler::emit_operand(Address adr, MMXRegister reg) {
+ assert(!adr.base_needs_rex() && !adr.index_needs_rex(), "no extended registers");
+ emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
}
-void Assembler::prefix(Address adr) {
- if (adr.base_needs_rex()) {
- if (adr.index_needs_rex()) {
- prefix(REX_XB);
- } else {
- prefix(REX_B);
- }
- } else {
- if (adr.index_needs_rex()) {
- prefix(REX_X);
- }
- }
-}
-void Assembler::prefixq(Address adr) {
- if (adr.base_needs_rex()) {
- if (adr.index_needs_rex()) {
- prefix(REX_WXB);
- } else {
- prefix(REX_WB);
- }
- } else {
- if (adr.index_needs_rex()) {
- prefix(REX_WX);
- } else {
- prefix(REX_W);
- }
- }
+void Assembler::emit_farith(int b1, int b2, int i) {
+ assert(isByte(b1) && isByte(b2), "wrong opcode");
+ assert(0 <= i && i < 8, "illegal stack offset");
+ emit_byte(b1);
+ emit_byte(b2 + i);
}
-void Assembler::prefix(Address adr, Register reg, bool byteinst) {
- if (reg->encoding() < 8) {
- if (adr.base_needs_rex()) {
- if (adr.index_needs_rex()) {
- prefix(REX_XB);
- } else {
- prefix(REX_B);
- }
- } else {
- if (adr.index_needs_rex()) {
- prefix(REX_X);
- } else if (reg->encoding() >= 4 ) {
- prefix(REX);
- }
- }
- } else {
- if (adr.base_needs_rex()) {
- if (adr.index_needs_rex()) {
- prefix(REX_RXB);
- } else {
- prefix(REX_RB);
- }
- } else {
- if (adr.index_needs_rex()) {
- prefix(REX_RX);
- } else {
- prefix(REX_R);
- }
- }
- }
-}
+// Now the Assembler instruction (identical for 32/64 bits)
-void Assembler::prefixq(Address adr, Register src) {
- if (src->encoding() < 8) {
- if (adr.base_needs_rex()) {
- if (adr.index_needs_rex()) {
- prefix(REX_WXB);
- } else {
- prefix(REX_WB);
- }
- } else {
- if (adr.index_needs_rex()) {
- prefix(REX_WX);
- } else {
- prefix(REX_W);
- }
- }
- } else {
- if (adr.base_needs_rex()) {
- if (adr.index_needs_rex()) {
- prefix(REX_WRXB);
- } else {
- prefix(REX_WRB);
- }
- } else {
- if (adr.index_needs_rex()) {
- prefix(REX_WRX);
- } else {
- prefix(REX_WR);
- }
- }
- }
+void Assembler::adcl(Register dst, int32_t imm32) {
+ prefix(dst);
+ emit_arith(0x81, 0xD0, dst, imm32);
}
-void Assembler::prefix(Address adr, XMMRegister reg) {
- if (reg->encoding() < 8) {
- if (adr.base_needs_rex()) {
- if (adr.index_needs_rex()) {
- prefix(REX_XB);
- } else {
- prefix(REX_B);
- }
- } else {
- if (adr.index_needs_rex()) {
- prefix(REX_X);
- }
- }
- } else {
- if (adr.base_needs_rex()) {
- if (adr.index_needs_rex()) {
- prefix(REX_RXB);
- } else {
- prefix(REX_RB);
- }
- } else {
- if (adr.index_needs_rex()) {
- prefix(REX_RX);
- } else {
- prefix(REX_R);
- }
- }
- }
+void Assembler::adcl(Register dst, Address src) {
+ InstructionMark im(this);
+ prefix(src, dst);
+ emit_byte(0x13);
+ emit_operand(dst, src);
}
-void Assembler::emit_operand(Register reg, Address adr,
- int rip_relative_correction) {
- emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
- adr._rspec,
- rip_relative_correction);
+void Assembler::adcl(Register dst, Register src) {
+ (void) prefix_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x13, 0xC0, dst, src);
}
-void Assembler::emit_operand(XMMRegister reg, Address adr,
- int rip_relative_correction) {
- emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp,
- adr._rspec,
- rip_relative_correction);
+void Assembler::addl(Address dst, int32_t imm32) {
+ InstructionMark im(this);
+ prefix(dst);
+ emit_arith_operand(0x81, rax, dst, imm32);
}
-void Assembler::emit_farith(int b1, int b2, int i) {
- assert(isByte(b1) && isByte(b2), "wrong opcode");
- assert(0 <= i && i < 8, "illegal stack offset");
- emit_byte(b1);
- emit_byte(b2 + i);
+void Assembler::addl(Address dst, Register src) {
+ InstructionMark im(this);
+ prefix(dst, src);
+ emit_byte(0x01);
+ emit_operand(src, dst);
}
-// pushad is invalid, use this instead.
-// NOTE: Kills flags!!
-void Assembler::pushaq() {
- // we have to store original rsp. ABI says that 128 bytes
- // below rsp are local scratch.
- movq(Address(rsp, -5 * wordSize), rsp);
-
- subq(rsp, 16 * wordSize);
-
- movq(Address(rsp, 15 * wordSize), rax);
- movq(Address(rsp, 14 * wordSize), rcx);
- movq(Address(rsp, 13 * wordSize), rdx);
- movq(Address(rsp, 12 * wordSize), rbx);
- // skip rsp
- movq(Address(rsp, 10 * wordSize), rbp);
- movq(Address(rsp, 9 * wordSize), rsi);
- movq(Address(rsp, 8 * wordSize), rdi);
- movq(Address(rsp, 7 * wordSize), r8);
- movq(Address(rsp, 6 * wordSize), r9);
- movq(Address(rsp, 5 * wordSize), r10);
- movq(Address(rsp, 4 * wordSize), r11);
- movq(Address(rsp, 3 * wordSize), r12);
- movq(Address(rsp, 2 * wordSize), r13);
- movq(Address(rsp, wordSize), r14);
- movq(Address(rsp, 0), r15);
+void Assembler::addl(Register dst, int32_t imm32) {
+ prefix(dst);
+ emit_arith(0x81, 0xC0, dst, imm32);
}
-// popad is invalid, use this instead
-// NOTE: Kills flags!!
-void Assembler::popaq() {
- movq(r15, Address(rsp, 0));
- movq(r14, Address(rsp, wordSize));
- movq(r13, Address(rsp, 2 * wordSize));
- movq(r12, Address(rsp, 3 * wordSize));
- movq(r11, Address(rsp, 4 * wordSize));
- movq(r10, Address(rsp, 5 * wordSize));
- movq(r9, Address(rsp, 6 * wordSize));
- movq(r8, Address(rsp, 7 * wordSize));
- movq(rdi, Address(rsp, 8 * wordSize));
- movq(rsi, Address(rsp, 9 * wordSize));
- movq(rbp, Address(rsp, 10 * wordSize));
- // skip rsp
- movq(rbx, Address(rsp, 12 * wordSize));
- movq(rdx, Address(rsp, 13 * wordSize));
- movq(rcx, Address(rsp, 14 * wordSize));
- movq(rax, Address(rsp, 15 * wordSize));
-
- addq(rsp, 16 * wordSize);
+void Assembler::addl(Register dst, Address src) {
+ InstructionMark im(this);
+ prefix(src, dst);
+ emit_byte(0x03);
+ emit_operand(dst, src);
}
-void Assembler::pushfq() {
- emit_byte(0x9C);
+void Assembler::addl(Register dst, Register src) {
+ (void) prefix_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x03, 0xC0, dst, src);
}
-void Assembler::popfq() {
- emit_byte(0x9D);
+void Assembler::addr_nop_4() {
+ // 4 bytes: NOP DWORD PTR [EAX+0]
+ emit_byte(0x0F);
+ emit_byte(0x1F);
+ emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
+ emit_byte(0); // 8-bits offset (1 byte)
}
-void Assembler::pushq(int imm32) {
- emit_byte(0x68);
- emit_long(imm32);
+void Assembler::addr_nop_5() {
+ // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
+ emit_byte(0x0F);
+ emit_byte(0x1F);
+ emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
+ emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+ emit_byte(0); // 8-bits offset (1 byte)
}
-void Assembler::pushq(Register src) {
- int encode = prefix_and_encode(src->encoding());
-
- emit_byte(0x50 | encode);
+void Assembler::addr_nop_7() {
+ // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
+ emit_byte(0x0F);
+ emit_byte(0x1F);
+ emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
+ emit_long(0); // 32-bits offset (4 bytes)
}
-void Assembler::pushq(Address src) {
- InstructionMark im(this);
- prefix(src);
- emit_byte(0xFF);
- emit_operand(rsi, src);
+void Assembler::addr_nop_8() {
+ // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
+ emit_byte(0x0F);
+ emit_byte(0x1F);
+ emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
+ emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
+ emit_long(0); // 32-bits offset (4 bytes)
}
-void Assembler::popq(Register dst) {
- int encode = prefix_and_encode(dst->encoding());
- emit_byte(0x58 | encode);
+void Assembler::addsd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x58);
+ emit_byte(0xC0 | encode);
}
-void Assembler::popq(Address dst) {
+void Assembler::addsd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- prefix(dst);
- emit_byte(0x8F);
- emit_operand(rax, dst);
+ emit_byte(0xF2);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x58);
+ emit_operand(dst, src);
}
-void Assembler::prefix(Prefix p) {
- a_byte(p);
+void Assembler::addss(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ emit_byte(0xF3);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x58);
+ emit_byte(0xC0 | encode);
}
-void Assembler::movb(Register dst, Address src) {
+void Assembler::addss(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- prefix(src, dst, true);
- emit_byte(0x8A);
+ emit_byte(0xF3);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x58);
emit_operand(dst, src);
}
-void Assembler::movb(Address dst, int imm8) {
- InstructionMark im(this);
+void Assembler::andl(Register dst, int32_t imm32) {
prefix(dst);
- emit_byte(0xC6);
- emit_operand(rax, dst, 1);
- emit_byte(imm8);
+ emit_arith(0x81, 0xE0, dst, imm32);
}
-void Assembler::movb(Address dst, Register src) {
+void Assembler::andl(Register dst, Address src) {
InstructionMark im(this);
- prefix(dst, src, true);
- emit_byte(0x88);
- emit_operand(src, dst);
+ prefix(src, dst);
+ emit_byte(0x23);
+ emit_operand(dst, src);
}
-void Assembler::movw(Address dst, int imm16) {
- InstructionMark im(this);
- emit_byte(0x66); // switch to 16-bit mode
- prefix(dst);
- emit_byte(0xC7);
- emit_operand(rax, dst, 2);
- emit_word(imm16);
+void Assembler::andl(Register dst, Register src) {
+ (void) prefix_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x23, 0xC0, dst, src);
}
-void Assembler::movw(Register dst, Address src) {
+void Assembler::andpd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
emit_byte(0x66);
prefix(src, dst);
- emit_byte(0x8B);
+ emit_byte(0x0F);
+ emit_byte(0x54);
emit_operand(dst, src);
}
-void Assembler::movw(Address dst, Register src) {
+void Assembler::bswapl(Register reg) { // bswap
+ int encode = prefix_and_encode(reg->encoding());
+ emit_byte(0x0F);
+ emit_byte(0xC8 | encode);
+}
+
+void Assembler::call(Label& L, relocInfo::relocType rtype) {
+ // suspect disp32 is always good
+ int operand = LP64_ONLY(disp32_operand) NOT_LP64(imm_operand);
+
+ if (L.is_bound()) {
+ const int long_size = 5;
+ int offs = (int)( target(L) - pc() );
+ assert(offs <= 0, "assembler error");
+ InstructionMark im(this);
+ // 1110 1000 #32-bit disp
+ emit_byte(0xE8);
+ emit_data(offs - long_size, rtype, operand);
+ } else {
+ InstructionMark im(this);
+ // 1110 1000 #32-bit disp
+ L.add_patch_at(code(), locator());
+
+ emit_byte(0xE8);
+ emit_data(int(0), rtype, operand);
+ }
+}
+
+void Assembler::call(Register dst) {
+ // This was originally using a 32bit register encoding
+ // and surely we want 64bit!
+ // this is a 32bit encoding but in 64bit mode the default
+ // operand size is 64bit so there is no need for the
+ // wide prefix. So prefix only happens if we use the
+ // new registers. Much like push/pop.
+ int x = offset();
+ // this may be true but dbx disassembles it as if it
+ // were 32bits...
+ // int encode = prefix_and_encode(dst->encoding());
+ // if (offset() != x) assert(dst->encoding() >= 8, "what?");
+ int encode = prefixq_and_encode(dst->encoding());
+
+ emit_byte(0xFF);
+ emit_byte(0xD0 | encode);
+}
+
+
+void Assembler::call(Address adr) {
InstructionMark im(this);
- emit_byte(0x66);
- prefix(dst, src);
- emit_byte(0x89);
- emit_operand(src, dst);
+ prefix(adr);
+ emit_byte(0xFF);
+ emit_operand(rdx, adr);
}
-// Uses zero extension.
-void Assembler::movl(Register dst, int imm32) {
- int encode = prefix_and_encode(dst->encoding());
- emit_byte(0xB8 | encode);
- emit_long(imm32);
+void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
+ assert(entry != NULL, "call most probably wrong");
+ InstructionMark im(this);
+ emit_byte(0xE8);
+ intptr_t disp = entry - (_code_pos + sizeof(int32_t));
+ assert(is_simm32(disp), "must be 32bit offset (call2)");
+ // Technically, should use call32_operand, but this format is
+ // implied by the fact that we're emitting a call instruction.
+
+ int operand = LP64_ONLY(disp32_operand) NOT_LP64(call32_operand);
+ emit_data((int) disp, rspec, operand);
}
-void Assembler::movl(Register dst, Register src) {
+void Assembler::cdql() {
+ emit_byte(0x99);
+}
+
+void Assembler::cmovl(Condition cc, Register dst, Register src) {
+ NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x8B);
+ emit_byte(0x0F);
+ emit_byte(0x40 | cc);
emit_byte(0xC0 | encode);
}
-void Assembler::movl(Register dst, Address src) {
- InstructionMark im(this);
+
+void Assembler::cmovl(Condition cc, Register dst, Address src) {
+ NOT_LP64(guarantee(VM_Version::supports_cmov(), "illegal instruction"));
prefix(src, dst);
- emit_byte(0x8B);
+ emit_byte(0x0F);
+ emit_byte(0x40 | cc);
emit_operand(dst, src);
}
-void Assembler::movl(Address dst, int imm32) {
+void Assembler::cmpb(Address dst, int imm8) {
InstructionMark im(this);
prefix(dst);
- emit_byte(0xC7);
- emit_operand(rax, dst, 4);
- emit_long(imm32);
+ emit_byte(0x80);
+ emit_operand(rdi, dst, 1);
+ emit_byte(imm8);
}
-void Assembler::movl(Address dst, Register src) {
+void Assembler::cmpl(Address dst, int32_t imm32) {
InstructionMark im(this);
- prefix(dst, src);
- emit_byte(0x89);
- emit_operand(src, dst);
+ prefix(dst);
+ emit_byte(0x81);
+ emit_operand(rdi, dst, 4);
+ emit_long(imm32);
}
-void Assembler::mov64(Register dst, intptr_t imm64) {
- InstructionMark im(this);
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xB8 | encode);
- emit_long64(imm64);
+void Assembler::cmpl(Register dst, int32_t imm32) {
+ prefix(dst);
+ emit_arith(0x81, 0xF8, dst, imm32);
}
-void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
- InstructionMark im(this);
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xB8 | encode);
- emit_data64(imm64, rspec);
+void Assembler::cmpl(Register dst, Register src) {
+ (void) prefix_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x3B, 0xC0, dst, src);
}
-void Assembler::movq(Register dst, Register src) {
- int encode = prefixq_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x8B);
- emit_byte(0xC0 | encode);
-}
-void Assembler::movq(Register dst, Address src) {
+void Assembler::cmpl(Register dst, Address src) {
InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x8B);
+ prefix(src, dst);
+ emit_byte(0x3B);
emit_operand(dst, src);
}
-void Assembler::mov64(Address dst, intptr_t imm32) {
- assert(is_simm32(imm32), "lost bits");
+void Assembler::cmpw(Address dst, int imm16) {
InstructionMark im(this);
- prefixq(dst);
- emit_byte(0xC7);
- emit_operand(rax, dst, 4);
- emit_long(imm32);
+ assert(!dst.base_needs_rex() && !dst.index_needs_rex(), "no extended registers");
+ emit_byte(0x66);
+ emit_byte(0x81);
+ emit_operand(rdi, dst, 2);
+ emit_word(imm16);
}
-void Assembler::movq(Address dst, Register src) {
- InstructionMark im(this);
- prefixq(dst, src);
- emit_byte(0x89);
- emit_operand(src, dst);
+// The 32-bit cmpxchg compares the value at adr with the contents of rax,
+// and stores reg into adr if so; otherwise, the value at adr is loaded into rax.
+// The ZF is set if the compared values were equal, and cleared otherwise.
+void Assembler::cmpxchgl(Register reg, Address adr) { // cmpxchg
+ if (Atomics & 2) {
+ // caveat: no instructionmark, so this isn't relocatable.
+ // Emit a synthetic, non-atomic, CAS equivalent.
+ // Beware. The synthetic form sets all ICCs, not just ZF.
+      // cmpxchg r,[m] is equivalent to rax = CAS(m, rax, r)
+ cmpl(rax, adr);
+ movl(rax, adr);
+ if (reg != rax) {
+ Label L ;
+ jcc(Assembler::notEqual, L);
+ movl(adr, reg);
+ bind(L);
+ }
+ } else {
+ InstructionMark im(this);
+ prefix(adr, reg);
+ emit_byte(0x0F);
+ emit_byte(0xB1);
+ emit_operand(reg, adr);
+ }
}
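+// A minimal usage sketch of the real (non-synthetic) path above: pairing the
+// lock prefix with cmpxchgl yields an atomic compare-and-swap. The names
+// expected, new_val, counter_addr and retry are hypothetical, for illustration only.
+//
+//   movl(rax, expected);                 // rax holds the value we expect in memory
+//   lock();                              // make the following cmpxchg atomic on MP
+//   cmpxchgl(new_val, counter_addr);     // ZF set iff the swap happened
+//   jcc(Assembler::notEqual, retry);     // otherwise rax now holds the current value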
-void Assembler::movsbl(Register dst, Address src) {
+void Assembler::comisd(XMMRegister dst, Address src) {
+ // NOTE: dbx seems to decode this as comiss even though the
+  // 0x66 is there. Strangely ucomisd comes out correct
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0x66);
+ comiss(dst, src);
+}
+
+void Assembler::comiss(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+
InstructionMark im(this);
prefix(src, dst);
emit_byte(0x0F);
- emit_byte(0xBE);
+ emit_byte(0x2F);
emit_operand(dst, src);
}
-void Assembler::movsbl(Register dst, Register src) {
- int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
+void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF3);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0xBE);
+ emit_byte(0xE6);
emit_byte(0xC0 | encode);
}
-void Assembler::movswl(Register dst, Address src) {
- InstructionMark im(this);
- prefix(src, dst);
+void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0xBF);
- emit_operand(dst, src);
+ emit_byte(0x5B);
+ emit_byte(0xC0 | encode);
}
-void Assembler::movswl(Register dst, Register src) {
+void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0xBF);
+ emit_byte(0x5A);
emit_byte(0xC0 | encode);
}
-void Assembler::movslq(Register dst, Address src) {
- InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x63);
- emit_operand(dst, src);
-}
-
-void Assembler::movslq(Register dst, Register src) {
- int encode = prefixq_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x63);
+void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x2A);
emit_byte(0xC0 | encode);
}
-void Assembler::movzbl(Register dst, Address src) {
- InstructionMark im(this);
- prefix(src, dst);
+void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ emit_byte(0xF3);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0xB6);
- emit_operand(dst, src);
+ emit_byte(0x2A);
+ emit_byte(0xC0 | encode);
}
-void Assembler::movzbl(Register dst, Register src) {
- int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
+void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF3);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0xB6);
+ emit_byte(0x5A);
emit_byte(0xC0 | encode);
}
-void Assembler::movzwl(Register dst, Address src) {
- InstructionMark im(this);
- prefix(src, dst);
+void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0xB7);
- emit_operand(dst, src);
+ emit_byte(0x2C);
+ emit_byte(0xC0 | encode);
}
-void Assembler::movzwl(Register dst, Register src) {
+void Assembler::cvttss2sil(Register dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ emit_byte(0xF3);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0xB7);
+ emit_byte(0x2C);
emit_byte(0xC0 | encode);
}
-void Assembler::movss(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF3);
+void Assembler::decl(Address dst) {
+ // Don't use it directly. Use MacroAssembler::decrement() instead.
+ InstructionMark im(this);
+ prefix(dst);
+ emit_byte(0xFF);
+ emit_operand(rcx, dst);
+}
+
+void Assembler::divsd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionMark im(this);
+ emit_byte(0xF2);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x5E);
+ emit_operand(dst, src);
+}
+
+void Assembler::divsd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x10);
+ emit_byte(0x5E);
emit_byte(0xC0 | encode);
}
-void Assembler::movss(XMMRegister dst, Address src) {
+void Assembler::divss(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
emit_byte(0xF3);
prefix(src, dst);
emit_byte(0x0F);
- emit_byte(0x10);
+ emit_byte(0x5E);
emit_operand(dst, src);
}
-void Assembler::movss(Address dst, XMMRegister src) {
- InstructionMark im(this);
+void Assembler::divss(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
emit_byte(0xF3);
- prefix(dst, src);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x11);
- emit_operand(src, dst);
+ emit_byte(0x5E);
+ emit_byte(0xC0 | encode);
}
-void Assembler::movsd(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF2);
+void Assembler::emms() {
+ NOT_LP64(assert(VM_Version::supports_mmx(), ""));
+ emit_byte(0x0F);
+ emit_byte(0x77);
+}
+
+void Assembler::hlt() {
+ emit_byte(0xF4);
+}
+
+void Assembler::idivl(Register src) {
+ int encode = prefix_and_encode(src->encoding());
+ emit_byte(0xF7);
+ emit_byte(0xF8 | encode);
+}
+
+void Assembler::imull(Register dst, Register src) {
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x10);
+ emit_byte(0xAF);
emit_byte(0xC0 | encode);
}
-void Assembler::movsd(XMMRegister dst, Address src) {
+
+void Assembler::imull(Register dst, Register src, int value) {
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ if (is8bit(value)) {
+ emit_byte(0x6B);
+ emit_byte(0xC0 | encode);
+ emit_byte(value);
+ } else {
+ emit_byte(0x69);
+ emit_byte(0xC0 | encode);
+ emit_long(value);
+ }
+}
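+// Worked example of the encoding choice above (registers chosen arbitrarily):
+// imull(rdx, rcx, 8) satisfies is8bit() and uses the short 0x6B form with an
+// 8-bit immediate, while imull(rdx, rcx, 1024) falls back to the 0x69 form
+// with a full 32-bit immediate.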
+
+void Assembler::incl(Address dst) {
+ // Don't use it directly. Use MacroAssembler::increment() instead.
InstructionMark im(this);
- emit_byte(0xF2);
- prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x10);
- emit_operand(dst, src);
+ prefix(dst);
+ emit_byte(0xFF);
+ emit_operand(rax, dst);
}
-void Assembler::movsd(Address dst, XMMRegister src) {
+void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
InstructionMark im(this);
- emit_byte(0xF2);
- prefix(dst, src);
+ relocate(rtype);
+ assert((0 <= cc) && (cc < 16), "illegal cc");
+ if (L.is_bound()) {
+ address dst = target(L);
+ assert(dst != NULL, "jcc most probably wrong");
+
+ const int short_size = 2;
+ const int long_size = 6;
+ intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
+ if (rtype == relocInfo::none && is8bit(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ emit_byte(0x70 | cc);
+ emit_byte((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ assert(is_simm32(offs - long_size),
+ "must be 32bit offset (call4)");
+ emit_byte(0x0F);
+ emit_byte(0x80 | cc);
+ emit_long(offs - long_size);
+ }
+ } else {
+    // Note: could eliminate cond. jumps to this jump if the condition
+    // is the same; however, that seems to be a rather unlikely case.
+ // Note: use jccb() if label to be bound is very close to get
+ // an 8-bit displacement
+ L.add_patch_at(code(), locator());
+ emit_byte(0x0F);
+ emit_byte(0x80 | cc);
+ emit_long(0);
+ }
+}
+
+void Assembler::jccb(Condition cc, Label& L) {
+ if (L.is_bound()) {
+ const int short_size = 2;
+ address entry = target(L);
+    assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
+           "Displacement too large for a short jmp");
+ intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
+ // 0111 tttn #8-bit disp
+ emit_byte(0x70 | cc);
+ emit_byte((offs - short_size) & 0xFF);
+ } else {
+ InstructionMark im(this);
+ L.add_patch_at(code(), locator());
+ emit_byte(0x70 | cc);
+ emit_byte(0);
+ }
+}
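+// A minimal sketch of the short/long branch trade-off noted in jcc() above;
+// the labels done and far_target are hypothetical. jccb emits the 2-byte
+// 0x70|cc form and asserts if a bound target needs more than an 8-bit
+// displacement, while jcc falls back to the 6-byte 0x0F 0x80|cc form.
+//
+//   Label done;
+//   cmpl(rax, 0);
+//   jccb(Assembler::equal, done);        // short form: target known to be nearby
+//   ...
+//   bind(done);
+//
+//   jcc(Assembler::equal, far_target);   // long form: target may be anywhere in the code buffer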
+
+void Assembler::jmp(Address adr) {
+ InstructionMark im(this);
+ prefix(adr);
+ emit_byte(0xFF);
+ emit_operand(rsp, adr);
+}
+
+void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
+ if (L.is_bound()) {
+ address entry = target(L);
+ assert(entry != NULL, "jmp most probably wrong");
+ InstructionMark im(this);
+ const int short_size = 2;
+ const int long_size = 5;
+ intptr_t offs = entry - _code_pos;
+ if (rtype == relocInfo::none && is8bit(offs - short_size)) {
+ emit_byte(0xEB);
+ emit_byte((offs - short_size) & 0xFF);
+ } else {
+ emit_byte(0xE9);
+ emit_long(offs - long_size);
+ }
+ } else {
+ // By default, forward jumps are always 32-bit displacements, since
+ // we can't yet know where the label will be bound. If you're sure that
+ // the forward jump will not run beyond 256 bytes, use jmpb to
+ // force an 8-bit displacement.
+ InstructionMark im(this);
+ relocate(rtype);
+ L.add_patch_at(code(), locator());
+ emit_byte(0xE9);
+ emit_long(0);
+ }
+}
+
+void Assembler::jmp(Register entry) {
+ int encode = prefix_and_encode(entry->encoding());
+ emit_byte(0xFF);
+ emit_byte(0xE0 | encode);
+}
+
+void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
+ InstructionMark im(this);
+ emit_byte(0xE9);
+ assert(dest != NULL, "must have a target");
+ intptr_t disp = dest - (_code_pos + sizeof(int32_t));
+ assert(is_simm32(disp), "must be 32bit offset (jmp)");
+ emit_data(disp, rspec.reloc(), call32_operand);
+}
+
+void Assembler::jmpb(Label& L) {
+ if (L.is_bound()) {
+ const int short_size = 2;
+ address entry = target(L);
+    assert(is8bit((entry - _code_pos) + short_size),
+           "Displacement too large for a short jmp");
+ assert(entry != NULL, "jmp most probably wrong");
+ intptr_t offs = entry - _code_pos;
+ emit_byte(0xEB);
+ emit_byte((offs - short_size) & 0xFF);
+ } else {
+ InstructionMark im(this);
+ L.add_patch_at(code(), locator());
+ emit_byte(0xEB);
+ emit_byte(0);
+ }
+}
+
+void Assembler::ldmxcsr( Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ InstructionMark im(this);
+ prefix(src);
emit_byte(0x0F);
- emit_byte(0x11);
- emit_operand(src, dst);
+ emit_byte(0xAE);
+ emit_operand(as_Register(2), src);
}
-// New cpus require to use movsd and movss to avoid partial register stall
-// when loading from memory. But for old Opteron use movlpd instead of movsd.
-// The selection is done in MacroAssembler::movdbl() and movflt().
-void Assembler::movlpd(XMMRegister dst, Address src) {
+void Assembler::leal(Register dst, Address src) {
InstructionMark im(this);
- emit_byte(0x66);
+#ifdef _LP64
+ emit_byte(0x67); // addr32
prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x12);
+#endif // LP64
+ emit_byte(0x8D);
emit_operand(dst, src);
}
+void Assembler::lock() {
+ if (Atomics & 1) {
+ // Emit either nothing, a NOP, or a NOP: prefix
+ emit_byte(0x90) ;
+ } else {
+ emit_byte(0xF0);
+ }
+}
+
+// Serializes memory.
+void Assembler::mfence() {
+ // Memory barriers are only needed on multiprocessors
+ if (os::is_MP()) {
+ if( LP64_ONLY(true ||) VM_Version::supports_sse2() ) {
+ emit_byte( 0x0F ); // MFENCE; faster blows no regs
+ emit_byte( 0xAE );
+ emit_byte( 0xF0 );
+ } else {
+      // All usable chips support "locked" instructions which suffice
+      // as barriers, and are much faster than the alternative of
+      // using the cpuid instruction. Here we use a locked add [esp],0,
+      // which is conveniently otherwise a no-op except for blowing
+      // flags (which we save and restore).
+ pushf(); // Save eflags register
+ lock();
+ addl(Address(rsp, 0), 0);// Assert the lock# signal here
+ popf(); // Restore eflags register
+ }
+ }
+}
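+// Hedged usage sketch: a caller emits mfence() (or relies on the locked-add
+// fallback above) between a store and a later load that must not be reordered
+// past it, e.g. a Dekker-style handshake. The field offsets flag_offset and
+// other_flag_offset are hypothetical.
+//
+//   movl(Address(rbx, flag_offset), 1);          // publish our flag
+//   mfence();                                    // order the store before the load
+//   cmpl(Address(rcx, other_flag_offset), 0);    // now safe to examine the other flag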
+
+void Assembler::mov(Register dst, Register src) {
+ LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
+}
+
void Assembler::movapd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
int dstenc = dst->encoding();
int srcenc = src->encoding();
emit_byte(0x66);
@@ -1511,6 +1479,7 @@ void Assembler::movapd(XMMRegister dst, XMMRegister src) {
}
void Assembler::movaps(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
int dstenc = dst->encoding();
int srcenc = src->encoding();
if (dstenc < 8) {
@@ -1532,7 +1501,34 @@ void Assembler::movaps(XMMRegister dst, XMMRegister src) {
emit_byte(0xC0 | dstenc << 3 | srcenc);
}
+void Assembler::movb(Register dst, Address src) {
+ NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
+ InstructionMark im(this);
+ prefix(src, dst, true);
+ emit_byte(0x8A);
+ emit_operand(dst, src);
+}
+
+
+void Assembler::movb(Address dst, int imm8) {
+ InstructionMark im(this);
+ prefix(dst);
+ emit_byte(0xC6);
+ emit_operand(rax, dst, 1);
+ emit_byte(imm8);
+}
+
+
+void Assembler::movb(Address dst, Register src) {
+ assert(src->has_byte_register(), "must have byte register");
+ InstructionMark im(this);
+ prefix(dst, src, true);
+ emit_byte(0x88);
+ emit_operand(src, dst);
+}
+
void Assembler::movdl(XMMRegister dst, Register src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_byte(0x66);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
@@ -1541,6 +1537,7 @@ void Assembler::movdl(XMMRegister dst, Register src) {
}
void Assembler::movdl(Register dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_byte(0x66);
// swap src/dst to get correct prefix
int encode = prefix_and_encode(src->encoding(), dst->encoding());
@@ -1549,68 +1546,106 @@ void Assembler::movdl(Register dst, XMMRegister src) {
emit_byte(0xC0 | encode);
}
-void Assembler::movdq(XMMRegister dst, Register src) {
+void Assembler::movdqa(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionMark im(this);
+ emit_byte(0x66);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x6F);
+ emit_operand(dst, src);
+}
+
+void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_byte(0x66);
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x6E);
+ emit_byte(0x6F);
emit_byte(0xC0 | encode);
}
-void Assembler::movdq(Register dst, XMMRegister src) {
+void Assembler::movdqa(Address dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionMark im(this);
emit_byte(0x66);
- // swap src/dst to get correct prefix
- int encode = prefixq_and_encode(src->encoding(), dst->encoding());
+ prefix(dst, src);
emit_byte(0x0F);
- emit_byte(0x7E);
+ emit_byte(0x7F);
+ emit_operand(src, dst);
+}
+
+// Uses zero extension on 64bit
+
+void Assembler::movl(Register dst, int32_t imm32) {
+ int encode = prefix_and_encode(dst->encoding());
+ emit_byte(0xB8 | encode);
+ emit_long(imm32);
+}
+
+void Assembler::movl(Register dst, Register src) {
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x8B);
emit_byte(0xC0 | encode);
}
-void Assembler::pxor(XMMRegister dst, Address src) {
+void Assembler::movl(Register dst, Address src) {
InstructionMark im(this);
- emit_byte(0x66);
prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0xEF);
+ emit_byte(0x8B);
emit_operand(dst, src);
}
-void Assembler::pxor(XMMRegister dst, XMMRegister src) {
+void Assembler::movl(Address dst, int32_t imm32) {
InstructionMark im(this);
- emit_byte(0x66);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0xEF);
- emit_byte(0xC0 | encode);
+ prefix(dst);
+ emit_byte(0xC7);
+ emit_operand(rax, dst, 4);
+ emit_long(imm32);
}
-void Assembler::movdqa(XMMRegister dst, Address src) {
+void Assembler::movl(Address dst, Register src) {
+ InstructionMark im(this);
+ prefix(dst, src);
+ emit_byte(0x89);
+ emit_operand(src, dst);
+}
+
+// Newer cpus require the use of movsd and movss to avoid a partial register
+// stall when loading from memory. But for old Opteron use movlpd instead of movsd.
+// The selection is done in MacroAssembler::movdbl() and movflt().
+void Assembler::movlpd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
emit_byte(0x66);
prefix(src, dst);
emit_byte(0x0F);
- emit_byte(0x6F);
+ emit_byte(0x12);
emit_operand(dst, src);
}
-void Assembler::movdqa(XMMRegister dst, XMMRegister src) {
- emit_byte(0x66);
- int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+void Assembler::movq( MMXRegister dst, Address src ) {
+ assert( VM_Version::supports_mmx(), "" );
emit_byte(0x0F);
emit_byte(0x6F);
- emit_byte(0xC0 | encode);
+ emit_operand(dst, src);
}
-void Assembler::movdqa(Address dst, XMMRegister src) {
- InstructionMark im(this);
- emit_byte(0x66);
- prefix(dst, src);
+void Assembler::movq( Address dst, MMXRegister src ) {
+ assert( VM_Version::supports_mmx(), "" );
emit_byte(0x0F);
emit_byte(0x7F);
- emit_operand(src, dst);
+  // work around gcc (3.2.1-7a) bug
+  // In that version of gcc, with only an emit_operand(MMX, Address)
+  // available, gcc will tail jump and try to reverse the parameters,
+  // completely obliterating dst in the process. By providing a version
+  // that doesn't need to swap the args at the tail jump, the bug is
+  // avoided.
+ emit_operand(dst, src);
}
void Assembler::movq(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
emit_byte(0xF3);
prefix(src, dst);
@@ -1620,6 +1655,7 @@ void Assembler::movq(XMMRegister dst, Address src) {
}
void Assembler::movq(Address dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
emit_byte(0x66);
prefix(dst, src);
@@ -1628,547 +1664,680 @@ void Assembler::movq(Address dst, XMMRegister src) {
emit_operand(src, dst);
}
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
- assert(isByte(mode), "invalid value");
- emit_byte(0x66);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
+void Assembler::movsbl(Register dst, Address src) { // movsxb
+ InstructionMark im(this);
+ prefix(src, dst);
emit_byte(0x0F);
- emit_byte(0x70);
- emit_byte(0xC0 | encode);
- emit_byte(mode & 0xFF);
+ emit_byte(0xBE);
+ emit_operand(dst, src);
}
-void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
- assert(isByte(mode), "invalid value");
- InstructionMark im(this);
- emit_byte(0x66);
+void Assembler::movsbl(Register dst, Register src) { // movsxb
+ NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
+ int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
emit_byte(0x0F);
- emit_byte(0x70);
- emit_operand(dst, src);
- emit_byte(mode & 0xFF);
+ emit_byte(0xBE);
+ emit_byte(0xC0 | encode);
}
-void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
- assert(isByte(mode), "invalid value");
+void Assembler::movsd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_byte(0xF2);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x70);
+ emit_byte(0x10);
emit_byte(0xC0 | encode);
- emit_byte(mode & 0xFF);
}
-void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
- assert(isByte(mode), "invalid value");
+void Assembler::movsd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
emit_byte(0xF2);
+ prefix(src, dst);
emit_byte(0x0F);
- emit_byte(0x70);
+ emit_byte(0x10);
emit_operand(dst, src);
- emit_byte(mode & 0xFF);
}
-void Assembler::cmovl(Condition cc, Register dst, Register src) {
+void Assembler::movsd(Address dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionMark im(this);
+ emit_byte(0xF2);
+ prefix(dst, src);
+ emit_byte(0x0F);
+ emit_byte(0x11);
+ emit_operand(src, dst);
+}
+
+void Assembler::movss(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ emit_byte(0xF3);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x40 | cc);
+ emit_byte(0x10);
emit_byte(0xC0 | encode);
}
-void Assembler::cmovl(Condition cc, Register dst, Address src) {
+void Assembler::movss(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
+ emit_byte(0xF3);
prefix(src, dst);
emit_byte(0x0F);
- emit_byte(0x40 | cc);
+ emit_byte(0x10);
emit_operand(dst, src);
}
-void Assembler::cmovq(Condition cc, Register dst, Register src) {
- int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+void Assembler::movss(Address dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ InstructionMark im(this);
+ emit_byte(0xF3);
+ prefix(dst, src);
emit_byte(0x0F);
- emit_byte(0x40 | cc);
- emit_byte(0xC0 | encode);
+ emit_byte(0x11);
+ emit_operand(src, dst);
}
-void Assembler::cmovq(Condition cc, Register dst, Address src) {
+void Assembler::movswl(Register dst, Address src) { // movsxw
InstructionMark im(this);
- prefixq(src, dst);
+ prefix(src, dst);
emit_byte(0x0F);
- emit_byte(0x40 | cc);
+ emit_byte(0xBF);
emit_operand(dst, src);
}
-void Assembler::prefetch_prefix(Address src) {
- prefix(src);
+void Assembler::movswl(Register dst, Register src) { // movsxw
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
+ emit_byte(0xBF);
+ emit_byte(0xC0 | encode);
}
-void Assembler::prefetcht0(Address src) {
+void Assembler::movw(Address dst, int imm16) {
InstructionMark im(this);
- prefetch_prefix(src);
- emit_byte(0x18);
- emit_operand(rcx, src); // 1, src
-}
-void Assembler::prefetcht1(Address src) {
- InstructionMark im(this);
- prefetch_prefix(src);
- emit_byte(0x18);
- emit_operand(rdx, src); // 2, src
+ emit_byte(0x66); // switch to 16-bit mode
+ prefix(dst);
+ emit_byte(0xC7);
+ emit_operand(rax, dst, 2);
+ emit_word(imm16);
}
-void Assembler::prefetcht2(Address src) {
+void Assembler::movw(Register dst, Address src) {
InstructionMark im(this);
- prefetch_prefix(src);
- emit_byte(0x18);
- emit_operand(rbx, src); // 3, src
+ emit_byte(0x66);
+ prefix(src, dst);
+ emit_byte(0x8B);
+ emit_operand(dst, src);
}
-void Assembler::prefetchnta(Address src) {
+void Assembler::movw(Address dst, Register src) {
InstructionMark im(this);
- prefetch_prefix(src);
- emit_byte(0x18);
- emit_operand(rax, src); // 0, src
+ emit_byte(0x66);
+ prefix(dst, src);
+ emit_byte(0x89);
+ emit_operand(src, dst);
}
-void Assembler::prefetchw(Address src) {
+void Assembler::movzbl(Register dst, Address src) { // movzxb
InstructionMark im(this);
- prefetch_prefix(src);
- emit_byte(0x0D);
- emit_operand(rcx, src); // 1, src
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0xB6);
+ emit_operand(dst, src);
}
-void Assembler::adcl(Register dst, int imm32) {
- prefix(dst);
- emit_arith(0x81, 0xD0, dst, imm32);
+void Assembler::movzbl(Register dst, Register src) { // movzxb
+ NOT_LP64(assert(src->has_byte_register(), "must have byte register"));
+ int encode = prefix_and_encode(dst->encoding(), src->encoding(), true);
+ emit_byte(0x0F);
+ emit_byte(0xB6);
+ emit_byte(0xC0 | encode);
}
-void Assembler::adcl(Register dst, Address src) {
+void Assembler::movzwl(Register dst, Address src) { // movzxw
InstructionMark im(this);
prefix(src, dst);
- emit_byte(0x13);
+ emit_byte(0x0F);
+ emit_byte(0xB7);
emit_operand(dst, src);
}
-void Assembler::adcl(Register dst, Register src) {
- (void) prefix_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x13, 0xC0, dst, src);
-}
-
-void Assembler::adcq(Register dst, int imm32) {
- (void) prefixq_and_encode(dst->encoding());
- emit_arith(0x81, 0xD0, dst, imm32);
+void Assembler::movzwl(Register dst, Register src) { // movzxw
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0xB7);
+ emit_byte(0xC0 | encode);
}
-void Assembler::adcq(Register dst, Address src) {
+void Assembler::mull(Address src) {
InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x13);
- emit_operand(dst, src);
-}
-
-void Assembler::adcq(Register dst, Register src) {
- (int) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x13, 0xC0, dst, src);
+ prefix(src);
+ emit_byte(0xF7);
+ emit_operand(rsp, src);
}
-void Assembler::addl(Address dst, int imm32) {
- InstructionMark im(this);
- prefix(dst);
- emit_arith_operand(0x81, rax, dst,imm32);
+void Assembler::mull(Register src) {
+ int encode = prefix_and_encode(src->encoding());
+ emit_byte(0xF7);
+ emit_byte(0xE0 | encode);
}
-void Assembler::addl(Address dst, Register src) {
+void Assembler::mulsd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- prefix(dst, src);
- emit_byte(0x01);
- emit_operand(src, dst);
+ emit_byte(0xF2);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x59);
+ emit_operand(dst, src);
}
-void Assembler::addl(Register dst, int imm32) {
- prefix(dst);
- emit_arith(0x81, 0xC0, dst, imm32);
+void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x59);
+ emit_byte(0xC0 | encode);
}
-void Assembler::addl(Register dst, Address src) {
+void Assembler::mulss(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
+ emit_byte(0xF3);
prefix(src, dst);
- emit_byte(0x03);
+ emit_byte(0x0F);
+ emit_byte(0x59);
emit_operand(dst, src);
}
-void Assembler::addl(Register dst, Register src) {
- (void) prefix_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x03, 0xC0, dst, src);
+void Assembler::mulss(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ emit_byte(0xF3);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x59);
+ emit_byte(0xC0 | encode);
}
-void Assembler::addq(Address dst, int imm32) {
- InstructionMark im(this);
- prefixq(dst);
- emit_arith_operand(0x81, rax, dst,imm32);
+void Assembler::negl(Register dst) {
+ int encode = prefix_and_encode(dst->encoding());
+ emit_byte(0xF7);
+ emit_byte(0xD8 | encode);
}
-void Assembler::addq(Address dst, Register src) {
- InstructionMark im(this);
- prefixq(dst, src);
- emit_byte(0x01);
- emit_operand(src, dst);
-}
+void Assembler::nop(int i) {
+#ifdef ASSERT
+ assert(i > 0, " ");
+  // The fancy nops aren't currently recognized by debuggers, making it a
+  // pain to disassemble code while debugging. If asserts are on, speed is
+  // clearly not an issue, so simply use the single-byte traditional nop
+  // to do alignment.
-void Assembler::addq(Register dst, int imm32) {
- (void) prefixq_and_encode(dst->encoding());
- emit_arith(0x81, 0xC0, dst, imm32);
-}
+ for (; i > 0 ; i--) emit_byte(0x90);
+ return;
-void Assembler::addq(Register dst, Address src) {
- InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x03);
- emit_operand(dst, src);
-}
+#endif // ASSERT
-void Assembler::addq(Register dst, Register src) {
- (void) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x03, 0xC0, dst, src);
-}
+ if (UseAddressNop && VM_Version::is_intel()) {
+ //
+    // Using multi-byte nops "0x0F 0x1F [address]" for Intel
+ // 1: 0x90
+ // 2: 0x66 0x90
+ // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
+ // 4: 0x0F 0x1F 0x40 0x00
+ // 5: 0x0F 0x1F 0x44 0x00 0x00
+ // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
+ // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+ // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+ // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+ // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+ // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-void Assembler::andl(Register dst, int imm32) {
- prefix(dst);
- emit_arith(0x81, 0xE0, dst, imm32);
-}
+ // The rest coding is Intel specific - don't use consecutive address nops
-void Assembler::andl(Register dst, Address src) {
- InstructionMark im(this);
- prefix(src, dst);
- emit_byte(0x23);
- emit_operand(dst, src);
-}
+ // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+ // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+ // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+ // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
-void Assembler::andl(Register dst, Register src) {
- (void) prefix_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x23, 0xC0, dst, src);
-}
+ while(i >= 15) {
+      // For Intel don't generate consecutive address nops (mix with regular nops)
+ i -= 15;
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66); // size prefix
+ addr_nop_8();
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66); // size prefix
+ emit_byte(0x90); // nop
+ }
+ switch (i) {
+ case 14:
+ emit_byte(0x66); // size prefix
+ case 13:
+ emit_byte(0x66); // size prefix
+ case 12:
+ addr_nop_8();
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66); // size prefix
+ emit_byte(0x90); // nop
+ break;
+ case 11:
+ emit_byte(0x66); // size prefix
+ case 10:
+ emit_byte(0x66); // size prefix
+ case 9:
+ emit_byte(0x66); // size prefix
+ case 8:
+ addr_nop_8();
+ break;
+ case 7:
+ addr_nop_7();
+ break;
+ case 6:
+ emit_byte(0x66); // size prefix
+ case 5:
+ addr_nop_5();
+ break;
+ case 4:
+ addr_nop_4();
+ break;
+ case 3:
+ // Don't use "0x0F 0x1F 0x00" - need patching safe padding
+ emit_byte(0x66); // size prefix
+ case 2:
+ emit_byte(0x66); // size prefix
+ case 1:
+ emit_byte(0x90); // nop
+ break;
+ default:
+ assert(i == 0, " ");
+ }
+ return;
+ }
+ if (UseAddressNop && VM_Version::is_amd()) {
+ //
+    // Using multi-byte nops "0x0F 0x1F [address]" for AMD.
+ // 1: 0x90
+ // 2: 0x66 0x90
+ // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
+ // 4: 0x0F 0x1F 0x40 0x00
+ // 5: 0x0F 0x1F 0x44 0x00 0x00
+ // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
+ // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+ // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+ // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+ // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+ // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-void Assembler::andq(Register dst, int imm32) {
- (void) prefixq_and_encode(dst->encoding());
- emit_arith(0x81, 0xE0, dst, imm32);
-}
+ // The rest coding is AMD specific - use consecutive address nops
-void Assembler::andq(Register dst, Address src) {
- InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x23);
- emit_operand(dst, src);
-}
+ // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
+ // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
+ // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+ // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
+ // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+ // Size prefixes (0x66) are added for larger sizes
-void Assembler::andq(Register dst, Register src) {
- (int) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x23, 0xC0, dst, src);
+ while(i >= 22) {
+ i -= 11;
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66); // size prefix
+ addr_nop_8();
+ }
+ // Generate first nop for size between 21-12
+ switch (i) {
+ case 21:
+ i -= 1;
+ emit_byte(0x66); // size prefix
+ case 20:
+ case 19:
+ i -= 1;
+ emit_byte(0x66); // size prefix
+ case 18:
+ case 17:
+ i -= 1;
+ emit_byte(0x66); // size prefix
+ case 16:
+ case 15:
+ i -= 8;
+ addr_nop_8();
+ break;
+ case 14:
+ case 13:
+ i -= 7;
+ addr_nop_7();
+ break;
+ case 12:
+ i -= 6;
+ emit_byte(0x66); // size prefix
+ addr_nop_5();
+ break;
+ default:
+ assert(i < 12, " ");
+ }
+
+ // Generate second nop for size between 11-1
+ switch (i) {
+ case 11:
+ emit_byte(0x66); // size prefix
+ case 10:
+ emit_byte(0x66); // size prefix
+ case 9:
+ emit_byte(0x66); // size prefix
+ case 8:
+ addr_nop_8();
+ break;
+ case 7:
+ addr_nop_7();
+ break;
+ case 6:
+ emit_byte(0x66); // size prefix
+ case 5:
+ addr_nop_5();
+ break;
+ case 4:
+ addr_nop_4();
+ break;
+ case 3:
+ // Don't use "0x0F 0x1F 0x00" - need patching safe padding
+ emit_byte(0x66); // size prefix
+ case 2:
+ emit_byte(0x66); // size prefix
+ case 1:
+ emit_byte(0x90); // nop
+ break;
+ default:
+ assert(i == 0, " ");
+ }
+ return;
+ }
+
+ // Using nops with size prefixes "0x66 0x90".
+ // From AMD Optimization Guide:
+ // 1: 0x90
+ // 2: 0x66 0x90
+ // 3: 0x66 0x66 0x90
+ // 4: 0x66 0x66 0x66 0x90
+ // 5: 0x66 0x66 0x90 0x66 0x90
+ // 6: 0x66 0x66 0x90 0x66 0x66 0x90
+ // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
+ // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
+ // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
+ // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
+ //
+ while(i > 12) {
+ i -= 4;
+ emit_byte(0x66); // size prefix
+ emit_byte(0x66);
+ emit_byte(0x66);
+ emit_byte(0x90); // nop
+ }
+ // 1 - 12 nops
+ if(i > 8) {
+ if(i > 9) {
+ i -= 1;
+ emit_byte(0x66);
+ }
+ i -= 3;
+ emit_byte(0x66);
+ emit_byte(0x66);
+ emit_byte(0x90);
+ }
+ // 1 - 8 nops
+ if(i > 4) {
+ if(i > 6) {
+ i -= 1;
+ emit_byte(0x66);
+ }
+ i -= 3;
+ emit_byte(0x66);
+ emit_byte(0x66);
+ emit_byte(0x90);
+ }
+ switch (i) {
+ case 4:
+ emit_byte(0x66);
+ case 3:
+ emit_byte(0x66);
+ case 2:
+ emit_byte(0x66);
+ case 1:
+ emit_byte(0x90);
+ break;
+ default:
+ assert(i == 0, " ");
+ }
}
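+// For illustration, following the tables above: with UseAddressNop on an Intel
+// CPU, nop(5) takes the addr_nop_5() path and emits the single five-byte
+// instruction 0x0F 0x1F 0x44 0x00 0x00; without UseAddressNop the fallback
+// emits 0x66 0x66 0x90 0x66 0x90 instead.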
-void Assembler::cmpb(Address dst, int imm8) {
- InstructionMark im(this);
- prefix(dst);
- emit_byte(0x80);
- emit_operand(rdi, dst, 1);
- emit_byte(imm8);
+void Assembler::notl(Register dst) {
+ int encode = prefix_and_encode(dst->encoding());
+ emit_byte(0xF7);
+ emit_byte(0xD0 | encode );
}
-void Assembler::cmpl(Address dst, int imm32) {
+void Assembler::orl(Address dst, int32_t imm32) {
InstructionMark im(this);
prefix(dst);
emit_byte(0x81);
- emit_operand(rdi, dst, 4);
+ emit_operand(rcx, dst, 4);
emit_long(imm32);
}
-void Assembler::cmpl(Register dst, int imm32) {
+void Assembler::orl(Register dst, int32_t imm32) {
prefix(dst);
- emit_arith(0x81, 0xF8, dst, imm32);
+ emit_arith(0x81, 0xC8, dst, imm32);
}
-void Assembler::cmpl(Register dst, Register src) {
- (void) prefix_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x3B, 0xC0, dst, src);
-}
-void Assembler::cmpl(Register dst, Address src) {
+void Assembler::orl(Register dst, Address src) {
InstructionMark im(this);
prefix(src, dst);
- emit_byte(0x3B);
+ emit_byte(0x0B);
emit_operand(dst, src);
}
-void Assembler::cmpq(Address dst, int imm32) {
- InstructionMark im(this);
- prefixq(dst);
- emit_byte(0x81);
- emit_operand(rdi, dst, 4);
- emit_long(imm32);
-}
-void Assembler::cmpq(Register dst, int imm32) {
- (void) prefixq_and_encode(dst->encoding());
- emit_arith(0x81, 0xF8, dst, imm32);
+void Assembler::orl(Register dst, Register src) {
+ (void) prefix_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x0B, 0xC0, dst, src);
}
-void Assembler::cmpq(Address dst, Register src) {
- prefixq(dst, src);
- emit_byte(0x3B);
- emit_operand(src, dst);
+// generic
+void Assembler::pop(Register dst) {
+ int encode = prefix_and_encode(dst->encoding());
+ emit_byte(0x58 | encode);
}
-void Assembler::cmpq(Register dst, Register src) {
- (void) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x3B, 0xC0, dst, src);
+void Assembler::popf() {
+ emit_byte(0x9D);
}
-void Assembler::cmpq(Register dst, Address src) {
+void Assembler::popl(Address dst) {
+ // NOTE: this will adjust the stack by 8 bytes on 64bit
InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x3B);
- emit_operand(dst, src);
+ prefix(dst);
+ emit_byte(0x8F);
+ emit_operand(rax, dst);
}
-void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
+void Assembler::prefetch_prefix(Address src) {
+ prefix(src);
emit_byte(0x0F);
- emit_byte(0x2E);
- emit_byte(0xC0 | encode);
-}
-
-void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
- emit_byte(0x66);
- ucomiss(dst, src);
}
-void Assembler::decl(Register dst) {
- // Don't use it directly. Use MacroAssembler::decrementl() instead.
- // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
- int encode = prefix_and_encode(dst->encoding());
- emit_byte(0xFF);
- emit_byte(0xC8 | encode);
-}
-
-void Assembler::decl(Address dst) {
- // Don't use it directly. Use MacroAssembler::decrementl() instead.
+void Assembler::prefetchnta(Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), "must support"));
InstructionMark im(this);
- prefix(dst);
- emit_byte(0xFF);
- emit_operand(rcx, dst);
+ prefetch_prefix(src);
+ emit_byte(0x18);
+ emit_operand(rax, src); // 0, src
}
-void Assembler::decq(Register dst) {
- // Don't use it directly. Use MacroAssembler::decrementq() instead.
- // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xFF);
- emit_byte(0xC8 | encode);
+void Assembler::prefetchr(Address src) {
+ NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
+ InstructionMark im(this);
+ prefetch_prefix(src);
+ emit_byte(0x0D);
+ emit_operand(rax, src); // 0, src
}
-void Assembler::decq(Address dst) {
- // Don't use it directly. Use MacroAssembler::decrementq() instead.
+void Assembler::prefetcht0(Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
InstructionMark im(this);
- prefixq(dst);
- emit_byte(0xFF);
- emit_operand(rcx, dst);
+ prefetch_prefix(src);
+ emit_byte(0x18);
+ emit_operand(rcx, src); // 1, src
}
-void Assembler::idivl(Register src) {
- int encode = prefix_and_encode(src->encoding());
- emit_byte(0xF7);
- emit_byte(0xF8 | encode);
+void Assembler::prefetcht1(Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
+ InstructionMark im(this);
+ prefetch_prefix(src);
+ emit_byte(0x18);
+ emit_operand(rdx, src); // 2, src
}
-void Assembler::idivq(Register src) {
- int encode = prefixq_and_encode(src->encoding());
- emit_byte(0xF7);
- emit_byte(0xF8 | encode);
+void Assembler::prefetcht2(Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), "must support"));
+ InstructionMark im(this);
+ prefetch_prefix(src);
+ emit_byte(0x18);
+ emit_operand(rbx, src); // 3, src
}
-void Assembler::cdql() {
- emit_byte(0x99);
+void Assembler::prefetchw(Address src) {
+ NOT_LP64(assert(VM_Version::supports_3dnow(), "must support"));
+ InstructionMark im(this);
+ prefetch_prefix(src);
+ emit_byte(0x0D);
+ emit_operand(rcx, src); // 1, src
}
-void Assembler::cdqq() {
- prefix(REX_W);
- emit_byte(0x99);
+void Assembler::prefix(Prefix p) {
+ a_byte(p);
}
-void Assembler::imull(Register dst, Register src) {
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0xAF);
- emit_byte(0xC0 | encode);
-}
+void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
+ assert(isByte(mode), "invalid value");
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-void Assembler::imull(Register dst, Register src, int value) {
+ emit_byte(0x66);
int encode = prefix_and_encode(dst->encoding(), src->encoding());
- if (is8bit(value)) {
- emit_byte(0x6B);
- emit_byte(0xC0 | encode);
- emit_byte(value);
- } else {
- emit_byte(0x69);
- emit_byte(0xC0 | encode);
- emit_long(value);
- }
-}
-
-void Assembler::imulq(Register dst, Register src) {
- int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0xAF);
+ emit_byte(0x70);
emit_byte(0xC0 | encode);
-}
+ emit_byte(mode & 0xFF);
-void Assembler::imulq(Register dst, Register src, int value) {
- int encode = prefixq_and_encode(dst->encoding(), src->encoding());
- if (is8bit(value)) {
- emit_byte(0x6B);
- emit_byte(0xC0 | encode);
- emit_byte(value);
- } else {
- emit_byte(0x69);
- emit_byte(0xC0 | encode);
- emit_long(value);
- }
}
-void Assembler::incl(Register dst) {
- // Don't use it directly. Use MacroAssembler::incrementl() instead.
- // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
- int encode = prefix_and_encode(dst->encoding());
- emit_byte(0xFF);
- emit_byte(0xC0 | encode);
-}
+void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
+ assert(isByte(mode), "invalid value");
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-void Assembler::incl(Address dst) {
- // Don't use it directly. Use MacroAssembler::incrementl() instead.
InstructionMark im(this);
- prefix(dst);
- emit_byte(0xFF);
- emit_operand(rax, dst);
+ emit_byte(0x66);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x70);
+ emit_operand(dst, src);
+ emit_byte(mode & 0xFF);
}
-void Assembler::incq(Register dst) {
- // Don't use it directly. Use MacroAssembler::incrementq() instead.
- // Use two-byte form (one-byte from is a REX prefix in 64-bit mode)
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xFF);
+void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
+ assert(isByte(mode), "invalid value");
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+
+ emit_byte(0xF2);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x70);
emit_byte(0xC0 | encode);
+ emit_byte(mode & 0xFF);
}
-void Assembler::incq(Address dst) {
- // Don't use it directly. Use MacroAssembler::incrementq() instead.
- InstructionMark im(this);
- prefixq(dst);
- emit_byte(0xFF);
- emit_operand(rax, dst);
-}
+void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
+ assert(isByte(mode), "invalid value");
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
-void Assembler::leal(Register dst, Address src) {
InstructionMark im(this);
- emit_byte(0x67); // addr32
- prefix(src, dst);
- emit_byte(0x8D);
+ emit_byte(0xF2);
+ prefix(src, dst); // QQ new
+ emit_byte(0x0F);
+ emit_byte(0x70);
emit_operand(dst, src);
+ emit_byte(mode & 0xFF);
}
-void Assembler::leaq(Register dst, Address src) {
- InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x8D);
- emit_operand(dst, src);
-}
+void Assembler::psrlq(XMMRegister dst, int shift) {
+ // HMM Table D-1 says sse2 or mmx
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
-void Assembler::mull(Address src) {
- InstructionMark im(this);
- // was missing
- prefix(src);
- emit_byte(0xF7);
- emit_operand(rsp, src);
+ int encode = prefixq_and_encode(xmm2->encoding(), dst->encoding());
+ emit_byte(0x66);
+ emit_byte(0x0F);
+ emit_byte(0x73);
+ emit_byte(0xC0 | encode);
+ emit_byte(shift);
}
-void Assembler::mull(Register src) {
- // was missing
- int encode = prefix_and_encode(src->encoding());
- emit_byte(0xF7);
- emit_byte(0xE0 | encode);
+void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0x66);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x60);
+ emit_byte(0xC0 | encode);
}
-void Assembler::negl(Register dst) {
- int encode = prefix_and_encode(dst->encoding());
- emit_byte(0xF7);
- emit_byte(0xD8 | encode);
+void Assembler::push(int32_t imm32) {
+ // in 64bit mode we push 64 bits onto the stack but only
+ // take a 32bit immediate
+ emit_byte(0x68);
+ emit_long(imm32);
}
-void Assembler::negq(Register dst) {
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xF7);
- emit_byte(0xD8 | encode);
-}
+void Assembler::push(Register src) {
+ int encode = prefix_and_encode(src->encoding());
-void Assembler::notl(Register dst) {
- int encode = prefix_and_encode(dst->encoding());
- emit_byte(0xF7);
- emit_byte(0xD0 | encode);
+ emit_byte(0x50 | encode);
}
-void Assembler::notq(Register dst) {
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xF7);
- emit_byte(0xD0 | encode);
+void Assembler::pushf() {
+ emit_byte(0x9C);
}
-void Assembler::orl(Address dst, int imm32) {
+void Assembler::pushl(Address src) {
+ // Note: this will push 64 bits on 64bit
InstructionMark im(this);
- prefix(dst);
- emit_byte(0x81);
- emit_operand(rcx, dst, 4);
- emit_long(imm32);
-}
-
-void Assembler::orl(Register dst, int imm32) {
- prefix(dst);
- emit_arith(0x81, 0xC8, dst, imm32);
+ prefix(src);
+ emit_byte(0xFF);
+ emit_operand(rsi, src);
}
-void Assembler::orl(Register dst, Address src) {
+void Assembler::pxor(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
+ emit_byte(0x66);
prefix(src, dst);
- emit_byte(0x0B);
+ emit_byte(0x0F);
+ emit_byte(0xEF);
emit_operand(dst, src);
}
-void Assembler::orl(Register dst, Register src) {
- (void) prefix_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x0B, 0xC0, dst, src);
-}
-
-void Assembler::orq(Address dst, int imm32) {
- InstructionMark im(this);
- prefixq(dst);
- emit_byte(0x81);
- emit_operand(rcx, dst, 4);
- emit_long(imm32);
-}
-
-void Assembler::orq(Register dst, int imm32) {
- (void) prefixq_and_encode(dst->encoding());
- emit_arith(0x81, 0xC8, dst, imm32);
-}
-
-void Assembler::orq(Register dst, Address src) {
+void Assembler::pxor(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x0B);
- emit_operand(dst, src);
-}
-
-void Assembler::orq(Register dst, Register src) {
- (void) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x0B, 0xC0, dst, src);
+ emit_byte(0x66);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0xEF);
+ emit_byte(0xC0 | encode);
}
void Assembler::rcll(Register dst, int imm8) {
@@ -2184,19 +2353,60 @@ void Assembler::rcll(Register dst, int imm8) {
}
}
-void Assembler::rclq(Register dst, int imm8) {
- assert(isShiftCount(imm8 >> 1), "illegal shift count");
- int encode = prefixq_and_encode(dst->encoding());
- if (imm8 == 1) {
- emit_byte(0xD1);
- emit_byte(0xD0 | encode);
+// copies data from [esi] to [edi] using rcx pointer-sized words
+// generic
+void Assembler::rep_mov() {
+ emit_byte(0xF3);
+ // MOVSQ
+ LP64_ONLY(prefix(REX_W));
+ emit_byte(0xA5);
+}
+
+// sets rcx pointer-sized words at [edi] to the value in rax
+// generic
+void Assembler::rep_set() { // rep_set
+ emit_byte(0xF3);
+ // STOSQ
+ LP64_ONLY(prefix(REX_W));
+ emit_byte(0xAB);
+}
+
+// scans rcx pointer-sized words at [edi] for an occurrence of rax
+// generic
+void Assembler::repne_scan() { // repne_scan
+ emit_byte(0xF2);
+ // SCASQ
+ LP64_ONLY(prefix(REX_W));
+ emit_byte(0xAF);
+}
+
+#ifdef _LP64
+// scans rcx 4-byte words at [edi] for an occurrence of rax
+// generic
+void Assembler::repne_scanl() { // repne_scan
+ emit_byte(0xF2);
+ // SCASL
+ emit_byte(0xAF);
+}
+#endif
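+
+// For reference, the string-op encodings emitted above are:
+//   rep_mov:      0xF3 [0x48] 0xA5   (REP MOVS)
+//   rep_set:      0xF3 [0x48] 0xAB   (REP STOS)
+//   repne_scan:   0xF2 [0x48] 0xAF   (REPNE SCAS)
+//   repne_scanl:  0xF2 0xAF          (REPNE SCASD, LP64 only)
+// where the REX.W byte 0x48 is emitted only on 64bit (LP64_ONLY).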
+
+void Assembler::ret(int imm16) {
+ if (imm16 == 0) {
+ emit_byte(0xC3);
} else {
- emit_byte(0xC1);
- emit_byte(0xD0 | encode);
- emit_byte(imm8);
+ emit_byte(0xC2);
+ emit_word(imm16);
}
}
+void Assembler::sahf() {
+#ifdef _LP64
+ // Not supported in 64bit mode
+ ShouldNotReachHere();
+#endif
+ emit_byte(0x9E);
+}
+
void Assembler::sarl(Register dst, int imm8) {
int encode = prefix_and_encode(dst->encoding());
assert(isShiftCount(imm8), "illegal shift count");
@@ -2216,36 +2426,18 @@ void Assembler::sarl(Register dst) {
emit_byte(0xF8 | encode);
}
-void Assembler::sarq(Register dst, int imm8) {
- assert(isShiftCount(imm8 >> 1), "illegal shift count");
- int encode = prefixq_and_encode(dst->encoding());
- if (imm8 == 1) {
- emit_byte(0xD1);
- emit_byte(0xF8 | encode);
- } else {
- emit_byte(0xC1);
- emit_byte(0xF8 | encode);
- emit_byte(imm8);
- }
-}
-
-void Assembler::sarq(Register dst) {
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xD3);
- emit_byte(0xF8 | encode);
-}
-
-void Assembler::sbbl(Address dst, int imm32) {
+void Assembler::sbbl(Address dst, int32_t imm32) {
InstructionMark im(this);
prefix(dst);
emit_arith_operand(0x81, rbx, dst, imm32);
}
-void Assembler::sbbl(Register dst, int imm32) {
+void Assembler::sbbl(Register dst, int32_t imm32) {
prefix(dst);
emit_arith(0x81, 0xD8, dst, imm32);
}
+
void Assembler::sbbl(Register dst, Address src) {
InstructionMark im(this);
prefix(src, dst);
@@ -2258,27 +2450,12 @@ void Assembler::sbbl(Register dst, Register src) {
emit_arith(0x1B, 0xC0, dst, src);
}
-void Assembler::sbbq(Address dst, int imm32) {
- InstructionMark im(this);
- prefixq(dst);
- emit_arith_operand(0x81, rbx, dst, imm32);
-}
-
-void Assembler::sbbq(Register dst, int imm32) {
- (void) prefixq_and_encode(dst->encoding());
- emit_arith(0x81, 0xD8, dst, imm32);
-}
-
-void Assembler::sbbq(Register dst, Address src) {
- InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x1B);
- emit_operand(dst, src);
-}
-
-void Assembler::sbbq(Register dst, Register src) {
- (void) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x1B, 0xC0, dst, src);
+void Assembler::setb(Condition cc, Register dst) {
+ assert(0 <= cc && cc < 16, "illegal cc");
+ int encode = prefix_and_encode(dst->encoding(), true);
+ emit_byte(0x0F);
+ emit_byte(0x90 | cc);
+ emit_byte(0xC0 | encode);
}
void Assembler::shll(Register dst, int imm8) {
@@ -2300,25 +2477,6 @@ void Assembler::shll(Register dst) {
emit_byte(0xE0 | encode);
}
-void Assembler::shlq(Register dst, int imm8) {
- assert(isShiftCount(imm8 >> 1), "illegal shift count");
- int encode = prefixq_and_encode(dst->encoding());
- if (imm8 == 1) {
- emit_byte(0xD1);
- emit_byte(0xE0 | encode);
- } else {
- emit_byte(0xC1);
- emit_byte(0xE0 | encode);
- emit_byte(imm8);
- }
-}
-
-void Assembler::shlq(Register dst) {
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xD3);
- emit_byte(0xE0 | encode);
-}
-
void Assembler::shrl(Register dst, int imm8) {
assert(isShiftCount(imm8), "illegal shift count");
int encode = prefix_and_encode(dst->encoding());
@@ -2333,21 +2491,32 @@ void Assembler::shrl(Register dst) {
emit_byte(0xE8 | encode);
}
-void Assembler::shrq(Register dst, int imm8) {
- assert(isShiftCount(imm8 >> 1), "illegal shift count");
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xC1);
- emit_byte(0xE8 | encode);
- emit_byte(imm8);
+// copies a single word from [esi] to [edi]
+void Assembler::smovl() {
+ emit_byte(0xA5);
}
-void Assembler::shrq(Register dst) {
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xD3);
- emit_byte(0xE8 | encode);
+void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
+ // HMM Table D-1 says sse2
+ // NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x51);
+ emit_byte(0xC0 | encode);
}
-void Assembler::subl(Address dst, int imm32) {
+void Assembler::stmxcsr( Address dst) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ InstructionMark im(this);
+ prefix(dst);
+ emit_byte(0x0F);
+ emit_byte(0xAE);
+ emit_operand(as_Register(3), dst);
+}
+
+void Assembler::subl(Address dst, int32_t imm32) {
InstructionMark im(this);
prefix(dst);
if (is8bit(imm32)) {
@@ -2361,7 +2530,7 @@ void Assembler::subl(Address dst, int imm32) {
}
}
-void Assembler::subl(Register dst, int imm32) {
+void Assembler::subl(Register dst, int32_t imm32) {
prefix(dst);
emit_arith(0x81, 0xE8, dst, imm32);
}
@@ -2385,50 +2554,51 @@ void Assembler::subl(Register dst, Register src) {
emit_arith(0x2B, 0xC0, dst, src);
}
-void Assembler::subq(Address dst, int imm32) {
- InstructionMark im(this);
- prefixq(dst);
- if (is8bit(imm32)) {
- emit_byte(0x83);
- emit_operand(rbp, dst, 1);
- emit_byte(imm32 & 0xFF);
- } else {
- emit_byte(0x81);
- emit_operand(rbp, dst, 4);
- emit_long(imm32);
- }
+void Assembler::subsd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x5C);
+ emit_byte(0xC0 | encode);
}
-void Assembler::subq(Register dst, int imm32) {
- (void) prefixq_and_encode(dst->encoding());
- emit_arith(0x81, 0xE8, dst, imm32);
+void Assembler::subsd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionMark im(this);
+ emit_byte(0xF2);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x5C);
+ emit_operand(dst, src);
}
-void Assembler::subq(Address dst, Register src) {
- InstructionMark im(this);
- prefixq(dst, src);
- emit_byte(0x29);
- emit_operand(src, dst);
+void Assembler::subss(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ emit_byte(0xF3);
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x5C);
+ emit_byte(0xC0 | encode);
}
-void Assembler::subq(Register dst, Address src) {
+void Assembler::subss(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x2B);
+ emit_byte(0xF3);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x5C);
emit_operand(dst, src);
}
-void Assembler::subq(Register dst, Register src) {
- (void) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x2B, 0xC0, dst, src);
-}
-
void Assembler::testb(Register dst, int imm8) {
+ NOT_LP64(assert(dst->has_byte_register(), "must have byte register"));
(void) prefix_and_encode(dst->encoding(), true);
emit_arith_b(0xF6, 0xC0, dst, imm8);
}
-void Assembler::testl(Register dst, int imm32) {
+void Assembler::testl(Register dst, int32_t imm32) {
// not using emit_arith because test
// doesn't support sign-extension of
// 8bit operands
@@ -2448,27 +2618,44 @@ void Assembler::testl(Register dst, Register src) {
emit_arith(0x85, 0xC0, dst, src);
}
-void Assembler::testq(Register dst, int imm32) {
- // not using emit_arith because test
- // doesn't support sign-extension of
- // 8bit operands
- int encode = dst->encoding();
- if (encode == 0) {
- prefix(REX_W);
- emit_byte(0xA9);
- } else {
- encode = prefixq_and_encode(encode);
- emit_byte(0xF7);
- emit_byte(0xC0 | encode);
- }
- emit_long(imm32);
+void Assembler::testl(Register dst, Address src) {
+ InstructionMark im(this);
+ prefix(src, dst);
+ emit_byte(0x85);
+ emit_operand(dst, src);
}
-void Assembler::testq(Register dst, Register src) {
- (void) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x85, 0xC0, dst, src);
+void Assembler::ucomisd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0x66);
+ ucomiss(dst, src);
+}
+
+void Assembler::ucomisd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0x66);
+ ucomiss(dst, src);
+}
+
+void Assembler::ucomiss(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+
+ InstructionMark im(this);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x2E);
+ emit_operand(dst, src);
}
+void Assembler::ucomiss(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x2E);
+ emit_byte(0xC0 | encode);
+}
+
+
void Assembler::xaddl(Address dst, Register src) {
InstructionMark im(this);
prefix(dst, src);
@@ -2477,849 +2664,1022 @@ void Assembler::xaddl(Address dst, Register src) {
emit_operand(src, dst);
}
-void Assembler::xaddq(Address dst, Register src) {
+void Assembler::xchgl(Register dst, Address src) { // xchg
InstructionMark im(this);
- prefixq(dst, src);
- emit_byte(0x0F);
- emit_byte(0xC1);
- emit_operand(src, dst);
+ prefix(src, dst);
+ emit_byte(0x87);
+ emit_operand(dst, src);
+}
+
+void Assembler::xchgl(Register dst, Register src) {
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x87);
+ emit_byte(0xc0 | encode);
}
-void Assembler::xorl(Register dst, int imm32) {
+void Assembler::xorl(Register dst, int32_t imm32) {
prefix(dst);
emit_arith(0x81, 0xF0, dst, imm32);
}
+void Assembler::xorl(Register dst, Address src) {
+ InstructionMark im(this);
+ prefix(src, dst);
+ emit_byte(0x33);
+ emit_operand(dst, src);
+}
+
void Assembler::xorl(Register dst, Register src) {
(void) prefix_and_encode(dst->encoding(), src->encoding());
emit_arith(0x33, 0xC0, dst, src);
}
-void Assembler::xorl(Register dst, Address src) {
+void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0x66);
+ xorps(dst, src);
+}
+
+void Assembler::xorpd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
InstructionMark im(this);
+ emit_byte(0x66);
prefix(src, dst);
- emit_byte(0x33);
+ emit_byte(0x0F);
+ emit_byte(0x57);
emit_operand(dst, src);
}
-void Assembler::xorq(Register dst, int imm32) {
- (void) prefixq_and_encode(dst->encoding());
- emit_arith(0x81, 0xF0, dst, imm32);
-}
-void Assembler::xorq(Register dst, Register src) {
- (void) prefixq_and_encode(dst->encoding(), src->encoding());
- emit_arith(0x33, 0xC0, dst, src);
+void Assembler::xorps(XMMRegister dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x0F);
+ emit_byte(0x57);
+ emit_byte(0xC0 | encode);
}
-void Assembler::xorq(Register dst, Address src) {
+void Assembler::xorps(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x33);
+ prefix(src, dst);
+ emit_byte(0x0F);
+ emit_byte(0x57);
emit_operand(dst, src);
}
-void Assembler::bswapl(Register reg) {
- int encode = prefix_and_encode(reg->encoding());
- emit_byte(0x0F);
- emit_byte(0xC8 | encode);
+#ifndef _LP64
+// 32bit only pieces of the assembler
+
+void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
+ // NO PREFIX AS NEVER 64BIT
+ InstructionMark im(this);
+ emit_byte(0x81);
+ emit_byte(0xF8 | src1->encoding());
+ emit_data(imm32, rspec, 0);
}
-void Assembler::bswapq(Register reg) {
- int encode = prefixq_and_encode(reg->encoding());
+void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
+ // NO PREFIX AS NEVER 64BIT (not even 32bit versions of 64bit regs)
+ InstructionMark im(this);
+ emit_byte(0x81);
+ emit_operand(rdi, src1);
+ emit_data(imm32, rspec, 0);
+}
+
+// The 64-bit cmpxchg (32bit platform only) compares the value at adr with the contents of rdx:rax,
+// and stores rcx:rbx into adr if they are equal; otherwise, the value at adr is loaded
+// into rdx:rax. The ZF flag is set if the compared values were equal, and cleared otherwise.
+void Assembler::cmpxchg8(Address adr) {
+ InstructionMark im(this);
emit_byte(0x0F);
- emit_byte(0xC8 | encode);
+ emit_byte(0xc7);
+ emit_operand(rcx, adr);
}
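+// Illustrative usage (not introduced by this change): a 64-bit compare-and-swap on 32bit is
+// typically emitted as lock() followed by cmpxchg8(adr), with the expected value in edx:eax
+// and the replacement value in ecx:ebx.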
-void Assembler::lock() {
- emit_byte(0xF0);
+void Assembler::decl(Register dst) {
+ // Don't use it directly. Use MacroAssembler::decrementl() instead.
+ emit_byte(0x48 | dst->encoding());
}
-void Assembler::xchgl(Register dst, Address src) {
+#endif // _LP64
+
+// 64bit typically doesn't use the x87 but needs it for the trig funcs
+
+void Assembler::fabs() {
+ emit_byte(0xD9);
+ emit_byte(0xE1);
+}
+
+void Assembler::fadd(int i) {
+ emit_farith(0xD8, 0xC0, i);
+}
+
+void Assembler::fadd_d(Address src) {
InstructionMark im(this);
- prefix(src, dst);
- emit_byte(0x87);
- emit_operand(dst, src);
+ emit_byte(0xDC);
+ emit_operand32(rax, src);
}
-void Assembler::xchgl(Register dst, Register src) {
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x87);
- emit_byte(0xc0 | encode);
+void Assembler::fadd_s(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xD8);
+ emit_operand32(rax, src);
}
-void Assembler::xchgq(Register dst, Address src) {
+void Assembler::fadda(int i) {
+ emit_farith(0xDC, 0xC0, i);
+}
+
+void Assembler::faddp(int i) {
+ emit_farith(0xDE, 0xC0, i);
+}
+
+void Assembler::fchs() {
+ emit_byte(0xD9);
+ emit_byte(0xE0);
+}
+
+void Assembler::fcom(int i) {
+ emit_farith(0xD8, 0xD0, i);
+}
+
+void Assembler::fcomp(int i) {
+ emit_farith(0xD8, 0xD8, i);
+}
+
+void Assembler::fcomp_d(Address src) {
InstructionMark im(this);
- prefixq(src, dst);
- emit_byte(0x87);
- emit_operand(dst, src);
+ emit_byte(0xDC);
+ emit_operand32(rbx, src);
}
-void Assembler::xchgq(Register dst, Register src) {
- int encode = prefixq_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x87);
- emit_byte(0xc0 | encode);
+void Assembler::fcomp_s(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xD8);
+ emit_operand32(rbx, src);
+}
+
+void Assembler::fcompp() {
+ emit_byte(0xDE);
+ emit_byte(0xD9);
+}
+
+void Assembler::fcos() {
+ emit_byte(0xD9);
+ emit_byte(0xFF);
+}
+
+void Assembler::fdecstp() {
+ emit_byte(0xD9);
+ emit_byte(0xF6);
+}
+
+void Assembler::fdiv(int i) {
+ emit_farith(0xD8, 0xF0, i);
}
-void Assembler::cmpxchgl(Register reg, Address adr) {
+void Assembler::fdiv_d(Address src) {
InstructionMark im(this);
- prefix(adr, reg);
- emit_byte(0x0F);
- emit_byte(0xB1);
- emit_operand(reg, adr);
+ emit_byte(0xDC);
+ emit_operand32(rsi, src);
}
-void Assembler::cmpxchgq(Register reg, Address adr) {
+void Assembler::fdiv_s(Address src) {
InstructionMark im(this);
- prefixq(adr, reg);
- emit_byte(0x0F);
- emit_byte(0xB1);
- emit_operand(reg, adr);
+ emit_byte(0xD8);
+ emit_operand32(rsi, src);
}
-void Assembler::hlt() {
- emit_byte(0xF4);
+void Assembler::fdiva(int i) {
+ emit_farith(0xDC, 0xF8, i);
}
+// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
+// is erroneous for some of the floating-point instructions below.
-void Assembler::addr_nop_4() {
- // 4 bytes: NOP DWORD PTR [EAX+0]
- emit_byte(0x0F);
- emit_byte(0x1F);
- emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
- emit_byte(0); // 8-bits offset (1 byte)
+void Assembler::fdivp(int i) {
+ emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
}
-void Assembler::addr_nop_5() {
- // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bits offset
- emit_byte(0x0F);
- emit_byte(0x1F);
- emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
- emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
- emit_byte(0); // 8-bits offset (1 byte)
+void Assembler::fdivr(int i) {
+ emit_farith(0xD8, 0xF8, i);
}
-void Assembler::addr_nop_7() {
- // 7 bytes: NOP DWORD PTR [EAX+0] 32-bits offset
- emit_byte(0x0F);
- emit_byte(0x1F);
- emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
- emit_long(0); // 32-bits offset (4 bytes)
+void Assembler::fdivr_d(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xDC);
+ emit_operand32(rdi, src);
}
-void Assembler::addr_nop_8() {
- // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bits offset
- emit_byte(0x0F);
- emit_byte(0x1F);
- emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
- emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
- emit_long(0); // 32-bits offset (4 bytes)
+void Assembler::fdivr_s(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xD8);
+ emit_operand32(rdi, src);
}
-void Assembler::nop(int i) {
- assert(i > 0, " ");
- if (UseAddressNop && VM_Version::is_intel()) {
- //
- // Using multi-bytes nops "0x0F 0x1F [address]" for Intel
- // 1: 0x90
- // 2: 0x66 0x90
- // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
- // 4: 0x0F 0x1F 0x40 0x00
- // 5: 0x0F 0x1F 0x44 0x00 0x00
- // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
- // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
- // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+void Assembler::fdivra(int i) {
+ emit_farith(0xDC, 0xF0, i);
+}
- // The rest coding is Intel specific - don't use consecutive address nops
+void Assembler::fdivrp(int i) {
+ emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
+}
- // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
- // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
- // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
- // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
+void Assembler::ffree(int i) {
+ emit_farith(0xDD, 0xC0, i);
+}
- while(i >= 15) {
- // For Intel don't generate consecutive addess nops (mix with regular nops)
- i -= 15;
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- addr_nop_8();
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x90); // nop
- }
- switch (i) {
- case 14:
- emit_byte(0x66); // size prefix
- case 13:
- emit_byte(0x66); // size prefix
- case 12:
- addr_nop_8();
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x90); // nop
- break;
- case 11:
- emit_byte(0x66); // size prefix
- case 10:
- emit_byte(0x66); // size prefix
- case 9:
- emit_byte(0x66); // size prefix
- case 8:
- addr_nop_8();
- break;
- case 7:
- addr_nop_7();
- break;
- case 6:
- emit_byte(0x66); // size prefix
- case 5:
- addr_nop_5();
- break;
- case 4:
- addr_nop_4();
- break;
- case 3:
- // Don't use "0x0F 0x1F 0x00" - need patching safe padding
- emit_byte(0x66); // size prefix
- case 2:
- emit_byte(0x66); // size prefix
- case 1:
- emit_byte(0x90); // nop
- break;
- default:
- assert(i == 0, " ");
- }
- return;
- }
- if (UseAddressNop && VM_Version::is_amd()) {
- //
- // Using multi-bytes nops "0x0F 0x1F [address]" for AMD.
- // 1: 0x90
- // 2: 0x66 0x90
- // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
- // 4: 0x0F 0x1F 0x40 0x00
- // 5: 0x0F 0x1F 0x44 0x00 0x00
- // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
- // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
- // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
+void Assembler::fild_d(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDF);
+ emit_operand32(rbp, adr);
+}
- // The rest coding is AMD specific - use consecutive address nops
+void Assembler::fild_s(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDB);
+ emit_operand32(rax, adr);
+}
- // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
- // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
- // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
- // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
- // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // Size prefixes (0x66) are added for larger sizes
+void Assembler::fincstp() {
+ emit_byte(0xD9);
+ emit_byte(0xF7);
+}
- while(i >= 22) {
- i -= 11;
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- addr_nop_8();
- }
- // Generate first nop for size between 21-12
- switch (i) {
- case 21:
- i -= 1;
- emit_byte(0x66); // size prefix
- case 20:
- case 19:
- i -= 1;
- emit_byte(0x66); // size prefix
- case 18:
- case 17:
- i -= 1;
- emit_byte(0x66); // size prefix
- case 16:
- case 15:
- i -= 8;
- addr_nop_8();
- break;
- case 14:
- case 13:
- i -= 7;
- addr_nop_7();
- break;
- case 12:
- i -= 6;
- emit_byte(0x66); // size prefix
- addr_nop_5();
- break;
- default:
- assert(i < 12, " ");
- }
+void Assembler::finit() {
+ emit_byte(0x9B);
+ emit_byte(0xDB);
+ emit_byte(0xE3);
+}
- // Generate second nop for size between 11-1
- switch (i) {
- case 11:
- emit_byte(0x66); // size prefix
- case 10:
- emit_byte(0x66); // size prefix
- case 9:
- emit_byte(0x66); // size prefix
- case 8:
- addr_nop_8();
- break;
- case 7:
- addr_nop_7();
- break;
- case 6:
- emit_byte(0x66); // size prefix
- case 5:
- addr_nop_5();
- break;
- case 4:
- addr_nop_4();
- break;
- case 3:
- // Don't use "0x0F 0x1F 0x00" - need patching safe padding
- emit_byte(0x66); // size prefix
- case 2:
- emit_byte(0x66); // size prefix
- case 1:
- emit_byte(0x90); // nop
- break;
- default:
- assert(i == 0, " ");
- }
- return;
- }
+void Assembler::fist_s(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDB);
+ emit_operand32(rdx, adr);
+}
- // Using nops with size prefixes "0x66 0x90".
- // From AMD Optimization Guide:
- // 1: 0x90
- // 2: 0x66 0x90
- // 3: 0x66 0x66 0x90
- // 4: 0x66 0x66 0x66 0x90
- // 5: 0x66 0x66 0x90 0x66 0x90
- // 6: 0x66 0x66 0x90 0x66 0x66 0x90
- // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
- // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
- // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
- // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
- //
- while(i > 12) {
- i -= 4;
- emit_byte(0x66); // size prefix
- emit_byte(0x66);
- emit_byte(0x66);
- emit_byte(0x90); // nop
- }
- // 1 - 12 nops
- if(i > 8) {
- if(i > 9) {
- i -= 1;
- emit_byte(0x66);
- }
- i -= 3;
- emit_byte(0x66);
- emit_byte(0x66);
- emit_byte(0x90);
- }
- // 1 - 8 nops
- if(i > 4) {
- if(i > 6) {
- i -= 1;
- emit_byte(0x66);
- }
- i -= 3;
- emit_byte(0x66);
- emit_byte(0x66);
- emit_byte(0x90);
- }
- switch (i) {
- case 4:
- emit_byte(0x66);
- case 3:
- emit_byte(0x66);
- case 2:
- emit_byte(0x66);
- case 1:
- emit_byte(0x90);
- break;
- default:
- assert(i == 0, " ");
- }
+void Assembler::fistp_d(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDF);
+ emit_operand32(rdi, adr);
}
-void Assembler::ret(int imm16) {
- if (imm16 == 0) {
- emit_byte(0xC3);
- } else {
- emit_byte(0xC2);
- emit_word(imm16);
- }
+void Assembler::fistp_s(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDB);
+ emit_operand32(rbx, adr);
}
-// copies a single word from [esi] to [edi]
-void Assembler::smovl() {
- emit_byte(0xA5);
+void Assembler::fld1() {
+ emit_byte(0xD9);
+ emit_byte(0xE8);
}
-// copies data from [rsi] to [rdi] using rcx words (m32)
-void Assembler::rep_movl() {
- // REP
- emit_byte(0xF3);
- // MOVSL
- emit_byte(0xA5);
+void Assembler::fld_d(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDD);
+ emit_operand32(rax, adr);
}
-// copies data from [rsi] to [rdi] using rcx double words (m64)
-void Assembler::rep_movq() {
- // REP
- emit_byte(0xF3);
- // MOVSQ
- prefix(REX_W);
- emit_byte(0xA5);
+void Assembler::fld_s(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xD9);
+ emit_operand32(rax, adr);
}
-// sets rcx double words (m64) with rax value at [rdi]
-void Assembler::rep_set() {
- // REP
- emit_byte(0xF3);
- // STOSQ
- prefix(REX_W);
- emit_byte(0xAB);
+
+void Assembler::fld_s(int index) {
+ emit_farith(0xD9, 0xC0, index);
}
-// scans rcx double words (m64) at [rdi] for occurance of rax
-void Assembler::repne_scanq() {
- // REPNE/REPNZ
- emit_byte(0xF2);
- // SCASQ
- prefix(REX_W);
- emit_byte(0xAF);
+void Assembler::fld_x(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDB);
+ emit_operand32(rbp, adr);
}
-void Assembler::repne_scanl() {
- // REPNE/REPNZ
- emit_byte(0xF2);
- // SCASL
- emit_byte(0xAF);
+void Assembler::fldcw(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xd9);
+ emit_operand32(rbp, src);
}
+void Assembler::fldenv(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xD9);
+ emit_operand32(rsp, src);
+}
-void Assembler::setb(Condition cc, Register dst) {
- assert(0 <= cc && cc < 16, "illegal cc");
- int encode = prefix_and_encode(dst->encoding(), true);
- emit_byte(0x0F);
- emit_byte(0x90 | cc);
- emit_byte(0xC0 | encode);
+void Assembler::fldlg2() {
+ emit_byte(0xD9);
+ emit_byte(0xEC);
}
-void Assembler::clflush(Address adr) {
- prefix(adr);
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_operand(rdi, adr);
+void Assembler::fldln2() {
+ emit_byte(0xD9);
+ emit_byte(0xED);
}
-void Assembler::call(Label& L, relocInfo::relocType rtype) {
- if (L.is_bound()) {
- const int long_size = 5;
- int offs = (int)( target(L) - pc() );
- assert(offs <= 0, "assembler error");
- InstructionMark im(this);
- // 1110 1000 #32-bit disp
- emit_byte(0xE8);
- emit_data(offs - long_size, rtype, disp32_operand);
- } else {
- InstructionMark im(this);
- // 1110 1000 #32-bit disp
- L.add_patch_at(code(), locator());
+void Assembler::fldz() {
+ emit_byte(0xD9);
+ emit_byte(0xEE);
+}
- emit_byte(0xE8);
- emit_data(int(0), rtype, disp32_operand);
- }
+void Assembler::flog() {
+ fldln2();
+ fxch();
+ fyl2x();
}
-void Assembler::call_literal(address entry, RelocationHolder const& rspec) {
- assert(entry != NULL, "call most probably wrong");
+void Assembler::flog10() {
+ fldlg2();
+ fxch();
+ fyl2x();
+}
+
+void Assembler::fmul(int i) {
+ emit_farith(0xD8, 0xC8, i);
+}
+
+void Assembler::fmul_d(Address src) {
InstructionMark im(this);
- emit_byte(0xE8);
- intptr_t disp = entry - (_code_pos + sizeof(int32_t));
- assert(is_simm32(disp), "must be 32bit offset (call2)");
- // Technically, should use call32_operand, but this format is
- // implied by the fact that we're emitting a call instruction.
- emit_data((int) disp, rspec, disp32_operand);
+ emit_byte(0xDC);
+ emit_operand32(rcx, src);
}
+void Assembler::fmul_s(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xD8);
+ emit_operand32(rcx, src);
+}
-void Assembler::call(Register dst) {
- // This was originally using a 32bit register encoding
- // and surely we want 64bit!
- // this is a 32bit encoding but in 64bit mode the default
- // operand size is 64bit so there is no need for the
- // wide prefix. So prefix only happens if we use the
- // new registers. Much like push/pop.
- int encode = prefixq_and_encode(dst->encoding());
- emit_byte(0xFF);
- emit_byte(0xD0 | encode);
+void Assembler::fmula(int i) {
+ emit_farith(0xDC, 0xC8, i);
}
-void Assembler::call(Address adr) {
+void Assembler::fmulp(int i) {
+ emit_farith(0xDE, 0xC8, i);
+}
+
+void Assembler::fnsave(Address dst) {
InstructionMark im(this);
- prefix(adr);
- emit_byte(0xFF);
- emit_operand(rdx, adr);
+ emit_byte(0xDD);
+ emit_operand32(rsi, dst);
}
-void Assembler::jmp(Register reg) {
- int encode = prefix_and_encode(reg->encoding());
- emit_byte(0xFF);
- emit_byte(0xE0 | encode);
+void Assembler::fnstcw(Address src) {
+ InstructionMark im(this);
+ emit_byte(0x9B);
+ emit_byte(0xD9);
+ emit_operand32(rdi, src);
}
-void Assembler::jmp(Address adr) {
+void Assembler::fnstsw_ax() {
+ emit_byte(0xdF);
+ emit_byte(0xE0);
+}
+
+void Assembler::fprem() {
+ emit_byte(0xD9);
+ emit_byte(0xF8);
+}
+
+void Assembler::fprem1() {
+ emit_byte(0xD9);
+ emit_byte(0xF5);
+}
+
+void Assembler::frstor(Address src) {
InstructionMark im(this);
- prefix(adr);
- emit_byte(0xFF);
- emit_operand(rsp, adr);
+ emit_byte(0xDD);
+ emit_operand32(rsp, src);
}
-void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
+void Assembler::fsin() {
+ emit_byte(0xD9);
+ emit_byte(0xFE);
+}
+
+void Assembler::fsqrt() {
+ emit_byte(0xD9);
+ emit_byte(0xFA);
+}
+
+void Assembler::fst_d(Address adr) {
InstructionMark im(this);
- emit_byte(0xE9);
- assert(dest != NULL, "must have a target");
- intptr_t disp = dest - (_code_pos + sizeof(int32_t));
- assert(is_simm32(disp), "must be 32bit offset (jmp)");
- emit_data(disp, rspec.reloc(), call32_operand);
+ emit_byte(0xDD);
+ emit_operand32(rdx, adr);
}
-void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
- if (L.is_bound()) {
- address entry = target(L);
- assert(entry != NULL, "jmp most probably wrong");
- InstructionMark im(this);
- const int short_size = 2;
- const int long_size = 5;
- intptr_t offs = entry - _code_pos;
- if (rtype == relocInfo::none && is8bit(offs - short_size)) {
- emit_byte(0xEB);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- emit_byte(0xE9);
- emit_long(offs - long_size);
+void Assembler::fst_s(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xD9);
+ emit_operand32(rdx, adr);
+}
+
+void Assembler::fstp_d(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDD);
+ emit_operand32(rbx, adr);
+}
+
+void Assembler::fstp_d(int index) {
+ emit_farith(0xDD, 0xD8, index);
+}
+
+void Assembler::fstp_s(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xD9);
+ emit_operand32(rbx, adr);
+}
+
+void Assembler::fstp_x(Address adr) {
+ InstructionMark im(this);
+ emit_byte(0xDB);
+ emit_operand32(rdi, adr);
+}
+
+void Assembler::fsub(int i) {
+ emit_farith(0xD8, 0xE0, i);
+}
+
+void Assembler::fsub_d(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xDC);
+ emit_operand32(rsp, src);
+}
+
+void Assembler::fsub_s(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xD8);
+ emit_operand32(rsp, src);
+}
+
+void Assembler::fsuba(int i) {
+ emit_farith(0xDC, 0xE8, i);
+}
+
+void Assembler::fsubp(int i) {
+ emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
+}
+
+void Assembler::fsubr(int i) {
+ emit_farith(0xD8, 0xE8, i);
+}
+
+void Assembler::fsubr_d(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xDC);
+ emit_operand32(rbp, src);
+}
+
+void Assembler::fsubr_s(Address src) {
+ InstructionMark im(this);
+ emit_byte(0xD8);
+ emit_operand32(rbp, src);
+}
+
+void Assembler::fsubra(int i) {
+ emit_farith(0xDC, 0xE0, i);
+}
+
+void Assembler::fsubrp(int i) {
+ emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
+}
+
+void Assembler::ftan() {
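+ // FPTAN (0xD9 0xF2) replaces ST(0) with tan(ST(0)) and pushes 1.0;
+ // the trailing FSTP ST(0) (0xDD 0xD8) pops that 1.0 so only the result remains.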
+ emit_byte(0xD9);
+ emit_byte(0xF2);
+ emit_byte(0xDD);
+ emit_byte(0xD8);
+}
+
+void Assembler::ftst() {
+ emit_byte(0xD9);
+ emit_byte(0xE4);
+}
+
+void Assembler::fucomi(int i) {
+ // make sure the instruction is supported (introduced for P6, together with cmov)
+ guarantee(VM_Version::supports_cmov(), "illegal instruction");
+ emit_farith(0xDB, 0xE8, i);
+}
+
+void Assembler::fucomip(int i) {
+ // make sure the instruction is supported (introduced for P6, together with cmov)
+ guarantee(VM_Version::supports_cmov(), "illegal instruction");
+ emit_farith(0xDF, 0xE8, i);
+}
+
+void Assembler::fwait() {
+ emit_byte(0x9B);
+}
+
+void Assembler::fxch(int i) {
+ emit_farith(0xD9, 0xC8, i);
+}
+
+void Assembler::fyl2x() {
+ emit_byte(0xD9);
+ emit_byte(0xF1);
+}
+
+void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format) {
+ InstructionMark im(this);
+ int encode = prefix_and_encode(dst->encoding());
+ emit_byte(0xB8 | encode);
+ emit_data((int)imm32, rspec, format);
+}
+
+#ifndef _LP64
+
+void Assembler::incl(Register dst) {
+ // Don't use it directly. Use MacroAssembler::incrementl() instead.
+ emit_byte(0x40 | dst->encoding());
+}
+
+void Assembler::lea(Register dst, Address src) {
+ leal(dst, src);
+}
+
+void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
+ InstructionMark im(this);
+ emit_byte(0xC7);
+ emit_operand(rax, dst);
+ emit_data((int)imm32, rspec, 0);
+}
+
+
+void Assembler::popa() { // 32bit
+ emit_byte(0x61);
+}
+
+void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
+ InstructionMark im(this);
+ emit_byte(0x68);
+ emit_data(imm32, rspec, 0);
+}
+
+void Assembler::pusha() { // 32bit
+ emit_byte(0x60);
+}
+
+void Assembler::set_byte_if_not_zero(Register dst) {
+ emit_byte(0x0F);
+ emit_byte(0x95);
+ emit_byte(0xE0 | dst->encoding());
+}
+
+void Assembler::shldl(Register dst, Register src) {
+ emit_byte(0x0F);
+ emit_byte(0xA5);
+ emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
+}
+
+void Assembler::shrdl(Register dst, Register src) {
+ emit_byte(0x0F);
+ emit_byte(0xAD);
+ emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
+}
+
+#else // LP64
+
+// 64bit only pieces of the assembler
+// This should only be used by 64bit instructions that can use rip-relative
+// it cannot be used by instructions that want an immediate value.
+
+bool Assembler::reachable(AddressLiteral adr) {
+ int64_t disp;
+ // A reloc of none will force a 64bit literal to the code stream. It is likely a placeholder
+ // for something that will be patched later and we need to be certain it will
+ // always be reachable.
+ if (adr.reloc() == relocInfo::none) {
+ return false;
+ }
+ if (adr.reloc() == relocInfo::internal_word_type) {
+ // This should be rip relative and easily reachable.
+ return true;
+ }
+ if (adr.reloc() == relocInfo::virtual_call_type ||
+ adr.reloc() == relocInfo::opt_virtual_call_type ||
+ adr.reloc() == relocInfo::static_call_type ||
+ adr.reloc() == relocInfo::static_stub_type ) {
+ // This should be rip relative within the code cache and easily
+ // reachable until we get huge code caches. (At which point
+ // ic code is going to have issues).
+ return true;
+ }
+ if (adr.reloc() != relocInfo::external_word_type &&
+ adr.reloc() != relocInfo::poll_return_type && // these are really external_word but need special
+ adr.reloc() != relocInfo::poll_type && // relocs to identify them
+ adr.reloc() != relocInfo::runtime_call_type ) {
+ return false;
+ }
+
+ // Stress the correction code
+ if (ForceUnreachable) {
+ // Must be a runtime call reloc; see if it is in the codecache
+ // Flipping stuff in the codecache to be unreachable causes issues
+ // with things like inline caches where the additional instructions
+ // are not handled.
+ if (CodeCache::find_blob(adr._target) == NULL) {
+ return false;
}
+ }
+ // For external_word_type/runtime_call_type, if the target is reachable both from where we
+ // are now (possibly a temp buffer) and from anywhere we might end up
+ // in the codeCache, then we are always reachable.
+ // This would have to become more pessimistic if we ever save/restore
+ // shared code.
+
+ disp = (int64_t)adr._target - ((int64_t)CodeCache::low_bound() + sizeof(int));
+ if (!is_simm32(disp)) return false;
+ disp = (int64_t)adr._target - ((int64_t)CodeCache::high_bound() + sizeof(int));
+ if (!is_simm32(disp)) return false;
+
+ disp = (int64_t)adr._target - ((int64_t)_code_pos + sizeof(int));
+
+ // Because a rip-relative reference is disp + address_of_next_instruction, and we
+ // don't know the value of address_of_next_instruction, we apply a fudge factor
+ // to make sure we will be ok no matter the size of the instruction this ends up in.
+ // We don't have to fudge the checks above because they are already worst case.
+
+ // 12 == override/rex byte, opcode byte, rm byte, sib byte, a 4-byte disp, 4-byte literal
+ // + 4 because better safe than sorry.
+ const int fudge = 12 + 4;
+ if (disp < 0) {
+ disp -= fudge;
} else {
- // By default, forward jumps are always 32-bit displacements, since
- // we can't yet know where the label will be bound. If you're sure that
- // the forward jump will not run beyond 256 bytes, use jmpb to
- // force an 8-bit displacement.
- InstructionMark im(this);
- relocate(rtype);
- L.add_patch_at(code(), locator());
- emit_byte(0xE9);
- emit_long(0);
+ disp += fudge;
}
+ return is_simm32(disp);
}
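+
+// Typical caller pattern (an illustration, not part of this change): test reachable(adr) and,
+// when it returns false, materialize the full 64-bit target in a scratch register instead of
+// relying on a 32-bit rip-relative displacement.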
-void Assembler::jmpb(Label& L) {
- if (L.is_bound()) {
- const int short_size = 2;
- address entry = target(L);
- assert(is8bit((entry - _code_pos) + short_size),
- "Dispacement too large for a short jmp");
- assert(entry != NULL, "jmp most probably wrong");
- intptr_t offs = entry - _code_pos;
- emit_byte(0xEB);
- emit_byte((offs - short_size) & 0xFF);
+void Assembler::emit_data64(jlong data,
+ relocInfo::relocType rtype,
+ int format) {
+ if (rtype == relocInfo::none) {
+ emit_long64(data);
} else {
- InstructionMark im(this);
- L.add_patch_at(code(), locator());
- emit_byte(0xEB);
- emit_byte(0);
+ emit_data64(data, Relocation::spec_simple(rtype), format);
}
}
-void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
- InstructionMark im(this);
- relocate(rtype);
- assert((0 <= cc) && (cc < 16), "illegal cc");
- if (L.is_bound()) {
- address dst = target(L);
- assert(dst != NULL, "jcc most probably wrong");
+void Assembler::emit_data64(jlong data,
+ RelocationHolder const& rspec,
+ int format) {
+ assert(imm_operand == 0, "default format must be immediate in this file");
+ assert(imm_operand == format, "must be immediate");
+ assert(inst_mark() != NULL, "must be inside InstructionMark");
+ // Do not use AbstractAssembler::relocate, which is not intended for
+ // embedded words. Instead, relocate to the enclosing instruction.
+ code_section()->relocate(inst_mark(), rspec, format);
+#ifdef ASSERT
+ check_relocation(rspec, format);
+#endif
+ emit_long64(data);
+}
- const int short_size = 2;
- const int long_size = 6;
- intptr_t offs = (intptr_t)dst - (intptr_t)_code_pos;
- if (rtype == relocInfo::none && is8bit(offs - short_size)) {
- // 0111 tttn #8-bit disp
- emit_byte(0x70 | cc);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- assert(is_simm32(offs - long_size),
- "must be 32bit offset (call4)");
- emit_byte(0x0F);
- emit_byte(0x80 | cc);
- emit_long(offs - long_size);
- }
+int Assembler::prefix_and_encode(int reg_enc, bool byteinst) {
+ if (reg_enc >= 8) {
+ prefix(REX_B);
+ reg_enc -= 8;
+ } else if (byteinst && reg_enc >= 4) {
+ prefix(REX);
+ }
+ return reg_enc;
+}
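+// Example: prefix_and_encode(4 /* rsp/spl */, true) emits a bare REX (0x40) so that
+// encoding 4 selects SPL rather than AH in byte instructions, and returns 4 unchanged.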
+
+int Assembler::prefixq_and_encode(int reg_enc) {
+ if (reg_enc < 8) {
+ prefix(REX_W);
} else {
- // Note: could eliminate cond. jumps to this jump if condition
- // is the same however, seems to be rather unlikely case.
- // Note: use jccb() if label to be bound is very close to get
- // an 8-bit displacement
- L.add_patch_at(code(), locator());
- emit_byte(0x0F);
- emit_byte(0x80 | cc);
- emit_long(0);
+ prefix(REX_WB);
+ reg_enc -= 8;
}
+ return reg_enc;
}
-void Assembler::jccb(Condition cc, Label& L) {
- if (L.is_bound()) {
- const int short_size = 2;
- const int long_size = 6;
- address entry = target(L);
- assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
- "Dispacement too large for a short jmp");
- intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
- // 0111 tttn #8-bit disp
- emit_byte(0x70 | cc);
- emit_byte((offs - short_size) & 0xFF);
+int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) {
+ if (dst_enc < 8) {
+ if (src_enc >= 8) {
+ prefix(REX_B);
+ src_enc -= 8;
+ } else if (byteinst && src_enc >= 4) {
+ prefix(REX);
+ }
} else {
- InstructionMark im(this);
- L.add_patch_at(code(), locator());
- emit_byte(0x70 | cc);
- emit_byte(0);
+ if (src_enc < 8) {
+ prefix(REX_R);
+ } else {
+ prefix(REX_RB);
+ src_enc -= 8;
+ }
+ dst_enc -= 8;
}
+ return dst_enc << 3 | src_enc;
}
-// FP instructions
+int Assembler::prefixq_and_encode(int dst_enc, int src_enc) {
+ if (dst_enc < 8) {
+ if (src_enc < 8) {
+ prefix(REX_W);
+ } else {
+ prefix(REX_WB);
+ src_enc -= 8;
+ }
+ } else {
+ if (src_enc < 8) {
+ prefix(REX_WR);
+ } else {
+ prefix(REX_WRB);
+ src_enc -= 8;
+ }
+ dst_enc -= 8;
+ }
+ return dst_enc << 3 | src_enc;
+}
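+// Example: prefixq_and_encode(10 /* r10 */, 0 /* rax */) emits REX_WR and returns
+// (10 - 8) << 3 | 0 == 0x10, the reg/rm bits placed into the ModRM byte by the caller.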
-void Assembler::fxsave(Address dst) {
- prefixq(dst);
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_operand(as_Register(0), dst);
+void Assembler::prefix(Register reg) {
+ if (reg->encoding() >= 8) {
+ prefix(REX_B);
+ }
}
-void Assembler::fxrstor(Address src) {
- prefixq(src);
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_operand(as_Register(1), src);
+void Assembler::prefix(Address adr) {
+ if (adr.base_needs_rex()) {
+ if (adr.index_needs_rex()) {
+ prefix(REX_XB);
+ } else {
+ prefix(REX_B);
+ }
+ } else {
+ if (adr.index_needs_rex()) {
+ prefix(REX_X);
+ }
+ }
}
-void Assembler::ldmxcsr(Address src) {
- InstructionMark im(this);
- prefix(src);
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_operand(as_Register(2), src);
+void Assembler::prefixq(Address adr) {
+ if (adr.base_needs_rex()) {
+ if (adr.index_needs_rex()) {
+ prefix(REX_WXB);
+ } else {
+ prefix(REX_WB);
+ }
+ } else {
+ if (adr.index_needs_rex()) {
+ prefix(REX_WX);
+ } else {
+ prefix(REX_W);
+ }
+ }
}
-void Assembler::stmxcsr(Address dst) {
- InstructionMark im(this);
- prefix(dst);
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_operand(as_Register(3), dst);
+
+void Assembler::prefix(Address adr, Register reg, bool byteinst) {
+ if (reg->encoding() < 8) {
+ if (adr.base_needs_rex()) {
+ if (adr.index_needs_rex()) {
+ prefix(REX_XB);
+ } else {
+ prefix(REX_B);
+ }
+ } else {
+ if (adr.index_needs_rex()) {
+ prefix(REX_X);
+ } else if (reg->encoding() >= 4 ) {
+ prefix(REX);
+ }
+ }
+ } else {
+ if (adr.base_needs_rex()) {
+ if (adr.index_needs_rex()) {
+ prefix(REX_RXB);
+ } else {
+ prefix(REX_RB);
+ }
+ } else {
+ if (adr.index_needs_rex()) {
+ prefix(REX_RX);
+ } else {
+ prefix(REX_R);
+ }
+ }
+ }
}
-void Assembler::addss(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF3);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x58);
- emit_byte(0xC0 | encode);
+void Assembler::prefixq(Address adr, Register src) {
+ if (src->encoding() < 8) {
+ if (adr.base_needs_rex()) {
+ if (adr.index_needs_rex()) {
+ prefix(REX_WXB);
+ } else {
+ prefix(REX_WB);
+ }
+ } else {
+ if (adr.index_needs_rex()) {
+ prefix(REX_WX);
+ } else {
+ prefix(REX_W);
+ }
+ }
+ } else {
+ if (adr.base_needs_rex()) {
+ if (adr.index_needs_rex()) {
+ prefix(REX_WRXB);
+ } else {
+ prefix(REX_WRB);
+ }
+ } else {
+ if (adr.index_needs_rex()) {
+ prefix(REX_WRX);
+ } else {
+ prefix(REX_WR);
+ }
+ }
+ }
}
-void Assembler::addss(XMMRegister dst, Address src) {
- InstructionMark im(this);
- emit_byte(0xF3);
- prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x58);
- emit_operand(dst, src);
+void Assembler::prefix(Address adr, XMMRegister reg) {
+ if (reg->encoding() < 8) {
+ if (adr.base_needs_rex()) {
+ if (adr.index_needs_rex()) {
+ prefix(REX_XB);
+ } else {
+ prefix(REX_B);
+ }
+ } else {
+ if (adr.index_needs_rex()) {
+ prefix(REX_X);
+ }
+ }
+ } else {
+ if (adr.base_needs_rex()) {
+ if (adr.index_needs_rex()) {
+ prefix(REX_RXB);
+ } else {
+ prefix(REX_RB);
+ }
+ } else {
+ if (adr.index_needs_rex()) {
+ prefix(REX_RX);
+ } else {
+ prefix(REX_R);
+ }
+ }
+ }
}
-void Assembler::subss(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF3);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x5C);
- emit_byte(0xC0 | encode);
+void Assembler::adcq(Register dst, int32_t imm32) {
+ (void) prefixq_and_encode(dst->encoding());
+ emit_arith(0x81, 0xD0, dst, imm32);
}
-void Assembler::subss(XMMRegister dst, Address src) {
+void Assembler::adcq(Register dst, Address src) {
InstructionMark im(this);
- emit_byte(0xF3);
- prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x5C);
+ prefixq(src, dst);
+ emit_byte(0x13);
emit_operand(dst, src);
}
-void Assembler::mulss(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF3);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x59);
- emit_byte(0xC0 | encode);
+void Assembler::adcq(Register dst, Register src) {
+ (int) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x13, 0xC0, dst, src);
}
-void Assembler::mulss(XMMRegister dst, Address src) {
+void Assembler::addq(Address dst, int32_t imm32) {
InstructionMark im(this);
- emit_byte(0xF3);
- prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x59);
- emit_operand(dst, src);
+ prefixq(dst);
+ emit_arith_operand(0x81, rax, dst,imm32);
}
-void Assembler::divss(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF3);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x5E);
- emit_byte(0xC0 | encode);
+void Assembler::addq(Address dst, Register src) {
+ InstructionMark im(this);
+ prefixq(dst, src);
+ emit_byte(0x01);
+ emit_operand(src, dst);
}
-void Assembler::divss(XMMRegister dst, Address src) {
+void Assembler::addq(Register dst, int32_t imm32) {
+ (void) prefixq_and_encode(dst->encoding());
+ emit_arith(0x81, 0xC0, dst, imm32);
+}
+
+void Assembler::addq(Register dst, Address src) {
InstructionMark im(this);
- emit_byte(0xF3);
- prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x5E);
+ prefixq(src, dst);
+ emit_byte(0x03);
emit_operand(dst, src);
}
-void Assembler::addsd(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF2);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x58);
- emit_byte(0xC0 | encode);
+void Assembler::addq(Register dst, Register src) {
+ (void) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x03, 0xC0, dst, src);
}
-void Assembler::addsd(XMMRegister dst, Address src) {
+void Assembler::andq(Register dst, int32_t imm32) {
+ (void) prefixq_and_encode(dst->encoding());
+ emit_arith(0x81, 0xE0, dst, imm32);
+}
+
+void Assembler::andq(Register dst, Address src) {
InstructionMark im(this);
- emit_byte(0xF2);
- prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x58);
+ prefixq(src, dst);
+ emit_byte(0x23);
emit_operand(dst, src);
}
-void Assembler::subsd(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF2);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x5C);
- emit_byte(0xC0 | encode);
+void Assembler::andq(Register dst, Register src) {
+ (int) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x23, 0xC0, dst, src);
}
-void Assembler::subsd(XMMRegister dst, Address src) {
- InstructionMark im(this);
- emit_byte(0xF2);
- prefix(src, dst);
+void Assembler::bswapq(Register reg) {
+ int encode = prefixq_and_encode(reg->encoding());
emit_byte(0x0F);
- emit_byte(0x5C);
- emit_operand(dst, src);
+ emit_byte(0xC8 | encode);
}
-void Assembler::mulsd(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF2);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x59);
- emit_byte(0xC0 | encode);
+void Assembler::cdqq() {
+ prefix(REX_W);
+ emit_byte(0x99);
}
-void Assembler::mulsd(XMMRegister dst, Address src) {
- InstructionMark im(this);
- emit_byte(0xF2);
- prefix(src, dst);
+void Assembler::clflush(Address adr) {
+ prefix(adr);
emit_byte(0x0F);
- emit_byte(0x59);
- emit_operand(dst, src);
+ emit_byte(0xAE);
+ emit_operand(rdi, adr);
}
-void Assembler::divsd(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF2);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
+void Assembler::cmovq(Condition cc, Register dst, Register src) {
+ int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x5E);
+ emit_byte(0x40 | cc);
emit_byte(0xC0 | encode);
}
-void Assembler::divsd(XMMRegister dst, Address src) {
+void Assembler::cmovq(Condition cc, Register dst, Address src) {
InstructionMark im(this);
- emit_byte(0xF2);
- prefix(src, dst);
+ prefixq(src, dst);
emit_byte(0x0F);
- emit_byte(0x5E);
+ emit_byte(0x40 | cc);
emit_operand(dst, src);
}
-void Assembler::sqrtsd(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF2);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x51);
- emit_byte(0xC0 | encode);
+void Assembler::cmpq(Address dst, int32_t imm32) {
+ InstructionMark im(this);
+ prefixq(dst);
+ emit_byte(0x81);
+ emit_operand(rdi, dst, 4);
+ emit_long(imm32);
}
-void Assembler::sqrtsd(XMMRegister dst, Address src) {
+void Assembler::cmpq(Register dst, int32_t imm32) {
+ (void) prefixq_and_encode(dst->encoding());
+ emit_arith(0x81, 0xF8, dst, imm32);
+}
+
+void Assembler::cmpq(Address dst, Register src) {
InstructionMark im(this);
- emit_byte(0xF2);
- prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x51);
- emit_operand(dst, src);
+ prefixq(dst, src);
+ emit_byte(0x3B);
+ emit_operand(src, dst);
}
-void Assembler::xorps(XMMRegister dst, XMMRegister src) {
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x57);
- emit_byte(0xC0 | encode);
+void Assembler::cmpq(Register dst, Register src) {
+ (void) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x3B, 0xC0, dst, src);
}
-void Assembler::xorps(XMMRegister dst, Address src) {
+void Assembler::cmpq(Register dst, Address src) {
InstructionMark im(this);
- prefix(src, dst);
- emit_byte(0x0F);
- emit_byte(0x57);
+ prefixq(src, dst);
+ emit_byte(0x3B);
emit_operand(dst, src);
}
-void Assembler::xorpd(XMMRegister dst, XMMRegister src) {
- emit_byte(0x66);
- xorps(dst, src);
-}
-
-void Assembler::xorpd(XMMRegister dst, Address src) {
+void Assembler::cmpxchgq(Register reg, Address adr) {
InstructionMark im(this);
- emit_byte(0x66);
- prefix(src, dst);
+ prefixq(adr, reg);
emit_byte(0x0F);
- emit_byte(0x57);
- emit_operand(dst, src);
+ emit_byte(0xB1);
+ emit_operand(reg, adr);
}
-void Assembler::cvtsi2ssl(XMMRegister dst, Register src) {
- emit_byte(0xF3);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
+void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ emit_byte(0xF2);
+ int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
emit_byte(0x2A);
emit_byte(0xC0 | encode);
}
void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
emit_byte(0xF3);
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
@@ -3327,127 +3687,498 @@ void Assembler::cvtsi2ssq(XMMRegister dst, Register src) {
emit_byte(0xC0 | encode);
}
-void Assembler::cvtsi2sdl(XMMRegister dst, Register src) {
+void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
emit_byte(0xF2);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x2A);
+ emit_byte(0x2C);
emit_byte(0xC0 | encode);
}
-void Assembler::cvtsi2sdq(XMMRegister dst, Register src) {
- emit_byte(0xF2);
+void Assembler::cvttss2siq(Register dst, XMMRegister src) {
+ NOT_LP64(assert(VM_Version::supports_sse(), ""));
+ emit_byte(0xF3);
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x2A);
+ emit_byte(0x2C);
emit_byte(0xC0 | encode);
}
-void Assembler::cvttss2sil(Register dst, XMMRegister src) {
- emit_byte(0xF3);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
+void Assembler::decl(Register dst) {
+ // Don't use it directly. Use MacroAssembler::decrementl() instead.
+ // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+ int encode = prefix_and_encode(dst->encoding());
+ emit_byte(0xFF);
+ emit_byte(0xC8 | encode);
+}
+
+void Assembler::decq(Register dst) {
+ // Don't use it directly. Use MacroAssembler::decrementq() instead.
+  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xFF);
+ emit_byte(0xC8 | encode);
+}
+
+void Assembler::decq(Address dst) {
+ // Don't use it directly. Use MacroAssembler::decrementq() instead.
+ InstructionMark im(this);
+ prefixq(dst);
+ emit_byte(0xFF);
+ emit_operand(rcx, dst);
+}
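// Background note (illustrative, not from the change itself): the one-byte 40+rd /
// 48+rd inc/dec opcodes of 32-bit mode were reassigned as REX prefixes in 64-bit
// mode, which is why the routines above must use the two-byte FF /0 and FF /1
// forms, e.g.
//   decq(rcx)  =>  48 FF C9   (REX.W, opcode FF, ModRM /1 with rm = rcx)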
+
+void Assembler::fxrstor(Address src) {
+ prefixq(src);
emit_byte(0x0F);
- emit_byte(0x2C);
- emit_byte(0xC0 | encode);
+ emit_byte(0xAE);
+ emit_operand(as_Register(1), src);
}
-void Assembler::cvttss2siq(Register dst, XMMRegister src) {
- emit_byte(0xF3);
+void Assembler::fxsave(Address dst) {
+ prefixq(dst);
+ emit_byte(0x0F);
+ emit_byte(0xAE);
+ emit_operand(as_Register(0), dst);
+}
+
+void Assembler::idivq(Register src) {
+ int encode = prefixq_and_encode(src->encoding());
+ emit_byte(0xF7);
+ emit_byte(0xF8 | encode);
+}
+
+void Assembler::imulq(Register dst, Register src) {
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x2C);
+ emit_byte(0xAF);
emit_byte(0xC0 | encode);
}
-void Assembler::cvttsd2sil(Register dst, XMMRegister src) {
- emit_byte(0xF2);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x2C);
+void Assembler::imulq(Register dst, Register src, int value) {
+ int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+ if (is8bit(value)) {
+ emit_byte(0x6B);
+ emit_byte(0xC0 | encode);
+ emit_byte(value);
+ } else {
+ emit_byte(0x69);
+ emit_byte(0xC0 | encode);
+ emit_long(value);
+ }
+}
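// Illustrative encodings (not part of the change): the short 6B /r ib form is used
// when the constant fits a sign-extended byte, otherwise the 69 /r id form:
//   imulq(rax, rbx, 7)     =>  48 6B C3 07
//   imulq(rax, rbx, 1000)  =>  48 69 C3 E8 03 00 00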
+
+void Assembler::incl(Register dst) {
+ // Don't use it directly. Use MacroAssembler::incrementl() instead.
+  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+ int encode = prefix_and_encode(dst->encoding());
+ emit_byte(0xFF);
emit_byte(0xC0 | encode);
}
-void Assembler::cvttsd2siq(Register dst, XMMRegister src) {
- emit_byte(0xF2);
+void Assembler::incq(Register dst) {
+ // Don't use it directly. Use MacroAssembler::incrementq() instead.
+  // Use two-byte form (one-byte form is a REX prefix in 64-bit mode)
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xFF);
+ emit_byte(0xC0 | encode);
+}
+
+void Assembler::incq(Address dst) {
+ // Don't use it directly. Use MacroAssembler::incrementq() instead.
+ InstructionMark im(this);
+ prefixq(dst);
+ emit_byte(0xFF);
+ emit_operand(rax, dst);
+}
+
+void Assembler::lea(Register dst, Address src) {
+ leaq(dst, src);
+}
+
+void Assembler::leaq(Register dst, Address src) {
+ InstructionMark im(this);
+ prefixq(src, dst);
+ emit_byte(0x8D);
+ emit_operand(dst, src);
+}
+
+void Assembler::mov64(Register dst, int64_t imm64) {
+ InstructionMark im(this);
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xB8 | encode);
+ emit_long64(imm64);
+}
+
+void Assembler::mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec) {
+ InstructionMark im(this);
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xB8 | encode);
+ emit_data64(imm64, rspec);
+}
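// Encoding sketch (illustrative, not from the change itself): mov64 emits the
// "movabs" form REX.W + (B8+rd) + imm64 with a little-endian immediate, e.g.
//   mov64(rax, 0x1122334455667788)  =>  48 B8 88 77 66 55 44 33 22 11
//   mov64(r10, imm64)               =>  49 BA <imm64>   (REX.B carries r10's
//                                                        high bit, opcode B8|2)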
+
+void Assembler::movdq(XMMRegister dst, Register src) {
+ // table D-1 says MMX/SSE2
+ NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
+ emit_byte(0x66);
int encode = prefixq_and_encode(dst->encoding(), src->encoding());
emit_byte(0x0F);
- emit_byte(0x2C);
+ emit_byte(0x6E);
emit_byte(0xC0 | encode);
}
-void Assembler::cvtss2sd(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF3);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
+void Assembler::movdq(Register dst, XMMRegister src) {
+ // table D-1 says MMX/SSE2
+ NOT_LP64(assert(VM_Version::supports_sse2() || VM_Version::supports_mmx(), ""));
+ emit_byte(0x66);
+ // swap src/dst to get correct prefix
+ int encode = prefixq_and_encode(src->encoding(), dst->encoding());
emit_byte(0x0F);
- emit_byte(0x5A);
+ emit_byte(0x7E);
emit_byte(0xC0 | encode);
}
-void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
- emit_byte(0xF3);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0xE6);
+void Assembler::movq(Register dst, Register src) {
+ int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x8B);
emit_byte(0xC0 | encode);
}
-void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x5B);
+void Assembler::movq(Register dst, Address src) {
+ InstructionMark im(this);
+ prefixq(src, dst);
+ emit_byte(0x8B);
+ emit_operand(dst, src);
+}
+
+void Assembler::movq(Address dst, Register src) {
+ InstructionMark im(this);
+ prefixq(dst, src);
+ emit_byte(0x89);
+ emit_operand(src, dst);
+}
+
+void Assembler::movslq(Register dst, int32_t imm32) {
+ // dbx shows movslq(rcx, 3) as movq $0x0000000049000000,(%rbx)
+ // and movslq(r8, 3); as movl $0x0000000048000000,(%rbx)
+  // as a result we shouldn't use this form until it has been tested at runtime...
+ ShouldNotReachHere();
+ InstructionMark im(this);
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xC7 | encode);
+ emit_long(imm32);
+}
+
+void Assembler::movslq(Address dst, int32_t imm32) {
+ assert(is_simm32(imm32), "lost bits");
+ InstructionMark im(this);
+ prefixq(dst);
+ emit_byte(0xC7);
+ emit_operand(rax, dst, 4);
+ emit_long(imm32);
+}
+
+void Assembler::movslq(Register dst, Address src) {
+ InstructionMark im(this);
+ prefixq(src, dst);
+ emit_byte(0x63);
+ emit_operand(dst, src);
+}
+
+void Assembler::movslq(Register dst, Register src) {
+ int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x63);
emit_byte(0xC0 | encode);
}
-void Assembler::cvtsd2ss(XMMRegister dst, XMMRegister src) {
+void Assembler::negq(Register dst) {
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xF7);
+ emit_byte(0xD8 | encode);
+}
+
+void Assembler::notq(Register dst) {
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xF7);
+ emit_byte(0xD0 | encode);
+}
+
+void Assembler::orq(Address dst, int32_t imm32) {
+ InstructionMark im(this);
+ prefixq(dst);
+ emit_byte(0x81);
+ emit_operand(rcx, dst, 4);
+ emit_long(imm32);
+}
+
+void Assembler::orq(Register dst, int32_t imm32) {
+ (void) prefixq_and_encode(dst->encoding());
+ emit_arith(0x81, 0xC8, dst, imm32);
+}
+
+void Assembler::orq(Register dst, Address src) {
+ InstructionMark im(this);
+ prefixq(src, dst);
+ emit_byte(0x0B);
+ emit_operand(dst, src);
+}
+
+void Assembler::orq(Register dst, Register src) {
+ (void) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x0B, 0xC0, dst, src);
+}
+
+void Assembler::popa() { // 64bit
+ movq(r15, Address(rsp, 0));
+ movq(r14, Address(rsp, wordSize));
+ movq(r13, Address(rsp, 2 * wordSize));
+ movq(r12, Address(rsp, 3 * wordSize));
+ movq(r11, Address(rsp, 4 * wordSize));
+ movq(r10, Address(rsp, 5 * wordSize));
+ movq(r9, Address(rsp, 6 * wordSize));
+ movq(r8, Address(rsp, 7 * wordSize));
+ movq(rdi, Address(rsp, 8 * wordSize));
+ movq(rsi, Address(rsp, 9 * wordSize));
+ movq(rbp, Address(rsp, 10 * wordSize));
+ // skip rsp
+ movq(rbx, Address(rsp, 12 * wordSize));
+ movq(rdx, Address(rsp, 13 * wordSize));
+ movq(rcx, Address(rsp, 14 * wordSize));
+ movq(rax, Address(rsp, 15 * wordSize));
+
+ addq(rsp, 16 * wordSize);
+}
+
+void Assembler::popq(Address dst) {
+ InstructionMark im(this);
+ prefixq(dst);
+ emit_byte(0x8F);
+ emit_operand(rax, dst);
+}
+
+void Assembler::pusha() { // 64bit
+ // we have to store original rsp. ABI says that 128 bytes
+ // below rsp are local scratch.
+ movq(Address(rsp, -5 * wordSize), rsp);
+
+ subq(rsp, 16 * wordSize);
+
+ movq(Address(rsp, 15 * wordSize), rax);
+ movq(Address(rsp, 14 * wordSize), rcx);
+ movq(Address(rsp, 13 * wordSize), rdx);
+ movq(Address(rsp, 12 * wordSize), rbx);
+ // skip rsp
+ movq(Address(rsp, 10 * wordSize), rbp);
+ movq(Address(rsp, 9 * wordSize), rsi);
+ movq(Address(rsp, 8 * wordSize), rdi);
+ movq(Address(rsp, 7 * wordSize), r8);
+ movq(Address(rsp, 6 * wordSize), r9);
+ movq(Address(rsp, 5 * wordSize), r10);
+ movq(Address(rsp, 4 * wordSize), r11);
+ movq(Address(rsp, 3 * wordSize), r12);
+ movq(Address(rsp, 2 * wordSize), r13);
+ movq(Address(rsp, wordSize), r14);
+ movq(Address(rsp, 0), r15);
+}
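// Note (illustrative, not part of the change): the store through
// Address(rsp, -5 * wordSize) uses the System V red zone (the 128 scratch bytes
// below rsp mentioned above).  After subq(rsp, 16 * wordSize) that slot sits at
// rsp + 11 * wordSize, i.e. exactly the slot popa() (above) skips, so popa()
// restores every register except rsp -- the saved copy is simply discarded -- and
// reclaims the frame with addq(rsp, 16 * wordSize).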
+
+void Assembler::pushq(Address src) {
+ InstructionMark im(this);
+ prefixq(src);
+ emit_byte(0xFF);
+ emit_operand(rsi, src);
+}
+
+void Assembler::rclq(Register dst, int imm8) {
+ assert(isShiftCount(imm8 >> 1), "illegal shift count");
+ int encode = prefixq_and_encode(dst->encoding());
+ if (imm8 == 1) {
+ emit_byte(0xD1);
+ emit_byte(0xD0 | encode);
+ } else {
+ emit_byte(0xC1);
+ emit_byte(0xD0 | encode);
+ emit_byte(imm8);
+ }
+}
+void Assembler::sarq(Register dst, int imm8) {
+ assert(isShiftCount(imm8 >> 1), "illegal shift count");
+ int encode = prefixq_and_encode(dst->encoding());
+ if (imm8 == 1) {
+ emit_byte(0xD1);
+ emit_byte(0xF8 | encode);
+ } else {
+ emit_byte(0xC1);
+ emit_byte(0xF8 | encode);
+ emit_byte(imm8);
+ }
+}
+
+void Assembler::sarq(Register dst) {
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xD3);
+ emit_byte(0xF8 | encode);
+}
+void Assembler::sbbq(Address dst, int32_t imm32) {
+ InstructionMark im(this);
+ prefixq(dst);
+ emit_arith_operand(0x81, rbx, dst, imm32);
+}
+
+void Assembler::sbbq(Register dst, int32_t imm32) {
+ (void) prefixq_and_encode(dst->encoding());
+ emit_arith(0x81, 0xD8, dst, imm32);
+}
+
+void Assembler::sbbq(Register dst, Address src) {
+ InstructionMark im(this);
+ prefixq(src, dst);
+ emit_byte(0x1B);
+ emit_operand(dst, src);
+}
+
+void Assembler::sbbq(Register dst, Register src) {
+ (void) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x1B, 0xC0, dst, src);
+}
+
+void Assembler::shlq(Register dst, int imm8) {
+ assert(isShiftCount(imm8 >> 1), "illegal shift count");
+ int encode = prefixq_and_encode(dst->encoding());
+ if (imm8 == 1) {
+ emit_byte(0xD1);
+ emit_byte(0xE0 | encode);
+ } else {
+ emit_byte(0xC1);
+ emit_byte(0xE0 | encode);
+ emit_byte(imm8);
+ }
+}
+
+void Assembler::shlq(Register dst) {
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xD3);
+ emit_byte(0xE0 | encode);
+}
+
+void Assembler::shrq(Register dst, int imm8) {
+ assert(isShiftCount(imm8 >> 1), "illegal shift count");
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xC1);
+ emit_byte(0xE8 | encode);
+ emit_byte(imm8);
+}
+
+void Assembler::shrq(Register dst) {
+ int encode = prefixq_and_encode(dst->encoding());
+ emit_byte(0xD3);
+ emit_byte(0xE8 | encode);
+}
+
+void Assembler::sqrtsd(XMMRegister dst, Address src) {
+ NOT_LP64(assert(VM_Version::supports_sse2(), ""));
+ InstructionMark im(this);
emit_byte(0xF2);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
+ prefix(src, dst);
emit_byte(0x0F);
- emit_byte(0x5A);
- emit_byte(0xC0 | encode);
+ emit_byte(0x51);
+ emit_operand(dst, src);
}
-void Assembler::punpcklbw(XMMRegister dst, XMMRegister src) {
- emit_byte(0x66);
- int encode = prefix_and_encode(dst->encoding(), src->encoding());
- emit_byte(0x0F);
- emit_byte(0x60);
- emit_byte(0xC0 | encode);
+void Assembler::subq(Address dst, int32_t imm32) {
+ InstructionMark im(this);
+ prefixq(dst);
+ if (is8bit(imm32)) {
+ emit_byte(0x83);
+ emit_operand(rbp, dst, 1);
+ emit_byte(imm32 & 0xFF);
+ } else {
+ emit_byte(0x81);
+ emit_operand(rbp, dst, 4);
+ emit_long(imm32);
+ }
}
-// Implementation of MacroAssembler
+void Assembler::subq(Register dst, int32_t imm32) {
+ (void) prefixq_and_encode(dst->encoding());
+ emit_arith(0x81, 0xE8, dst, imm32);
+}
-// On 32 bit it returns a vanilla displacement on 64 bit is a rip relative displacement
-Address MacroAssembler::as_Address(AddressLiteral adr) {
- assert(!adr.is_lval(), "must be rval");
- assert(reachable(adr), "must be");
- return Address((int)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
+void Assembler::subq(Address dst, Register src) {
+ InstructionMark im(this);
+ prefixq(dst, src);
+ emit_byte(0x29);
+ emit_operand(src, dst);
}
-Address MacroAssembler::as_Address(ArrayAddress adr) {
-#ifdef _LP64
- AddressLiteral base = adr.base();
- lea(rscratch1, base);
- Address index = adr.index();
- assert(index._disp == 0, "must not have disp"); // maybe it can?
- Address array(rscratch1, index._index, index._scale, index._disp);
- return array;
-#else
- return Address::make_array(adr);
-#endif // _LP64
+void Assembler::subq(Register dst, Address src) {
+ InstructionMark im(this);
+ prefixq(src, dst);
+ emit_byte(0x2B);
+ emit_operand(dst, src);
+}
+void Assembler::subq(Register dst, Register src) {
+ (void) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x2B, 0xC0, dst, src);
}
-void MacroAssembler::fat_nop() {
- // A 5 byte nop that is safe for patching (see patch_verified_entry)
- // Recommened sequence from 'Software Optimization Guide for the AMD
- // Hammer Processor'
- emit_byte(0x66);
- emit_byte(0x66);
- emit_byte(0x90);
- emit_byte(0x66);
- emit_byte(0x90);
+void Assembler::testq(Register dst, int32_t imm32) {
+ // not using emit_arith because test
+ // doesn't support sign-extension of
+ // 8bit operands
+ int encode = dst->encoding();
+ if (encode == 0) {
+ prefix(REX_W);
+ emit_byte(0xA9);
+ } else {
+ encode = prefixq_and_encode(encode);
+ emit_byte(0xF7);
+ emit_byte(0xC0 | encode);
+ }
+ emit_long(imm32);
+}
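// Encoding note (illustrative): test-with-rax has a dedicated short form, which is
// why encode == 0 is special-cased above:
//   testq(rax, imm32)  =>  48 A9 <imm32>      (6 bytes, no ModRM)
//   testq(rcx, imm32)  =>  48 F7 C1 <imm32>   (7 bytes)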
+
+void Assembler::testq(Register dst, Register src) {
+ (void) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x85, 0xC0, dst, src);
+}
+
+void Assembler::xaddq(Address dst, Register src) {
+ InstructionMark im(this);
+ prefixq(dst, src);
+ emit_byte(0x0F);
+ emit_byte(0xC1);
+ emit_operand(src, dst);
+}
+
+void Assembler::xchgq(Register dst, Address src) {
+ InstructionMark im(this);
+ prefixq(src, dst);
+ emit_byte(0x87);
+ emit_operand(dst, src);
}
+void Assembler::xchgq(Register dst, Register src) {
+ int encode = prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_byte(0x87);
+  emit_byte(0xC0 | encode);
+}
+
+void Assembler::xorq(Register dst, Register src) {
+ (void) prefixq_and_encode(dst->encoding(), src->encoding());
+ emit_arith(0x33, 0xC0, dst, src);
+}
+
+void Assembler::xorq(Register dst, Address src) {
+ InstructionMark im(this);
+ prefixq(src, dst);
+ emit_byte(0x33);
+ emit_operand(dst, src);
+}
+
+#endif // !LP64
+
static Assembler::Condition reverse[] = {
Assembler::noOverflow /* overflow = 0x0 */ ,
Assembler::overflow /* noOverflow = 0x1 */ ,
@@ -3468,404 +4199,845 @@ static Assembler::Condition reverse[] = {
};
-// 32bit can do a case table jump in one instruction but we no longer allow the base
-// to be installed in the Address class
-void MacroAssembler::jump(ArrayAddress entry) {
-#ifdef _LP64
- lea(rscratch1, entry.base());
- Address dispatch = entry.index();
- assert(dispatch._base == noreg, "must be");
- dispatch._base = rscratch1;
- jmp(dispatch);
-#else
- jmp(as_Address(entry));
-#endif // _LP64
-}
-void MacroAssembler::jump(AddressLiteral dst) {
- if (reachable(dst)) {
- jmp_literal(dst.target(), dst.rspec());
- } else {
- lea(rscratch1, dst);
- jmp(rscratch1);
- }
-}
+// Implementation of MacroAssembler
-void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
- if (reachable(dst)) {
- InstructionMark im(this);
- relocate(dst.reloc());
- const int short_size = 2;
- const int long_size = 6;
- int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
- if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
- // 0111 tttn #8-bit disp
- emit_byte(0x70 | cc);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- emit_byte(0x0F);
- emit_byte(0x80 | cc);
- emit_long(offs - long_size);
- }
- } else {
-#ifdef ASSERT
- warning("reversing conditional branch");
-#endif /* ASSERT */
- Label skip;
- jccb(reverse[cc], skip);
- lea(rscratch1, dst);
- Assembler::jmp(rscratch1);
- bind(skip);
- }
-}
+// First, all the routines that have distinct versions depending on 32/64 bit,
+// unless the difference is trivial (a line or so).
-// Wouldn't need if AddressLiteral version had new name
-void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
- Assembler::call(L, rtype);
+#ifndef _LP64
+
+// 32bit versions
+
+Address MacroAssembler::as_Address(AddressLiteral adr) {
+ return Address(adr.target(), adr.rspec());
}
-// Wouldn't need if AddressLiteral version had new name
-void MacroAssembler::call(Register entry) {
- Assembler::call(entry);
+Address MacroAssembler::as_Address(ArrayAddress adr) {
+ return Address::make_array(adr);
}
-void MacroAssembler::call(AddressLiteral entry) {
- if (reachable(entry)) {
- Assembler::call_literal(entry.target(), entry.rspec());
+int MacroAssembler::biased_locking_enter(Register lock_reg,
+ Register obj_reg,
+ Register swap_reg,
+ Register tmp_reg,
+ bool swap_reg_contains_mark,
+ Label& done,
+ Label* slow_case,
+ BiasedLockingCounters* counters) {
+ assert(UseBiasedLocking, "why call this otherwise?");
+ assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
+ assert_different_registers(lock_reg, obj_reg, swap_reg);
+
+ if (PrintBiasedLockingStatistics && counters == NULL)
+ counters = BiasedLocking::counters();
+
+ bool need_tmp_reg = false;
+ if (tmp_reg == noreg) {
+ need_tmp_reg = true;
+ tmp_reg = lock_reg;
} else {
- lea(rscratch1, entry);
- Assembler::call(rscratch1);
+ assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
}
-}
+ assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
+ Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
+ Address saved_mark_addr(lock_reg, 0);
-void MacroAssembler::cmp8(AddressLiteral src1, int8_t src2) {
- if (reachable(src1)) {
- cmpb(as_Address(src1), src2);
- } else {
- lea(rscratch1, src1);
- cmpb(Address(rscratch1, 0), src2);
+ // Biased locking
+ // See whether the lock is currently biased toward our thread and
+ // whether the epoch is still valid
+ // Note that the runtime guarantees sufficient alignment of JavaThread
+ // pointers to allow age to be placed into low bits
+ // First check to see whether biasing is even enabled for this object
+ Label cas_label;
+ int null_check_offset = -1;
+ if (!swap_reg_contains_mark) {
+ null_check_offset = offset();
+ movl(swap_reg, mark_addr);
}
-}
+ if (need_tmp_reg) {
+ push(tmp_reg);
+ }
+ movl(tmp_reg, swap_reg);
+ andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ jcc(Assembler::notEqual, cas_label);
+ // The bias pattern is present in the object's header. Need to check
+ // whether the bias owner and the epoch are both still current.
+ // Note that because there is no current thread register on x86 we
+ // need to store off the mark word we read out of the object to
+ // avoid reloading it and needing to recheck invariants below. This
+ // store is unfortunate but it makes the overall code shorter and
+ // simpler.
+ movl(saved_mark_addr, swap_reg);
+ if (need_tmp_reg) {
+ push(tmp_reg);
+ }
+ get_thread(tmp_reg);
+ xorl(swap_reg, tmp_reg);
+ if (swap_reg_contains_mark) {
+ null_check_offset = offset();
+ }
+ movl(tmp_reg, klass_addr);
+ xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address)counters->biased_lock_entry_count_addr()));
+ }
+ jcc(Assembler::equal, done);
-void MacroAssembler::cmp32(AddressLiteral src1, int32_t src2) {
- if (reachable(src1)) {
- cmpl(as_Address(src1), src2);
- } else {
- lea(rscratch1, src1);
- cmpl(Address(rscratch1, 0), src2);
+ Label try_revoke_bias;
+ Label try_rebias;
+
+ // At this point we know that the header has the bias pattern and
+ // that we are not the bias owner in the current epoch. We need to
+ // figure out more details about the state of the header in order to
+ // know what operations can be legally performed on the object's
+ // header.
+
+ // If the low three bits in the xor result aren't clear, that means
+ // the prototype header is no longer biased and we have to revoke
+ // the bias on this object.
+ testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
+ jcc(Assembler::notZero, try_revoke_bias);
+
+ // Biasing is still enabled for this data type. See whether the
+ // epoch of the current bias is still valid, meaning that the epoch
+ // bits of the mark word are equal to the epoch bits of the
+ // prototype header. (Note that the prototype header's epoch bits
+ // only change at a safepoint.) If not, attempt to rebias the object
+ // toward the current thread. Note that we must be absolutely sure
+ // that the current epoch is invalid in order to do this because
+ // otherwise the manipulations it performs on the mark word are
+ // illegal.
+ testl(swap_reg, markOopDesc::epoch_mask_in_place);
+ jcc(Assembler::notZero, try_rebias);
+
+ // The epoch of the current bias is still valid but we know nothing
+ // about the owner; it might be set or it might be clear. Try to
+ // acquire the bias of the object using an atomic operation. If this
+ // fails we will go in to the runtime to revoke the object's bias.
+ // Note that we first construct the presumed unbiased header so we
+ // don't accidentally blow away another thread's valid bias.
+ movl(swap_reg, saved_mark_addr);
+ andl(swap_reg,
+ markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ if (need_tmp_reg) {
+ push(tmp_reg);
}
-}
+ get_thread(tmp_reg);
+ orl(tmp_reg, swap_reg);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgptr(tmp_reg, Address(obj_reg, 0));
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ // If the biasing toward our thread failed, this means that
+ // another thread succeeded in biasing it toward itself and we
+ // need to revoke that bias. The revocation will occur in the
+ // interpreter runtime in the slow case.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
+ }
+ if (slow_case != NULL) {
+ jcc(Assembler::notZero, *slow_case);
+ }
+ jmp(done);
-void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
- if (reachable(src2)) {
- cmpl(src1, as_Address(src2));
- } else {
- lea(rscratch1, src2);
- cmpl(src1, Address(rscratch1, 0));
+ bind(try_rebias);
+ // At this point we know the epoch has expired, meaning that the
+ // current "bias owner", if any, is actually invalid. Under these
+ // circumstances _only_, we are allowed to use the current header's
+ // value as the comparison value when doing the cas to acquire the
+ // bias in the current epoch. In other words, we allow transfer of
+ // the bias from one thread to another directly in this situation.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ if (need_tmp_reg) {
+ push(tmp_reg);
}
-}
+ get_thread(tmp_reg);
+ movl(swap_reg, klass_addr);
+ orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ movl(swap_reg, saved_mark_addr);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgptr(tmp_reg, Address(obj_reg, 0));
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ // If the biasing toward our thread failed, then another thread
+ // succeeded in biasing it toward itself and we need to revoke that
+ // bias. The revocation will occur in the runtime in the slow case.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
+ }
+ if (slow_case != NULL) {
+ jcc(Assembler::notZero, *slow_case);
+ }
+ jmp(done);
-void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
-#ifdef _LP64
- if (src2.is_lval()) {
- movptr(rscratch1, src2);
- Assembler::cmpq(src1, rscratch1);
- } else if (reachable(src2)) {
- cmpq(src1, as_Address(src2));
- } else {
- lea(rscratch1, src2);
- Assembler::cmpq(src1, Address(rscratch1, 0));
+ bind(try_revoke_bias);
+ // The prototype mark in the klass doesn't have the bias bit set any
+ // more, indicating that objects of this data type are not supposed
+ // to be biased any more. We are going to try to reset the mark of
+ // this object to the prototype value and fall through to the
+ // CAS-based locking scheme. Note that if our CAS fails, it means
+ // that another thread raced us for the privilege of revoking the
+ // bias of this particular object, so it's okay to continue in the
+ // normal locking code.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ movl(swap_reg, saved_mark_addr);
+ if (need_tmp_reg) {
+ push(tmp_reg);
}
-#else
- if (src2.is_lval()) {
- cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
- } else {
- cmpl(src1, as_Address(src2));
+ movl(tmp_reg, klass_addr);
+ movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ if (os::is_MP()) {
+ lock();
}
-#endif // _LP64
+ cmpxchgptr(tmp_reg, Address(obj_reg, 0));
+ if (need_tmp_reg) {
+ pop(tmp_reg);
+ }
+ // Fall through to the normal CAS-based lock, because no matter what
+ // the result of the above CAS, some thread must have succeeded in
+ // removing the bias bit from the object's header.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
+ }
+
+ bind(cas_label);
+
+ return null_check_offset;
+}
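// Rough summary of the fast path above (illustrative pseudocode; the authoritative
// bit layout and mask values live in markOop.hpp):
//
//   if ((mark & biased_lock_mask) != biased_lock_pattern)  goto cas_label;
//   diff = (mark ^ (prototype_header | thread)) & ~age_mask;
//   if (diff == 0)                  goto done;             // already biased to us
//   if (diff & biased_lock_mask)    goto try_revoke_bias;  // biasing disabled
//   if (diff & epoch_mask)          goto try_rebias;       // epoch expired
//   // otherwise: anonymously biased -- CAS our thread into the header and fall
//   // into the slow case if another thread wins the race.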
+void MacroAssembler::call_VM_leaf_base(address entry_point,
+ int number_of_arguments) {
+ call(RuntimeAddress(entry_point));
+ increment(rsp, number_of_arguments * wordSize);
}
-void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
- assert(src2.is_lval(), "not a mem-mem compare");
-#ifdef _LP64
- // moves src2's literal address
- movptr(rscratch1, src2);
- Assembler::cmpq(src1, rscratch1);
-#else
- cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
-#endif // _LP64
+void MacroAssembler::cmpoop(Address src1, jobject obj) {
+ cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
-void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
- assert(!src2.is_lval(), "should use cmpptr");
+void MacroAssembler::cmpoop(Register src1, jobject obj) {
+ cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
+}
- if (reachable(src2)) {
-#ifdef _LP64
- cmpq(src1, as_Address(src2));
-#else
- ShouldNotReachHere();
-#endif // _LP64
+void MacroAssembler::extend_sign(Register hi, Register lo) {
+ // According to Intel Doc. AP-526, "Integer Divide", p.18.
+ if (VM_Version::is_P6() && hi == rdx && lo == rax) {
+ cdql();
} else {
- lea(rscratch1, src2);
- Assembler::cmpq(src1, Address(rscratch1, 0));
+ movl(hi, lo);
+ sarl(hi, 31);
}
}
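// What extend_sign produces, as a plain C++ sketch (illustrative only):
static inline void extend_sign_reference(int32_t& hi, int32_t lo) {
  hi = lo >> 31;  // all zeros or all ones; with hi == rdx, lo == rax this is cdq
}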
-void MacroAssembler::cmpxchgptr(Register reg, AddressLiteral adr) {
- if (reachable(adr)) {
-#ifdef _LP64
- cmpxchgq(reg, as_Address(adr));
-#else
- cmpxchgl(reg, as_Address(adr));
-#endif // _LP64
- } else {
- lea(rscratch1, adr);
- cmpxchgq(reg, Address(rscratch1, 0));
- }
+void MacroAssembler::fat_nop() {
+ // A 5 byte nop that is safe for patching (see patch_verified_entry)
+ emit_byte(0x26); // es:
+ emit_byte(0x2e); // cs:
+ emit_byte(0x64); // fs:
+ emit_byte(0x65); // gs:
+ emit_byte(0x90);
}
-void MacroAssembler::incrementl(AddressLiteral dst) {
- if (reachable(dst)) {
- incrementl(as_Address(dst));
- } else {
- lea(rscratch1, dst);
- incrementl(Address(rscratch1, 0));
- }
+void MacroAssembler::jC2(Register tmp, Label& L) {
+ // set parity bit if FPU flag C2 is set (via rax)
+ save_rax(tmp);
+ fwait(); fnstsw_ax();
+ sahf();
+ restore_rax(tmp);
+ // branch
+ jcc(Assembler::parity, L);
}
-void MacroAssembler::incrementl(ArrayAddress dst) {
- incrementl(as_Address(dst));
+void MacroAssembler::jnC2(Register tmp, Label& L) {
+ // set parity bit if FPU flag C2 is set (via rax)
+ save_rax(tmp);
+ fwait(); fnstsw_ax();
+ sahf();
+ restore_rax(tmp);
+ // branch
+ jcc(Assembler::noParity, L);
}
-void MacroAssembler::lea(Register dst, Address src) {
-#ifdef _LP64
- leaq(dst, src);
-#else
- leal(dst, src);
-#endif // _LP64
+// 32bit can do a case table jump in one instruction but we no longer allow the base
+// to be installed in the Address class
+void MacroAssembler::jump(ArrayAddress entry) {
+ jmp(as_Address(entry));
}
-void MacroAssembler::lea(Register dst, AddressLiteral src) {
-#ifdef _LP64
- mov_literal64(dst, (intptr_t)src.target(), src.rspec());
-#else
- mov_literal32(dst, (intptr_t)src.target(), src.rspec());
-#endif // _LP64
+// Note: y_lo will be destroyed
+void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
+ // Long compare for Java (semantics as described in JVM spec.)
+ Label high, low, done;
+
+ cmpl(x_hi, y_hi);
+ jcc(Assembler::less, low);
+ jcc(Assembler::greater, high);
+ // x_hi is the return register
+ xorl(x_hi, x_hi);
+ cmpl(x_lo, y_lo);
+ jcc(Assembler::below, low);
+ jcc(Assembler::equal, done);
+
+ bind(high);
+ xorl(x_hi, x_hi);
+ increment(x_hi);
+ jmp(done);
+
+ bind(low);
+ xorl(x_hi, x_hi);
+ decrementl(x_hi);
+
+ bind(done);
}
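// Reference semantics for lcmp2int above (illustrative sketch): the generated code
// compares the high words as signed and the low words as unsigned, leaving the
// Java lcmp result in x_hi:
static inline int lcmp_reference(int64_t x, int64_t y) {
  if (x < y) return -1;
  if (x > y) return  1;
  return 0;
}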
-void MacroAssembler::mov32(AddressLiteral dst, Register src) {
- if (reachable(dst)) {
- movl(as_Address(dst), src);
- } else {
- lea(rscratch1, dst);
- movl(Address(rscratch1, 0), src);
- }
+void MacroAssembler::lea(Register dst, AddressLiteral src) {
+ mov_literal32(dst, (int32_t)src.target(), src.rspec());
}
-void MacroAssembler::mov32(Register dst, AddressLiteral src) {
- if (reachable(src)) {
- movl(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movl(dst, Address(rscratch1, 0));
- }
+void MacroAssembler::lea(Address dst, AddressLiteral adr) {
+ // leal(dst, as_Address(adr));
+ // see note in movl as to why we must use a move
+ mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
}
-void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- if (UseXmmLoadAndClearUpper) {
- movsd (dst, as_Address(src));
- } else {
- movlpd(dst, as_Address(src));
- }
- } else {
- lea(rscratch1, src);
- if (UseXmmLoadAndClearUpper) {
- movsd (dst, Address(rscratch1, 0));
- } else {
- movlpd(dst, Address(rscratch1, 0));
- }
- }
+void MacroAssembler::leave() {
+ mov(rsp, rbp);
+ pop(rbp);
}
-void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- movss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movss(dst, Address(rscratch1, 0));
- }
+void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
+ // Multiplication of two Java long values stored on the stack
+ // as illustrated below. Result is in rdx:rax.
+ //
+ // rsp ---> [ ?? ] \ \
+ // .... | y_rsp_offset |
+ // [ y_lo ] / (in bytes) | x_rsp_offset
+ // [ y_hi ] | (in bytes)
+ // .... |
+ // [ x_lo ] /
+ // [ x_hi ]
+ // ....
+ //
+ // Basic idea: lo(result) = lo(x_lo * y_lo)
+ // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
+ Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
+ Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
+ Label quick;
+ // load x_hi, y_hi and check if quick
+ // multiplication is possible
+ movl(rbx, x_hi);
+ movl(rcx, y_hi);
+ movl(rax, rbx);
+ orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0
+ jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply
+ // do full multiplication
+ // 1st step
+ mull(y_lo); // x_hi * y_lo
+ movl(rbx, rax); // save lo(x_hi * y_lo) in rbx,
+ // 2nd step
+ movl(rax, x_lo);
+ mull(rcx); // x_lo * y_hi
+ addl(rbx, rax); // add lo(x_lo * y_hi) to rbx,
+ // 3rd step
+ bind(quick); // note: rbx, = 0 if quick multiply!
+ movl(rax, x_lo);
+ mull(y_lo); // x_lo * y_lo
+ addl(rdx, rbx); // correct hi(x_lo * y_lo)
+}
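// The same computation as a plain C++ sketch (illustrative only, assumes <cstdint>;
// the "quick" path above simply skips the two cross terms when x_hi == y_hi == 0):
static inline int64_t lmul_reference(int64_t x, int64_t y) {
  uint32_t x_lo = (uint32_t) x,         y_lo = (uint32_t) y;
  uint32_t x_hi = (uint32_t)(x >> 32),  y_hi = (uint32_t)(y >> 32);
  uint64_t lo_prod = (uint64_t) x_lo * y_lo;                // mull(y_lo)
  uint32_t hi = (uint32_t)(lo_prod >> 32)
              + x_hi * y_lo + x_lo * y_hi;                  // the two cross terms
  return (int64_t)(((uint64_t) hi << 32) | (uint32_t) lo_prod);
}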
+
+void MacroAssembler::lneg(Register hi, Register lo) {
+ negl(lo);
+ adcl(hi, 0);
+ negl(hi);
+}
+
+void MacroAssembler::lshl(Register hi, Register lo) {
+ // Java shift left long support (semantics as described in JVM spec., p.305)
+ // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
+ // shift value is in rcx !
+ assert(hi != rcx, "must not use rcx");
+ assert(lo != rcx, "must not use rcx");
+ const Register s = rcx; // shift count
+ const int n = BitsPerWord;
+ Label L;
+ andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
+ cmpl(s, n); // if (s < n)
+ jcc(Assembler::less, L); // else (s >= n)
+ movl(hi, lo); // x := x << n
+ xorl(lo, lo);
+ // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
+ bind(L); // s (mod n) < n
+ shldl(hi, lo); // x := x << s
+ shll(lo);
+}
+
+
+void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
+ // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
+ // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
+ assert(hi != rcx, "must not use rcx");
+ assert(lo != rcx, "must not use rcx");
+ const Register s = rcx; // shift count
+ const int n = BitsPerWord;
+ Label L;
+ andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
+ cmpl(s, n); // if (s < n)
+ jcc(Assembler::less, L); // else (s >= n)
+ movl(lo, hi); // x := x >> n
+ if (sign_extension) sarl(hi, 31);
+ else xorl(hi, hi);
+ // Note: subl(s, n) is not needed since the Intel shift instructions work rcx mod n!
+ bind(L); // s (mod n) < n
+ shrdl(lo, hi); // x := x >> s
+ if (sign_extension) sarl(hi);
+ else shrl(hi);
}
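// Reference semantics for lshl/lshr above (illustrative; Java <<, >> and >>> mask
// the shift count to six bits -- the code masks with 0x3f explicitly and relies on
// the hardware's mod-32 behaviour for the remainder):
static inline int64_t lshl_reference(int64_t x, int s) {
  return (int64_t)((uint64_t) x << (s & 0x3f));
}
static inline int64_t lshr_reference(int64_t x, int s, bool sign_extension) {
  s &= 0x3f;
  return sign_extension ? (x >> s)                        // Java >>
                        : (int64_t)((uint64_t) x >> s);   // Java >>>
}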
void MacroAssembler::movoop(Register dst, jobject obj) {
- mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
+ mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
void MacroAssembler::movoop(Address dst, jobject obj) {
- mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
- movq(dst, rscratch1);
+ mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
}
void MacroAssembler::movptr(Register dst, AddressLiteral src) {
-#ifdef _LP64
- if (src.is_lval()) {
- mov_literal64(dst, (intptr_t)src.target(), src.rspec());
- } else {
- if (reachable(src)) {
- movq(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movq(dst, Address(rscratch1,0));
- }
- }
-#else
if (src.is_lval()) {
mov_literal32(dst, (intptr_t)src.target(), src.rspec());
} else {
movl(dst, as_Address(src));
}
-#endif // LP64
}
void MacroAssembler::movptr(ArrayAddress dst, Register src) {
-#ifdef _LP64
- movq(as_Address(dst), src);
-#else
movl(as_Address(dst), src);
-#endif // _LP64
}
+void MacroAssembler::movptr(Register dst, ArrayAddress src) {
+ movl(dst, as_Address(src));
+}
+
+// src should NEVER be a real pointer. Use AddressLiteral for true pointers
+void MacroAssembler::movptr(Address dst, intptr_t src) {
+ movl(dst, src);
+}
+
+
+void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
+ movsd(dst, as_Address(src));
+}
+
+void MacroAssembler::pop_callee_saved_registers() {
+ pop(rcx);
+ pop(rdx);
+ pop(rdi);
+ pop(rsi);
+}
+
+void MacroAssembler::pop_fTOS() {
+ fld_d(Address(rsp, 0));
+ addl(rsp, 2 * wordSize);
+}
+
+void MacroAssembler::push_callee_saved_registers() {
+ push(rsi);
+ push(rdi);
+ push(rdx);
+ push(rcx);
+}
+
+void MacroAssembler::push_fTOS() {
+ subl(rsp, 2 * wordSize);
+ fstp_d(Address(rsp, 0));
+}
+
+
void MacroAssembler::pushoop(jobject obj) {
-#ifdef _LP64
- movoop(rscratch1, obj);
- pushq(rscratch1);
-#else
push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
-#endif // _LP64
}
+
void MacroAssembler::pushptr(AddressLiteral src) {
-#ifdef _LP64
- lea(rscratch1, src);
if (src.is_lval()) {
- pushq(rscratch1);
+ push_literal32((int32_t)src.target(), src.rspec());
} else {
- pushq(Address(rscratch1, 0));
- }
-#else
- if (src.is_lval()) {
- push_literal((int32_t)src.target(), src.rspec());
- else {
pushl(as_Address(src));
}
-#endif // _LP64
}
-void MacroAssembler::ldmxcsr(AddressLiteral src) {
- if (reachable(src)) {
- Assembler::ldmxcsr(as_Address(src));
- } else {
- lea(rscratch1, src);
- Assembler::ldmxcsr(Address(rscratch1, 0));
- }
+void MacroAssembler::set_word_if_not_zero(Register dst) {
+ xorl(dst, dst);
+ set_byte_if_not_zero(dst);
}
-void MacroAssembler::movlpd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- movlpd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movlpd(dst, Address(rscratch1, 0));
- }
+static void pass_arg0(MacroAssembler* masm, Register arg) {
+ masm->push(arg);
}
-void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- movss(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- movss(dst, Address(rscratch1, 0));
- }
+static void pass_arg1(MacroAssembler* masm, Register arg) {
+ masm->push(arg);
}
-void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- xorpd(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- xorpd(dst, Address(rscratch1, 0));
- }
+
+static void pass_arg2(MacroAssembler* masm, Register arg) {
+ masm->push(arg);
}
-void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
- if (reachable(src)) {
- xorps(dst, as_Address(src));
- } else {
- lea(rscratch1, src);
- xorps(dst, Address(rscratch1, 0));
- }
+static void pass_arg3(MacroAssembler* masm, Register arg) {
+ masm->push(arg);
}
-void MacroAssembler::null_check(Register reg, int offset) {
- if (needs_explicit_null_check(offset)) {
- // provoke OS NULL exception if reg = NULL by
- // accessing M[reg] w/o changing any (non-CC) registers
- cmpq(rax, Address(reg, 0));
- // Note: should probably use testl(rax, Address(reg, 0));
- // may be shorter code (however, this version of
- // testl needs to be implemented first)
+#ifndef PRODUCT
+extern "C" void findpc(intptr_t x);
+#endif
+
+void MacroAssembler::debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
+  // In order to get locks to work, we need to fake an in_VM state
+ JavaThread* thread = JavaThread::current();
+ JavaThreadState saved_state = thread->thread_state();
+ thread->set_thread_state(_thread_in_vm);
+ if (ShowMessageBoxOnError) {
+ JavaThread* thread = JavaThread::current();
+ JavaThreadState saved_state = thread->thread_state();
+ thread->set_thread_state(_thread_in_vm);
+ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+ ttyLocker ttyl;
+ BytecodeCounter::print();
+ }
+ // To see where a verify_oop failed, get $ebx+40/X for this frame.
+ // This is the value of eip which points to where verify_oop will return.
+ if (os::message_box(msg, "Execution stopped, print registers?")) {
+ ttyLocker ttyl;
+ tty->print_cr("eip = 0x%08x", eip);
+#ifndef PRODUCT
+ tty->cr();
+ findpc(eip);
+ tty->cr();
+#endif
+ tty->print_cr("rax, = 0x%08x", rax);
+ tty->print_cr("rbx, = 0x%08x", rbx);
+ tty->print_cr("rcx = 0x%08x", rcx);
+ tty->print_cr("rdx = 0x%08x", rdx);
+ tty->print_cr("rdi = 0x%08x", rdi);
+ tty->print_cr("rsi = 0x%08x", rsi);
+ tty->print_cr("rbp, = 0x%08x", rbp);
+ tty->print_cr("rsp = 0x%08x", rsp);
+ BREAKPOINT;
+ }
} else {
- // nothing to do, (later) access of M[reg + offset]
- // will provoke OS NULL exception if reg = NULL
+ ttyLocker ttyl;
+ ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
+ assert(false, "DEBUG MESSAGE");
}
+ ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
}
-int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
- int off = offset();
- movzbl(dst, src);
- return off;
+void MacroAssembler::stop(const char* msg) {
+ ExternalAddress message((address)msg);
+ // push address of message
+ pushptr(message.addr());
+ { Label L; call(L, relocInfo::none); bind(L); } // push eip
+ pusha(); // push registers
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
+ hlt();
}
-int MacroAssembler::load_unsigned_word(Register dst, Address src) {
- int off = offset();
- movzwl(dst, src);
- return off;
+void MacroAssembler::warn(const char* msg) {
+ push_CPU_state();
+
+ ExternalAddress message((address) msg);
+ // push address of message
+ pushptr(message.addr());
+
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
+ addl(rsp, wordSize); // discard argument
+ pop_CPU_state();
}
-int MacroAssembler::load_signed_byte(Register dst, Address src) {
- int off = offset();
- movsbl(dst, src);
- return off;
+#else // _LP64
+
+// 64 bit versions
+
+Address MacroAssembler::as_Address(AddressLiteral adr) {
+  // amd64 always materializes this as a pc-relative address;
+  // whether the absolute target or the displacement is used depends on the instruction type:
+  // jmp/call use the displacement, the others use the absolute address
+ assert(!adr.is_lval(), "must be rval");
+ assert(reachable(adr), "must be");
+ return Address((int32_t)(intptr_t)(adr.target() - pc()), adr.target(), adr.reloc());
+
}
-int MacroAssembler::load_signed_word(Register dst, Address src) {
- int off = offset();
- movswl(dst, src);
- return off;
+Address MacroAssembler::as_Address(ArrayAddress adr) {
+ AddressLiteral base = adr.base();
+ lea(rscratch1, base);
+ Address index = adr.index();
+ assert(index._disp == 0, "must not have disp"); // maybe it can?
+ Address array(rscratch1, index._index, index._scale, index._disp);
+ return array;
}
-void MacroAssembler::incrementl(Register reg, int value) {
- if (value == min_jint) { addl(reg, value); return; }
- if (value < 0) { decrementl(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incl(reg) ; return; }
- /* else */ { addl(reg, value) ; return; }
+int MacroAssembler::biased_locking_enter(Register lock_reg,
+ Register obj_reg,
+ Register swap_reg,
+ Register tmp_reg,
+ bool swap_reg_contains_mark,
+ Label& done,
+ Label* slow_case,
+ BiasedLockingCounters* counters) {
+ assert(UseBiasedLocking, "why call this otherwise?");
+ assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
+ assert(tmp_reg != noreg, "tmp_reg must be supplied");
+ assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
+ assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
+ Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
+ Address saved_mark_addr(lock_reg, 0);
+
+ if (PrintBiasedLockingStatistics && counters == NULL)
+ counters = BiasedLocking::counters();
+
+ // Biased locking
+ // See whether the lock is currently biased toward our thread and
+ // whether the epoch is still valid
+ // Note that the runtime guarantees sufficient alignment of JavaThread
+ // pointers to allow age to be placed into low bits
+ // First check to see whether biasing is even enabled for this object
+ Label cas_label;
+ int null_check_offset = -1;
+ if (!swap_reg_contains_mark) {
+ null_check_offset = offset();
+ movq(swap_reg, mark_addr);
+ }
+ movq(tmp_reg, swap_reg);
+ andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
+ jcc(Assembler::notEqual, cas_label);
+ // The bias pattern is present in the object's header. Need to check
+ // whether the bias owner and the epoch are both still current.
+ load_prototype_header(tmp_reg, obj_reg);
+ orq(tmp_reg, r15_thread);
+ xorq(tmp_reg, swap_reg);
+ andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
+ }
+ jcc(Assembler::equal, done);
+
+ Label try_revoke_bias;
+ Label try_rebias;
+
+ // At this point we know that the header has the bias pattern and
+ // that we are not the bias owner in the current epoch. We need to
+ // figure out more details about the state of the header in order to
+ // know what operations can be legally performed on the object's
+ // header.
+
+ // If the low three bits in the xor result aren't clear, that means
+ // the prototype header is no longer biased and we have to revoke
+ // the bias on this object.
+ testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
+ jcc(Assembler::notZero, try_revoke_bias);
+
+ // Biasing is still enabled for this data type. See whether the
+ // epoch of the current bias is still valid, meaning that the epoch
+ // bits of the mark word are equal to the epoch bits of the
+ // prototype header. (Note that the prototype header's epoch bits
+ // only change at a safepoint.) If not, attempt to rebias the object
+ // toward the current thread. Note that we must be absolutely sure
+ // that the current epoch is invalid in order to do this because
+ // otherwise the manipulations it performs on the mark word are
+ // illegal.
+ testq(tmp_reg, markOopDesc::epoch_mask_in_place);
+ jcc(Assembler::notZero, try_rebias);
+
+ // The epoch of the current bias is still valid but we know nothing
+ // about the owner; it might be set or it might be clear. Try to
+ // acquire the bias of the object using an atomic operation. If this
+ // fails we will go in to the runtime to revoke the object's bias.
+ // Note that we first construct the presumed unbiased header so we
+ // don't accidentally blow away another thread's valid bias.
+ andq(swap_reg,
+ markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
+ movq(tmp_reg, swap_reg);
+ orq(tmp_reg, r15_thread);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgq(tmp_reg, Address(obj_reg, 0));
+ // If the biasing toward our thread failed, this means that
+ // another thread succeeded in biasing it toward itself and we
+ // need to revoke that bias. The revocation will occur in the
+ // interpreter runtime in the slow case.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
+ }
+ if (slow_case != NULL) {
+ jcc(Assembler::notZero, *slow_case);
+ }
+ jmp(done);
+
+ bind(try_rebias);
+ // At this point we know the epoch has expired, meaning that the
+ // current "bias owner", if any, is actually invalid. Under these
+ // circumstances _only_, we are allowed to use the current header's
+ // value as the comparison value when doing the cas to acquire the
+ // bias in the current epoch. In other words, we allow transfer of
+ // the bias from one thread to another directly in this situation.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ load_prototype_header(tmp_reg, obj_reg);
+ orq(tmp_reg, r15_thread);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgq(tmp_reg, Address(obj_reg, 0));
+ // If the biasing toward our thread failed, then another thread
+ // succeeded in biasing it toward itself and we need to revoke that
+ // bias. The revocation will occur in the runtime in the slow case.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
+ }
+ if (slow_case != NULL) {
+ jcc(Assembler::notZero, *slow_case);
+ }
+ jmp(done);
+
+ bind(try_revoke_bias);
+ // The prototype mark in the klass doesn't have the bias bit set any
+ // more, indicating that objects of this data type are not supposed
+ // to be biased any more. We are going to try to reset the mark of
+ // this object to the prototype value and fall through to the
+ // CAS-based locking scheme. Note that if our CAS fails, it means
+ // that another thread raced us for the privilege of revoking the
+ // bias of this particular object, so it's okay to continue in the
+ // normal locking code.
+ //
+ // FIXME: due to a lack of registers we currently blow away the age
+ // bits in this situation. Should attempt to preserve them.
+ load_prototype_header(tmp_reg, obj_reg);
+ if (os::is_MP()) {
+ lock();
+ }
+ cmpxchgq(tmp_reg, Address(obj_reg, 0));
+ // Fall through to the normal CAS-based lock, because no matter what
+ // the result of the above CAS, some thread must have succeeded in
+ // removing the bias bit from the object's header.
+ if (counters != NULL) {
+ cond_inc32(Assembler::zero,
+ ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
+ }
+
+ bind(cas_label);
+
+ return null_check_offset;
}
-void MacroAssembler::decrementl(Register reg, int value) {
- if (value == min_jint) { subl(reg, value); return; }
- if (value < 0) { incrementl(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decl(reg) ; return; }
- /* else */ { subl(reg, value) ; return; }
+void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
+ Label L, E;
+
+#ifdef _WIN64
+  // Windows always allocates space for its register args
+ assert(num_args <= 4, "only register arguments supported");
+ subq(rsp, frame::arg_reg_save_area_bytes);
+#endif
+
+ // Align stack if necessary
+ testl(rsp, 15);
+ jcc(Assembler::zero, L);
+
+ subq(rsp, 8);
+ {
+ call(RuntimeAddress(entry_point));
+ }
+ addq(rsp, 8);
+ jmp(E);
+
+ bind(L);
+ {
+ call(RuntimeAddress(entry_point));
+ }
+
+ bind(E);
+
+#ifdef _WIN64
+ // restore stack pointer
+ addq(rsp, frame::arg_reg_save_area_bytes);
+#endif
+
}
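// Alignment note (illustrative): the SysV AMD64 ABI wants rsp 16-byte aligned at
// the call instruction.  rsp is always at least 8-byte aligned here, so the
// sequence above amounts to:
//   if ((rsp & 15) != 0) { subq(rsp, 8); call(entry_point); addq(rsp, 8); }
//   else                 { call(entry_point); }
// with the Windows-only arg_reg_save_area_bytes shadow space carved out (and
// restored) around the whole thing.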
-void MacroAssembler::incrementq(Register reg, int value) {
- if (value == min_jint) { addq(reg, value); return; }
- if (value < 0) { decrementq(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incq(reg) ; return; }
- /* else */ { addq(reg, value) ; return; }
+void MacroAssembler::cmp64(Register src1, AddressLiteral src2) {
+ assert(!src2.is_lval(), "should use cmpptr");
+
+ if (reachable(src2)) {
+ cmpq(src1, as_Address(src2));
+ } else {
+ lea(rscratch1, src2);
+ Assembler::cmpq(src1, Address(rscratch1, 0));
+ }
+}
+
+int MacroAssembler::corrected_idivq(Register reg) {
+ // Full implementation of Java ldiv and lrem; checks for special
+ // case as described in JVM spec., p.243 & p.271. The function
+ // returns the (pc) offset of the idivq instruction - may be needed
+ // for implicit exceptions.
+ //
+ // normal case special case
+ //
+ // input : rax: dividend min_long
+ // reg: divisor (may not be rax/rdx) -1
+ //
+ // output: rax: quotient (= rax idiv reg) min_long
+ // rdx: remainder (= rax irem reg) 0
+ assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
+ static const int64_t min_long = 0x8000000000000000;
+ Label normal_case, special_case;
+
+ // check for special case
+ cmp64(rax, ExternalAddress((address) &min_long));
+ jcc(Assembler::notEqual, normal_case);
+ xorl(rdx, rdx); // prepare rdx for possible special case (where
+ // remainder = 0)
+ cmpq(reg, -1);
+ jcc(Assembler::equal, special_case);
+
+ // handle normal case
+ bind(normal_case);
+ cdqq();
+ int idivq_offset = offset();
+ idivq(reg);
+
+ // normal and special case exit
+ bind(special_case);
+
+ return idivq_offset;
}
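The special case exists because Java defines min_long / -1 as min_long with remainder 0, while the hardware idivq raises a divide-error on exactly that operand pair. A minimal C++ sketch of the semantics being implemented (names are illustrative):

    #include <cstdint>

    void ldiv_lrem_sketch(int64_t dividend, int64_t divisor,
                          int64_t& quotient, int64_t& remainder) {
      const int64_t min_long = INT64_MIN;            // 0x8000000000000000
      if (dividend == min_long && divisor == -1) {   // special case
        quotient  = min_long;                        // min_long / -1 == min_long
        remainder = 0;                               // min_long % -1 == 0
      } else {                                       // normal case: idivq
        quotient  = dividend / divisor;
        remainder = dividend % divisor;
      }
    }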
void MacroAssembler::decrementq(Register reg, int value) {
@@ -3876,20 +5048,31 @@ void MacroAssembler::decrementq(Register reg, int value) {
/* else */ { subq(reg, value) ; return; }
}
-void MacroAssembler::incrementl(Address dst, int value) {
- if (value == min_jint) { addl(dst, value); return; }
- if (value < 0) { decrementl(dst, -value); return; }
+void MacroAssembler::decrementq(Address dst, int value) {
+ if (value == min_jint) { subq(dst, value); return; }
+ if (value < 0) { incrementq(dst, -value); return; }
if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incl(dst) ; return; }
- /* else */ { addl(dst, value) ; return; }
+ if (value == 1 && UseIncDec) { decq(dst) ; return; }
+ /* else */ { subq(dst, value) ; return; }
}
-void MacroAssembler::decrementl(Address dst, int value) {
- if (value == min_jint) { subl(dst, value); return; }
- if (value < 0) { incrementl(dst, -value); return; }
+void MacroAssembler::fat_nop() {
+ // A 5 byte nop that is safe for patching (see patch_verified_entry)
+ // Recommended sequence from 'Software Optimization Guide for the AMD
+ // Hammer Processor'
+ emit_byte(0x66);
+ emit_byte(0x66);
+ emit_byte(0x90);
+ emit_byte(0x66);
+ emit_byte(0x90);
+}
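The sequence is two ordinary x86 nops totalling exactly five bytes, so patch_verified_entry can later overwrite the site with a single 5-byte jump. A sketch of the emitted bytes, assuming standard x86 decoding:

    // 0x66 0x66 0x90 is one nop with two operand-size prefixes (3 bytes);
    // 0x66 0x90 is xchg ax,ax (a 2-byte nop).
    static const unsigned char fat_nop_bytes[5] = {
      0x66, 0x66, 0x90,
      0x66, 0x90
    };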
+
+void MacroAssembler::incrementq(Register reg, int value) {
+ if (value == min_jint) { addq(reg, value); return; }
+ if (value < 0) { decrementq(reg, -value); return; }
if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decl(dst) ; return; }
- /* else */ { subl(dst, value) ; return; }
+ if (value == 1 && UseIncDec) { incq(reg) ; return; }
+ /* else */ { addq(reg, value) ; return; }
}
void MacroAssembler::incrementq(Address dst, int value) {
@@ -3900,81 +5083,111 @@ void MacroAssembler::incrementq(Address dst, int value) {
/* else */ { addq(dst, value) ; return; }
}
-void MacroAssembler::decrementq(Address dst, int value) {
- if (value == min_jint) { subq(dst, value); return; }
- if (value < 0) { incrementq(dst, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decq(dst) ; return; }
- /* else */ { subq(dst, value) ; return; }
+// 32bit can do a case table jump in one instruction but we no longer allow the base
+// to be installed in the Address class
+void MacroAssembler::jump(ArrayAddress entry) {
+ lea(rscratch1, entry.base());
+ Address dispatch = entry.index();
+ assert(dispatch._base == noreg, "must be");
+ dispatch._base = rscratch1;
+ jmp(dispatch);
}
-void MacroAssembler::align(int modulus) {
- if (offset() % modulus != 0) {
- nop(modulus - (offset() % modulus));
- }
+void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
+ ShouldNotReachHere(); // 64bit doesn't use two regs
+ cmpq(x_lo, y_lo);
}
-void MacroAssembler::enter() {
- pushq(rbp);
- movq(rbp, rsp);
+void MacroAssembler::lea(Register dst, AddressLiteral src) {
+ mov_literal64(dst, (intptr_t)src.target(), src.rspec());
+}
+
+void MacroAssembler::lea(Address dst, AddressLiteral adr) {
+ mov_literal64(rscratch1, (intptr_t)adr.target(), adr.rspec());
+ movptr(dst, rscratch1);
}
void MacroAssembler::leave() {
+ // %%% is this really better? Why not on 32bit too?
emit_byte(0xC9); // LEAVE
}
-// C++ bool manipulation
+void MacroAssembler::lneg(Register hi, Register lo) {
+ ShouldNotReachHere(); // 64bit doesn't use two regs
+ negq(lo);
+}
-void MacroAssembler::movbool(Register dst, Address src) {
- if(sizeof(bool) == 1)
- movb(dst, src);
- else if(sizeof(bool) == 2)
- movw(dst, src);
- else if(sizeof(bool) == 4)
- movl(dst, src);
- else {
- // unsupported
- ShouldNotReachHere();
- }
+void MacroAssembler::movoop(Register dst, jobject obj) {
+ mov_literal64(dst, (intptr_t)obj, oop_Relocation::spec_for_immediate());
}
-void MacroAssembler::movbool(Address dst, bool boolconst) {
- if(sizeof(bool) == 1)
- movb(dst, (int) boolconst);
- else if(sizeof(bool) == 2)
- movw(dst, (int) boolconst);
- else if(sizeof(bool) == 4)
- movl(dst, (int) boolconst);
- else {
- // unsupported
- ShouldNotReachHere();
+void MacroAssembler::movoop(Address dst, jobject obj) {
+ mov_literal64(rscratch1, (intptr_t)obj, oop_Relocation::spec_for_immediate());
+ movq(dst, rscratch1);
+}
+
+void MacroAssembler::movptr(Register dst, AddressLiteral src) {
+ if (src.is_lval()) {
+ mov_literal64(dst, (intptr_t)src.target(), src.rspec());
+ } else {
+ if (reachable(src)) {
+ movq(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ movq(dst, Address(rscratch1,0));
+ }
}
}
-void MacroAssembler::movbool(Address dst, Register src) {
- if(sizeof(bool) == 1)
- movb(dst, src);
- else if(sizeof(bool) == 2)
- movw(dst, src);
- else if(sizeof(bool) == 4)
- movl(dst, src);
- else {
- // unsupported
- ShouldNotReachHere();
+void MacroAssembler::movptr(ArrayAddress dst, Register src) {
+ movq(as_Address(dst), src);
+}
+
+void MacroAssembler::movptr(Register dst, ArrayAddress src) {
+ movq(dst, as_Address(src));
+}
+
+// src should NEVER be a real pointer. Use AddressLiteral for true pointers
+void MacroAssembler::movptr(Address dst, intptr_t src) {
+ mov64(rscratch1, src);
+ movq(dst, rscratch1);
+}
+
+// These are mostly for initializing NULL
+void MacroAssembler::movptr(Address dst, int32_t src) {
+ movslq(dst, src);
+}
+
+void MacroAssembler::movptr(Register dst, int32_t src) {
+ mov64(dst, (intptr_t)src);
+}
+
+void MacroAssembler::pushoop(jobject obj) {
+ movoop(rscratch1, obj);
+ push(rscratch1);
+}
+
+void MacroAssembler::pushptr(AddressLiteral src) {
+ lea(rscratch1, src);
+ if (src.is_lval()) {
+ push(rscratch1);
+ } else {
+ pushq(Address(rscratch1, 0));
}
}
-void MacroAssembler::testbool(Register dst) {
- if(sizeof(bool) == 1)
- testb(dst, (int) 0xff);
- else if(sizeof(bool) == 2) {
- // need testw impl
- ShouldNotReachHere();
- } else if(sizeof(bool) == 4)
- testl(dst, dst);
- else {
- // unsupported
- ShouldNotReachHere();
+void MacroAssembler::reset_last_Java_frame(bool clear_fp,
+ bool clear_pc) {
+ // we must set sp to zero to clear frame
+ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
+ // must clear fp, so that compiled frames are not confused; it is
+ // possible that we need it only for debugging
+ if (clear_fp) {
+ movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
+ }
+
+ if (clear_pc) {
+ movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);
}
}
@@ -3988,8 +5201,8 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp,
// last_java_fp is optional
if (last_java_fp->is_valid()) {
- movq(Address(r15_thread, JavaThread::last_Java_fp_offset()),
- last_java_fp);
+ movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()),
+ last_java_fp);
}
// last_java_pc is optional
@@ -3997,188 +5210,224 @@ void MacroAssembler::set_last_Java_frame(Register last_java_sp,
Address java_pc(r15_thread,
JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset());
lea(rscratch1, InternalAddress(last_java_pc));
- movq(java_pc, rscratch1);
+ movptr(java_pc, rscratch1);
}
- movq(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
+ movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
-void MacroAssembler::reset_last_Java_frame(bool clear_fp,
- bool clear_pc) {
- // we must set sp to zero to clear frame
- movptr(Address(r15_thread, JavaThread::last_Java_sp_offset()), NULL_WORD);
- // must clear fp, so that compiled frames are not confused; it is
- // possible that we need it only for debugging
- if (clear_fp) {
- movptr(Address(r15_thread, JavaThread::last_Java_fp_offset()), NULL_WORD);
+static void pass_arg0(MacroAssembler* masm, Register arg) {
+ if (c_rarg0 != arg ) {
+ masm->mov(c_rarg0, arg);
}
+}
- if (clear_pc) {
- movptr(Address(r15_thread, JavaThread::last_Java_pc_offset()), NULL_WORD);
+static void pass_arg1(MacroAssembler* masm, Register arg) {
+ if (c_rarg1 != arg ) {
+ masm->mov(c_rarg1, arg);
}
}
-
-// Implementation of call_VM versions
-
-void MacroAssembler::call_VM_leaf_base(address entry_point, int num_args) {
- Label L, E;
-
-#ifdef _WIN64
- // Windows always allocates space for it's register args
- assert(num_args <= 4, "only register arguments supported");
- subq(rsp, frame::arg_reg_save_area_bytes);
-#endif
-
- // Align stack if necessary
- testl(rsp, 15);
- jcc(Assembler::zero, L);
-
- subq(rsp, 8);
- {
- call(RuntimeAddress(entry_point));
+static void pass_arg2(MacroAssembler* masm, Register arg) {
+ if (c_rarg2 != arg ) {
+ masm->mov(c_rarg2, arg);
}
- addq(rsp, 8);
- jmp(E);
+}
- bind(L);
- {
- call(RuntimeAddress(entry_point));
+static void pass_arg3(MacroAssembler* masm, Register arg) {
+ if (c_rarg3 != arg ) {
+ masm->mov(c_rarg3, arg);
}
+}
- bind(E);
+void MacroAssembler::stop(const char* msg) {
+ address rip = pc();
+ pusha(); // get regs on stack
+ lea(c_rarg0, ExternalAddress((address) msg));
+ lea(c_rarg1, InternalAddress(rip));
+ movq(c_rarg2, rsp); // pass pointer to regs array
+ andq(rsp, -16); // align stack as required by ABI
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
+ hlt();
+}
-#ifdef _WIN64
- // restore stack pointer
- addq(rsp, frame::arg_reg_save_area_bytes);
-#endif
+void MacroAssembler::warn(const char* msg) {
+ push(r12);
+ movq(r12, rsp);
+ andq(rsp, -16); // align stack as required by push_CPU_state and call
+
+ push_CPU_state(); // keeps alignment at 16 bytes
+ lea(c_rarg0, ExternalAddress((address) msg));
+ call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
+ pop_CPU_state();
+ movq(rsp, r12);
+ pop(r12);
}
+#ifndef PRODUCT
+extern "C" void findpc(intptr_t x);
+#endif
-void MacroAssembler::call_VM_base(Register oop_result,
- Register java_thread,
- Register last_java_sp,
- address entry_point,
- int num_args,
- bool check_exceptions) {
- // determine last_java_sp register
- if (!last_java_sp->is_valid()) {
- last_java_sp = rsp;
+void MacroAssembler::debug64(char* msg, int64_t pc, int64_t regs[]) {
+ // In order to get locks to work, we need to fake an in_VM state
+ if (ShowMessageBoxOnError ) {
+ JavaThread* thread = JavaThread::current();
+ JavaThreadState saved_state = thread->thread_state();
+ thread->set_thread_state(_thread_in_vm);
+#ifndef PRODUCT
+ if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
+ ttyLocker ttyl;
+ BytecodeCounter::print();
+ }
+#endif
+ // To see where a verify_oop failed, get $ebx+40/X for this frame.
+ // XXX correct this offset for amd64
+ // This is the value of eip which points to where verify_oop will return.
+ if (os::message_box(msg, "Execution stopped, print registers?")) {
+ ttyLocker ttyl;
+ tty->print_cr("rip = 0x%016lx", pc);
+#ifndef PRODUCT
+ tty->cr();
+ findpc(pc);
+ tty->cr();
+#endif
+ tty->print_cr("rax = 0x%016lx", regs[15]);
+ tty->print_cr("rbx = 0x%016lx", regs[12]);
+ tty->print_cr("rcx = 0x%016lx", regs[14]);
+ tty->print_cr("rdx = 0x%016lx", regs[13]);
+ tty->print_cr("rdi = 0x%016lx", regs[8]);
+ tty->print_cr("rsi = 0x%016lx", regs[9]);
+ tty->print_cr("rbp = 0x%016lx", regs[10]);
+ tty->print_cr("rsp = 0x%016lx", regs[11]);
+ tty->print_cr("r8 = 0x%016lx", regs[7]);
+ tty->print_cr("r9 = 0x%016lx", regs[6]);
+ tty->print_cr("r10 = 0x%016lx", regs[5]);
+ tty->print_cr("r11 = 0x%016lx", regs[4]);
+ tty->print_cr("r12 = 0x%016lx", regs[3]);
+ tty->print_cr("r13 = 0x%016lx", regs[2]);
+ tty->print_cr("r14 = 0x%016lx", regs[1]);
+ tty->print_cr("r15 = 0x%016lx", regs[0]);
+ BREAKPOINT;
+ }
+ ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
+ } else {
+ ttyLocker ttyl;
+ ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
+ msg);
}
+}
- // debugging support
- assert(num_args >= 0, "cannot have negative number of arguments");
- assert(r15_thread != oop_result,
- "cannot use the same register for java_thread & oop_result");
- assert(r15_thread != last_java_sp,
- "cannot use the same register for java_thread & last_java_sp");
+#endif // _LP64
- // set last Java frame before call
+// Now versions that are common to 32/64 bit
- // This sets last_Java_fp which is only needed from interpreted frames
- // and should really be done only from the interp_masm version before
- // calling the underlying call_VM. That doesn't happen yet so we set
- // last_Java_fp here even though some callers don't need it and
- // also clear it below.
- set_last_Java_frame(last_java_sp, rbp, NULL);
+void MacroAssembler::addptr(Register dst, int32_t imm32) {
+ LP64_ONLY(addq(dst, imm32)) NOT_LP64(addl(dst, imm32));
+}
- {
- Label L, E;
+void MacroAssembler::addptr(Register dst, Register src) {
+ LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
+}
- // Align stack if necessary
-#ifdef _WIN64
- assert(num_args <= 4, "only register arguments supported");
- // Windows always allocates space for it's register args
- subq(rsp, frame::arg_reg_save_area_bytes);
-#endif
- testl(rsp, 15);
- jcc(Assembler::zero, L);
+void MacroAssembler::addptr(Address dst, Register src) {
+ LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src));
+}
- subq(rsp, 8);
- {
- call(RuntimeAddress(entry_point));
- }
- addq(rsp, 8);
- jmp(E);
+void MacroAssembler::align(int modulus) {
+ if (offset() % modulus != 0) {
+ nop(modulus - (offset() % modulus));
+ }
+}
+void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
+ andpd(dst, as_Address(src));
+}
- bind(L);
- {
- call(RuntimeAddress(entry_point));
- }
+void MacroAssembler::andptr(Register dst, int32_t imm32) {
+ LP64_ONLY(andq(dst, imm32)) NOT_LP64(andl(dst, imm32));
+}
- bind(E);
+void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
+ pushf();
+ if (os::is_MP())
+ lock();
+ incrementl(counter_addr);
+ popf();
+}
-#ifdef _WIN64
- // restore stack pointer
- addq(rsp, frame::arg_reg_save_area_bytes);
-#endif
- }
+// Writes to successive stack pages until the given offset is reached, to check
+// for stack overflow + shadow pages. This clobbers tmp.
+void MacroAssembler::bang_stack_size(Register size, Register tmp) {
+ movptr(tmp, rsp);
+ // Bang stack for total size given plus shadow page size.
+ // Bang one page at a time because large size can bang beyond yellow and
+ // red zones.
+ Label loop;
+ bind(loop);
+ movl(Address(tmp, (-os::vm_page_size())), size );
+ subptr(tmp, os::vm_page_size());
+ subl(size, os::vm_page_size());
+ jcc(Assembler::greater, loop);
-#ifdef ASSERT
- pushq(rax);
- {
- Label L;
- get_thread(rax);
- cmpq(r15_thread, rax);
- jcc(Assembler::equal, L);
- stop("MacroAssembler::call_VM_base: register not callee saved?");
- bind(L);
+ // Bang down shadow pages too.
+ // The -1 because we already subtracted 1 page.
+ for (int i = 0; i< StackShadowPages-1; i++) {
+ // this could be any sized move but this can be a debugging crumb
+ // so the bigger the better.
+ movptr(Address(tmp, (-i*os::vm_page_size())), size );
}
- popq(rax);
-#endif
-
- // reset last Java frame
- // This really shouldn't have to clear fp set note above at the
- // call to set_last_Java_frame
- reset_last_Java_frame(true, false);
-
- check_and_handle_popframe(noreg);
- check_and_handle_earlyret(noreg);
+}
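The loop walks down from rsp, touching each page covering the requested size and then the shadow pages, so an overflow faults here rather than at some arbitrary later store. A rough C++ equivalent, with page_size and shadow_pages standing in for os::vm_page_size() and StackShadowPages:

    void bang_stack_sketch(volatile char* sp, long size,
                           long page_size, int shadow_pages) {
      volatile char* p = sp;
      // Touch one location per page over 'size' bytes below sp (the real
      // code stores a word; one byte is enough to fault the page).
      for (long remaining = size; remaining > 0; remaining -= page_size) {
        p -= page_size;
        *p = 0;
      }
      // Then bang the shadow pages below that; -1 because one page of the
      // shadow region was already covered above.
      for (int i = 0; i < shadow_pages - 1; i++) {
        p -= page_size;
        *p = 0;
      }
    }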
- if (check_exceptions) {
- cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
- // This used to conditionally jump to forward_exception however it is
- // possible if we relocate that the branch will not reach. So we must jump
- // around so we can always reach
- Label ok;
- jcc(Assembler::equal, ok);
- jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
- bind(ok);
- }
+void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
+ assert(UseBiasedLocking, "why call this otherwise?");
- // get oop result if there is one and reset the value in the thread
- if (oop_result->is_valid()) {
- movq(oop_result, Address(r15_thread, JavaThread::vm_result_offset()));
- movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
- verify_oop(oop_result, "broken oop in call_VM_base");
- }
+ // Check for biased locking unlock case, which is a no-op
+ // Note: we do not have to check the thread ID for two reasons.
+ // First, the interpreter checks for IllegalMonitorStateException at
+ // a higher level. Second, if the bias was revoked while we held the
+ // lock, the object could not be rebiased toward another thread, so
+ // the bias bit would be clear.
+ movptr(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
+ andptr(temp_reg, markOopDesc::biased_lock_mask_in_place);
+ cmpptr(temp_reg, markOopDesc::biased_lock_pattern);
+ jcc(Assembler::equal, done);
}
-void MacroAssembler::check_and_handle_popframe(Register java_thread) {}
-void MacroAssembler::check_and_handle_earlyret(Register java_thread) {}
+void MacroAssembler::c2bool(Register x) {
+ // implements x == 0 ? 0 : 1
+ // note: must only look at least-significant byte of x
+ // since C-style booleans are stored in one byte
+ // only! (was bug)
+ andl(x, 0xFF);
+ setb(Assembler::notZero, x);
+}
-void MacroAssembler::call_VM_helper(Register oop_result,
- address entry_point,
- int num_args,
- bool check_exceptions) {
- // Java thread becomes first argument of C function
- movq(c_rarg0, r15_thread);
+// Wouldn't need if AddressLiteral version had new name
+void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
+ Assembler::call(L, rtype);
+}
- // We've pushed one address, correct last_Java_sp
- leaq(rax, Address(rsp, wordSize));
+void MacroAssembler::call(Register entry) {
+ Assembler::call(entry);
+}
- call_VM_base(oop_result, noreg, rax, entry_point, num_args,
- check_exceptions);
+void MacroAssembler::call(AddressLiteral entry) {
+ if (reachable(entry)) {
+ Assembler::call_literal(entry.target(), entry.rspec());
+ } else {
+ lea(rscratch1, entry);
+ Assembler::call(rscratch1);
+ }
}
+// Implementation of call_VM versions
void MacroAssembler::call_VM(Register oop_result,
address entry_point,
bool check_exceptions) {
Label C, E;
- Assembler::call(C, relocInfo::none);
+ call(C, relocInfo::none);
jmp(E);
bind(C);
@@ -4188,23 +5437,16 @@ void MacroAssembler::call_VM(Register oop_result,
bind(E);
}
-
void MacroAssembler::call_VM(Register oop_result,
address entry_point,
Register arg_1,
bool check_exceptions) {
- assert(rax != arg_1, "smashed argument");
- assert(c_rarg0 != arg_1, "smashed argument");
-
Label C, E;
- Assembler::call(C, relocInfo::none);
+ call(C, relocInfo::none);
jmp(E);
bind(C);
- // c_rarg0 is reserved for thread
- if (c_rarg1 != arg_1) {
- movq(c_rarg1, arg_1);
- }
+ pass_arg1(this, arg_1);
call_VM_helper(oop_result, entry_point, 1, check_exceptions);
ret(0);
@@ -4216,66 +5458,42 @@ void MacroAssembler::call_VM(Register oop_result,
Register arg_1,
Register arg_2,
bool check_exceptions) {
- assert(rax != arg_1, "smashed argument");
- assert(rax != arg_2, "smashed argument");
- assert(c_rarg0 != arg_1, "smashed argument");
- assert(c_rarg0 != arg_2, "smashed argument");
- assert(c_rarg1 != arg_2, "smashed argument");
- assert(c_rarg2 != arg_1, "smashed argument");
-
Label C, E;
- Assembler::call(C, relocInfo::none);
+ call(C, relocInfo::none);
jmp(E);
bind(C);
- // c_rarg0 is reserved for thread
- if (c_rarg1 != arg_1) {
- movq(c_rarg1, arg_1);
- }
- if (c_rarg2 != arg_2) {
- movq(c_rarg2, arg_2);
- }
+
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+
+ pass_arg2(this, arg_2);
+ pass_arg1(this, arg_1);
call_VM_helper(oop_result, entry_point, 2, check_exceptions);
ret(0);
bind(E);
}
-
void MacroAssembler::call_VM(Register oop_result,
address entry_point,
Register arg_1,
Register arg_2,
Register arg_3,
bool check_exceptions) {
- assert(rax != arg_1, "smashed argument");
- assert(rax != arg_2, "smashed argument");
- assert(rax != arg_3, "smashed argument");
- assert(c_rarg0 != arg_1, "smashed argument");
- assert(c_rarg0 != arg_2, "smashed argument");
- assert(c_rarg0 != arg_3, "smashed argument");
- assert(c_rarg1 != arg_2, "smashed argument");
- assert(c_rarg1 != arg_3, "smashed argument");
- assert(c_rarg2 != arg_1, "smashed argument");
- assert(c_rarg2 != arg_3, "smashed argument");
- assert(c_rarg3 != arg_1, "smashed argument");
- assert(c_rarg3 != arg_2, "smashed argument");
-
Label C, E;
- Assembler::call(C, relocInfo::none);
+ call(C, relocInfo::none);
jmp(E);
bind(C);
- // c_rarg0 is reserved for thread
- if (c_rarg1 != arg_1) {
- movq(c_rarg1, arg_1);
- }
- if (c_rarg2 != arg_2) {
- movq(c_rarg2, arg_2);
- }
- if (c_rarg3 != arg_3) {
- movq(c_rarg3, arg_3);
- }
+
+ LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+ LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+ pass_arg3(this, arg_3);
+
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+
+ pass_arg1(this, arg_1);
call_VM_helper(oop_result, entry_point, 3, check_exceptions);
ret(0);
@@ -4285,10 +5503,10 @@ void MacroAssembler::call_VM(Register oop_result,
void MacroAssembler::call_VM(Register oop_result,
Register last_java_sp,
address entry_point,
- int num_args,
+ int number_of_arguments,
bool check_exceptions) {
- call_VM_base(oop_result, noreg, last_java_sp, entry_point, num_args,
- check_exceptions);
+ Register thread = LP64_ONLY(r15_thread) NOT_LP64(noreg);
+ call_VM_base(oop_result, thread, last_java_sp, entry_point, number_of_arguments, check_exceptions);
}
void MacroAssembler::call_VM(Register oop_result,
@@ -4296,12 +5514,7 @@ void MacroAssembler::call_VM(Register oop_result,
address entry_point,
Register arg_1,
bool check_exceptions) {
- assert(c_rarg0 != arg_1, "smashed argument");
- assert(c_rarg1 != last_java_sp, "smashed argument");
- // c_rarg0 is reserved for thread
- if (c_rarg1 != arg_1) {
- movq(c_rarg1, arg_1);
- }
+ pass_arg1(this, arg_1);
call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
}
@@ -4311,23 +5524,13 @@ void MacroAssembler::call_VM(Register oop_result,
Register arg_1,
Register arg_2,
bool check_exceptions) {
- assert(c_rarg0 != arg_1, "smashed argument");
- assert(c_rarg0 != arg_2, "smashed argument");
- assert(c_rarg1 != arg_2, "smashed argument");
- assert(c_rarg1 != last_java_sp, "smashed argument");
- assert(c_rarg2 != arg_1, "smashed argument");
- assert(c_rarg2 != last_java_sp, "smashed argument");
- // c_rarg0 is reserved for thread
- if (c_rarg1 != arg_1) {
- movq(c_rarg1, arg_1);
- }
- if (c_rarg2 != arg_2) {
- movq(c_rarg2, arg_2);
- }
+
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ pass_arg1(this, arg_1);
call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
}
-
void MacroAssembler::call_VM(Register oop_result,
Register last_java_sp,
address entry_point,
@@ -4335,168 +5538,333 @@ void MacroAssembler::call_VM(Register oop_result,
Register arg_2,
Register arg_3,
bool check_exceptions) {
- assert(c_rarg0 != arg_1, "smashed argument");
- assert(c_rarg0 != arg_2, "smashed argument");
- assert(c_rarg0 != arg_3, "smashed argument");
- assert(c_rarg1 != arg_2, "smashed argument");
- assert(c_rarg1 != arg_3, "smashed argument");
- assert(c_rarg1 != last_java_sp, "smashed argument");
- assert(c_rarg2 != arg_1, "smashed argument");
- assert(c_rarg2 != arg_3, "smashed argument");
- assert(c_rarg2 != last_java_sp, "smashed argument");
- assert(c_rarg3 != arg_1, "smashed argument");
- assert(c_rarg3 != arg_2, "smashed argument");
- assert(c_rarg3 != last_java_sp, "smashed argument");
- // c_rarg0 is reserved for thread
- if (c_rarg1 != arg_1) {
- movq(c_rarg1, arg_1);
- }
- if (c_rarg2 != arg_2) {
- movq(c_rarg2, arg_2);
- }
- if (c_rarg3 != arg_3) {
- movq(c_rarg2, arg_3);
- }
+ LP64_ONLY(assert(arg_1 != c_rarg3, "smashed arg"));
+ LP64_ONLY(assert(arg_2 != c_rarg3, "smashed arg"));
+ pass_arg3(this, arg_3);
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ pass_arg1(this, arg_1);
call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
}
-void MacroAssembler::call_VM_leaf(address entry_point, int num_args) {
- call_VM_leaf_base(entry_point, num_args);
-}
+void MacroAssembler::call_VM_base(Register oop_result,
+ Register java_thread,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments,
+ bool check_exceptions) {
+ // determine java_thread register
+ if (!java_thread->is_valid()) {
+#ifdef _LP64
+ java_thread = r15_thread;
+#else
+ java_thread = rdi;
+ get_thread(java_thread);
+#endif // LP64
+ }
+ // determine last_java_sp register
+ if (!last_java_sp->is_valid()) {
+ last_java_sp = rsp;
+ }
+ // debugging support
+ assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
+ LP64_ONLY(assert(java_thread == r15_thread, "unexpected register"));
+ assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
+ assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
+
+ // push java thread (becomes first argument of C function)
+
+ NOT_LP64(push(java_thread); number_of_arguments++);
+ LP64_ONLY(mov(c_rarg0, r15_thread));
+
+ // set last Java frame before call
+ assert(last_java_sp != rbp, "can't use ebp/rbp");
+
+ // Only interpreter should have to set fp
+ set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
+
+ // do the call, remove parameters
+ MacroAssembler::call_VM_leaf_base(entry_point, number_of_arguments);
-void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
- if (c_rarg0 != arg_1) {
- movq(c_rarg0, arg_1);
+ // restore the thread (cannot use the pushed argument since arguments
+ // may be overwritten by C code generated by an optimizing compiler);
+ // however can use the register value directly if it is callee saved.
+ if (LP64_ONLY(true ||) java_thread == rdi || java_thread == rsi) {
+ // rdi & rsi (also r15) are callee saved -> nothing to do
+#ifdef ASSERT
+ guarantee(java_thread != rax, "change this code");
+ push(rax);
+ { Label L;
+ get_thread(rax);
+ cmpptr(java_thread, rax);
+ jcc(Assembler::equal, L);
+ stop("MacroAssembler::call_VM_base: rdi not callee saved?");
+ bind(L);
+ }
+ pop(rax);
+#endif
+ } else {
+ get_thread(java_thread);
}
- call_VM_leaf(entry_point, 1);
-}
+ // reset last Java frame
+ // Only interpreter should have to clear fp
+ reset_last_Java_frame(java_thread, true, false);
+
+#ifndef CC_INTERP
+ // C++ interp handles this in the interpreter
+ check_and_handle_popframe(java_thread);
+ check_and_handle_earlyret(java_thread);
+#endif /* CC_INTERP */
-void MacroAssembler::call_VM_leaf(address entry_point,
- Register arg_1,
- Register arg_2) {
- assert(c_rarg0 != arg_2, "smashed argument");
- assert(c_rarg1 != arg_1, "smashed argument");
- if (c_rarg0 != arg_1) {
- movq(c_rarg0, arg_1);
+ if (check_exceptions) {
+ // check for pending exceptions (java_thread is set upon return)
+ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
+#ifndef _LP64
+ jump_cc(Assembler::notEqual,
+ RuntimeAddress(StubRoutines::forward_exception_entry()));
+#else
+ // This used to conditionally jump to forward_exception however it is
+ // possible if we relocate that the branch will not reach. So we must jump
+ // around so we can always reach
+
+ Label ok;
+ jcc(Assembler::equal, ok);
+ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
+ bind(ok);
+#endif // LP64
}
- if (c_rarg1 != arg_2) {
- movq(c_rarg1, arg_2);
+
+ // get oop result if there is one and reset the value in the thread
+ if (oop_result->is_valid()) {
+ movptr(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
+ movptr(Address(java_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+ verify_oop(oop_result, "broken oop in call_VM_base");
}
+}
+
+void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
+
+ // Calculate the value for last_Java_sp
+ // somewhat subtle. call_VM does an intermediate call
+ // which places a return address on the stack just under the
+ // stack pointer as the user finished with it. This allows
+ // us to retrieve last_Java_pc from last_Java_sp[-1].
+ // On 32bit we then have to push additional args on the stack to accomplish
+ // the actual requested call. On 64bit call_VM only can use register args
+ // so the only extra space is the return address that call_VM created.
+ // This hopefully explains the calculations here.
+
+#ifdef _LP64
+ // We've pushed one address, correct last_Java_sp
+ lea(rax, Address(rsp, wordSize));
+#else
+ lea(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
+#endif // LP64
+
+ call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
+
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
+ call_VM_leaf_base(entry_point, number_of_arguments);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0) {
+ pass_arg0(this, arg_0);
+ call_VM_leaf(entry_point, 1);
+}
+
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1) {
+
+ LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+ pass_arg1(this, arg_1);
+ pass_arg0(this, arg_0);
call_VM_leaf(entry_point, 2);
}
-void MacroAssembler::call_VM_leaf(address entry_point,
- Register arg_1,
- Register arg_2,
- Register arg_3) {
- assert(c_rarg0 != arg_2, "smashed argument");
- assert(c_rarg0 != arg_3, "smashed argument");
- assert(c_rarg1 != arg_1, "smashed argument");
- assert(c_rarg1 != arg_3, "smashed argument");
- assert(c_rarg2 != arg_1, "smashed argument");
- assert(c_rarg2 != arg_2, "smashed argument");
- if (c_rarg0 != arg_1) {
- movq(c_rarg0, arg_1);
- }
- if (c_rarg1 != arg_2) {
- movq(c_rarg1, arg_2);
+void MacroAssembler::call_VM_leaf(address entry_point, Register arg_0, Register arg_1, Register arg_2) {
+ LP64_ONLY(assert(arg_0 != c_rarg2, "smashed arg"));
+ LP64_ONLY(assert(arg_1 != c_rarg2, "smashed arg"));
+ pass_arg2(this, arg_2);
+ LP64_ONLY(assert(arg_0 != c_rarg1, "smashed arg"));
+ pass_arg1(this, arg_1);
+ pass_arg0(this, arg_0);
+ call_VM_leaf(entry_point, 3);
+}
+
+void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
+}
+
+void MacroAssembler::check_and_handle_popframe(Register java_thread) {
+}
+
+void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
+ if (reachable(src1)) {
+ cmpl(as_Address(src1), imm);
+ } else {
+ lea(rscratch1, src1);
+ cmpl(Address(rscratch1, 0), imm);
}
- if (c_rarg2 != arg_3) {
- movq(c_rarg2, arg_3);
+}
+
+void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
+ assert(!src2.is_lval(), "use cmpptr");
+ if (reachable(src2)) {
+ cmpl(src1, as_Address(src2));
+ } else {
+ lea(rscratch1, src2);
+ cmpl(src1, Address(rscratch1, 0));
}
- call_VM_leaf(entry_point, 3);
}
+void MacroAssembler::cmp32(Register src1, int32_t imm) {
+ Assembler::cmpl(src1, imm);
+}
-// Calls to C land
-//
-// When entering C land, the rbp & rsp of the last Java frame have to
-// be recorded in the (thread-local) JavaThread object. When leaving C
-// land, the last Java fp has to be reset to 0. This is required to
-// allow proper stack traversal.
-void MacroAssembler::store_check(Register obj) {
- // Does a store check for the oop in register obj. The content of
- // register obj is destroyed afterwards.
- store_check_part_1(obj);
- store_check_part_2(obj);
+void MacroAssembler::cmp32(Register src1, Address src2) {
+ Assembler::cmpl(src1, src2);
}
-void MacroAssembler::store_check(Register obj, Address dst) {
- store_check(obj);
+void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
+ ucomisd(opr1, opr2);
+
+ Label L;
+ if (unordered_is_less) {
+ movl(dst, -1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::below , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ increment(dst);
+ } else { // unordered is greater
+ movl(dst, 1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::above , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ decrementl(dst);
+ }
+ bind(L);
}
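The flag dance maps the ucomisd result onto -1/0/+1, with NaN operands (the parity case) forced to whichever end unordered_is_less selects; the float variant below does the same. A plain C++ sketch of the mapping:

    #include <cmath>

    int cmpsd2int_sketch(double a, double b, bool unordered_is_less) {
      if (std::isnan(a) || std::isnan(b)) {   // unordered: ucomisd sets parity
        return unordered_is_less ? -1 : 1;
      }
      if (a < b)  return -1;
      if (a == b) return  0;
      return 1;                               // a > b
    }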
-// split the store check operation so that other instructions can be
-// scheduled inbetween
-void MacroAssembler::store_check_part_1(Register obj) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- shrq(obj, CardTableModRefBS::card_shift);
+void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
+ ucomiss(opr1, opr2);
+
+ Label L;
+ if (unordered_is_less) {
+ movl(dst, -1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::below , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ increment(dst);
+ } else { // unordered is greater
+ movl(dst, 1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::above , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ decrementl(dst);
+ }
+ bind(L);
}
-void MacroAssembler::store_check_part_2(Register obj) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
- // The calculation for byte_map_base is as follows:
- // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
- // So this essentially converts an address to a displacement and
- // it will never need to be relocated. On 64bit however the value may be too
- // large for a 32bit displacement
+void MacroAssembler::cmp8(AddressLiteral src1, int imm) {
+ if (reachable(src1)) {
+ cmpb(as_Address(src1), imm);
+ } else {
+ lea(rscratch1, src1);
+ cmpb(Address(rscratch1, 0), imm);
+ }
+}
- intptr_t disp = (intptr_t) ct->byte_map_base;
- if (is_simm32(disp)) {
- Address cardtable(noreg, obj, Address::times_1, disp);
- movb(cardtable, 0);
+void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
+#ifdef _LP64
+ if (src2.is_lval()) {
+ movptr(rscratch1, src2);
+ Assembler::cmpq(src1, rscratch1);
+ } else if (reachable(src2)) {
+ cmpq(src1, as_Address(src2));
} else {
- // By doing it as an ExternalAddress disp could be converted to a rip-relative
- // displacement and done in a single instruction given favorable mapping and
- // a smarter version of as_Address. Worst case it is two instructions which
- // is no worse off then loading disp into a register and doing as a simple
- // Address() as above.
- // We can't do as ExternalAddress as the only style since if disp == 0 we'll
- // assert since NULL isn't acceptable in a reloci (see 6644928). In any case
- // in some cases we'll get a single instruction version.
+ lea(rscratch1, src2);
+ Assembler::cmpq(src1, Address(rscratch1, 0));
+ }
+#else
+ if (src2.is_lval()) {
+ cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
+ } else {
+ cmpl(src1, as_Address(src2));
+ }
+#endif // _LP64
+}
- ExternalAddress cardtable((address)disp);
- Address index(noreg, obj, Address::times_1);
- movb(as_Address(ArrayAddress(cardtable, index)), 0);
+void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
+ assert(src2.is_lval(), "not a mem-mem compare");
+#ifdef _LP64
+ // moves src2's literal address
+ movptr(rscratch1, src2);
+ Assembler::cmpq(src1, rscratch1);
+#else
+ cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
+#endif // _LP64
+}
+
+void MacroAssembler::locked_cmpxchgptr(Register reg, AddressLiteral adr) {
+ if (reachable(adr)) {
+ if (os::is_MP())
+ lock();
+ cmpxchgptr(reg, as_Address(adr));
+ } else {
+ lea(rscratch1, adr);
+ if (os::is_MP())
+ lock();
+ cmpxchgptr(reg, Address(rscratch1, 0));
}
+}
+void MacroAssembler::cmpxchgptr(Register reg, Address adr) {
+ LP64_ONLY(cmpxchgq(reg, adr)) NOT_LP64(cmpxchgl(reg, adr));
}
-void MacroAssembler::c2bool(Register x) {
- // implements x == 0 ? 0 : 1
- // note: must only look at least-significant byte of x
- // since C-style booleans are stored in one byte
- // only! (was bug)
- andl(x, 0xFF);
- setb(Assembler::notZero, x);
+void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
+ comisd(dst, as_Address(src));
+}
+
+void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
+ comiss(dst, as_Address(src));
+}
+
+
+void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
+ Condition negated_cond = negate_condition(cond);
+ Label L;
+ jcc(negated_cond, L);
+ atomic_incl(counter_addr);
+ bind(L);
}
int MacroAssembler::corrected_idivl(Register reg) {
- // Full implementation of Java idiv and irem; checks for special
- // case as described in JVM spec., p.243 & p.271. The function
- // returns the (pc) offset of the idivl instruction - may be needed
- // for implicit exceptions.
+ // Full implementation of Java idiv and irem; checks for
+ // special case as described in JVM spec., p.243 & p.271.
+ // The function returns the (pc) offset of the idivl
+ // instruction - may be needed for implicit exceptions.
//
// normal case special case
//
- // input : eax: dividend min_int
- // reg: divisor (may not be eax/edx) -1
+ // input : rax: dividend min_int
+ // reg: divisor (may not be rax/rdx) -1
//
- // output: eax: quotient (= eax idiv reg) min_int
- // edx: remainder (= eax irem reg) 0
- assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
+ // output: rax: quotient (= rax idiv reg) min_int
+ // rdx: remainder (= rax irem reg) 0
+ assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
const int min_int = 0x80000000;
Label normal_case, special_case;
// check for special case
cmpl(rax, min_int);
jcc(Assembler::notEqual, normal_case);
- xorl(rdx, rdx); // prepare edx for possible special case (where
- // remainder = 0)
+ xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
cmpl(reg, -1);
jcc(Assembler::equal, special_case);
@@ -4512,252 +5880,545 @@ int MacroAssembler::corrected_idivl(Register reg) {
return idivl_offset;
}
-int MacroAssembler::corrected_idivq(Register reg) {
- // Full implementation of Java ldiv and lrem; checks for special
- // case as described in JVM spec., p.243 & p.271. The function
- // returns the (pc) offset of the idivl instruction - may be needed
- // for implicit exceptions.
- //
- // normal case special case
- //
- // input : rax: dividend min_long
- // reg: divisor (may not be eax/edx) -1
- //
- // output: rax: quotient (= rax idiv reg) min_long
- // rdx: remainder (= rax irem reg) 0
- assert(reg != rax && reg != rdx, "reg cannot be rax or rdx register");
- static const int64_t min_long = 0x8000000000000000;
- Label normal_case, special_case;
- // check for special case
- cmp64(rax, ExternalAddress((address) &min_long));
- jcc(Assembler::notEqual, normal_case);
- xorl(rdx, rdx); // prepare rdx for possible special case (where
- // remainder = 0)
- cmpq(reg, -1);
- jcc(Assembler::equal, special_case);
- // handle normal case
- bind(normal_case);
- cdqq();
- int idivq_offset = offset();
- idivq(reg);
+void MacroAssembler::decrementl(Register reg, int value) {
+ if (value == min_jint) {subl(reg, value) ; return; }
+ if (value < 0) { incrementl(reg, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { decl(reg) ; return; }
+ /* else */ { subl(reg, value) ; return; }
+}
- // normal and special case exit
- bind(special_case);
+void MacroAssembler::decrementl(Address dst, int value) {
+ if (value == min_jint) {subl(dst, value) ; return; }
+ if (value < 0) { incrementl(dst, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { decl(dst) ; return; }
+ /* else */ { subl(dst, value) ; return; }
+}
- return idivq_offset;
+void MacroAssembler::division_with_shift (Register reg, int shift_value) {
+ assert (shift_value > 0, "illegal shift value");
+ Label _is_positive;
+ testl (reg, reg);
+ jcc (Assembler::positive, _is_positive);
+ int offset = (1 << shift_value) - 1 ;
+
+ if (offset == 1) {
+ incrementl(reg);
+ } else {
+ addl(reg, offset);
+ }
+
+ bind (_is_positive);
+ sarl(reg, shift_value);
}
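The correction makes the arithmetic shift round toward zero, as Java's signed division requires: a negative dividend gets (2^shift - 1) added first. A small worked sketch, assuming arithmetic right shift on signed ints:

    #include <cstdint>

    // -7 / 4 must be -1, but -7 >> 2 is -2 (rounds toward -infinity);
    // adding (1 << shift) - 1 to negative dividends fixes the rounding.
    int32_t divide_by_pow2_sketch(int32_t x, int shift) {
      if (x < 0) {
        x += (1 << shift) - 1;   // e.g. -7 + 3 = -4, and -4 >> 2 = -1
      }
      return x >> shift;
    }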
-void MacroAssembler::push_IU_state() {
- pushfq(); // Push flags first because pushaq kills them
- subq(rsp, 8); // Make sure rsp stays 16-byte aligned
- pushaq();
+// !defined(COMPILER2) is because of stupid core builds
+#if !defined(_LP64) || defined(COMPILER1) || !defined(COMPILER2)
+void MacroAssembler::empty_FPU_stack() {
+ if (VM_Version::supports_mmx()) {
+ emms();
+ } else {
+ for (int i = 8; i-- > 0; ) ffree(i);
+ }
}
+#endif // !LP64 || C1 || !C2
-void MacroAssembler::pop_IU_state() {
- popaq();
- addq(rsp, 8);
- popfq();
+
+// Defines obj, preserves var_size_in_bytes
+void MacroAssembler::eden_allocate(Register obj,
+ Register var_size_in_bytes,
+ int con_size_in_bytes,
+ Register t1,
+ Label& slow_case) {
+ assert(obj == rax, "obj must be in rax for cmpxchg");
+ assert_different_registers(obj, var_size_in_bytes, t1);
+ Register end = t1;
+ Label retry;
+ bind(retry);
+ ExternalAddress heap_top((address) Universe::heap()->top_addr());
+ movptr(obj, heap_top);
+ if (var_size_in_bytes == noreg) {
+ lea(end, Address(obj, con_size_in_bytes));
+ } else {
+ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
+ }
+ // if end < obj then we wrapped around => object too long => slow case
+ cmpptr(end, obj);
+ jcc(Assembler::below, slow_case);
+ cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
+ jcc(Assembler::above, slow_case);
+ // Compare obj with the top addr, and if still equal, store the new top addr in
+ // end at the address of the top addr pointer. Sets ZF if was equal, and clears
+ // it otherwise. Use lock prefix for atomicity on MPs.
+ locked_cmpxchgptr(end, heap_top);
+ jcc(Assembler::notEqual, retry);
}
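eden_allocate is bump-pointer allocation with a CAS on the shared heap top, retried until the CAS wins or the space is exhausted. A minimal C++ sketch of the same protocol, with heap_top and heap_end as illustrative stand-ins for Universe::heap()->top_addr() and end_addr():

    #include <atomic>
    #include <cstddef>

    static std::atomic<char*> heap_top;   // shared eden allocation pointer
    static char*              heap_end;   // end of the eden space

    // Returns 'size' bytes, or nullptr to signal the slow case.
    char* eden_allocate_sketch(size_t size) {
      for (;;) {                                      // bind(retry)
        char* obj = heap_top.load();
        char* end = obj + size;
        if (end < obj || end > heap_end) {            // wrapped around or full
          return nullptr;                             // jcc(..., slow_case)
        }
        // Locked cmpxchg of the new top; if another thread raced us, retry.
        if (heap_top.compare_exchange_weak(obj, end)) {
          return obj;
        }
      }
    }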
-void MacroAssembler::push_FPU_state() {
- subq(rsp, FPUStateSizeInWords * wordSize);
- fxsave(Address(rsp, 0));
+void MacroAssembler::enter() {
+ push(rbp);
+ mov(rbp, rsp);
}
-void MacroAssembler::pop_FPU_state() {
- fxrstor(Address(rsp, 0));
- addq(rsp, FPUStateSizeInWords * wordSize);
+void MacroAssembler::fcmp(Register tmp) {
+ fcmp(tmp, 1, true, true);
}
-// Save Integer and Float state
-// Warning: Stack must be 16 byte aligned
-void MacroAssembler::push_CPU_state() {
- push_IU_state();
- push_FPU_state();
+void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
+ assert(!pop_right || pop_left, "usage error");
+ if (VM_Version::supports_cmov()) {
+ assert(tmp == noreg, "unneeded temp");
+ if (pop_left) {
+ fucomip(index);
+ } else {
+ fucomi(index);
+ }
+ if (pop_right) {
+ fpop();
+ }
+ } else {
+ assert(tmp != noreg, "need temp");
+ if (pop_left) {
+ if (pop_right) {
+ fcompp();
+ } else {
+ fcomp(index);
+ }
+ } else {
+ fcom(index);
+ }
+ // convert FPU condition into eflags condition via rax
+ save_rax(tmp);
+ fwait(); fnstsw_ax();
+ sahf();
+ restore_rax(tmp);
+ }
+ // condition codes set as follows:
+ //
+ // CF (corresponds to C0) if x < y
+ // PF (corresponds to C2) if unordered
+ // ZF (corresponds to C3) if x = y
}
-void MacroAssembler::pop_CPU_state() {
- pop_FPU_state();
- pop_IU_state();
+void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
+ fcmp2int(dst, unordered_is_less, 1, true, true);
}
-void MacroAssembler::sign_extend_short(Register reg) {
- movswl(reg, reg);
+void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
+ fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
+ Label L;
+ if (unordered_is_less) {
+ movl(dst, -1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::below , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ increment(dst);
+ } else { // unordered is greater
+ movl(dst, 1);
+ jcc(Assembler::parity, L);
+ jcc(Assembler::above , L);
+ movl(dst, 0);
+ jcc(Assembler::equal , L);
+ decrementl(dst);
+ }
+ bind(L);
}
-void MacroAssembler::sign_extend_byte(Register reg) {
- movsbl(reg, reg);
+void MacroAssembler::fld_d(AddressLiteral src) {
+ fld_d(as_Address(src));
}
-void MacroAssembler::division_with_shift(Register reg, int shift_value) {
- assert (shift_value > 0, "illegal shift value");
- Label _is_positive;
- testl (reg, reg);
- jcc (Assembler::positive, _is_positive);
- int offset = (1 << shift_value) - 1 ;
+void MacroAssembler::fld_s(AddressLiteral src) {
+ fld_s(as_Address(src));
+}
- if (offset == 1) {
- incrementl(reg);
+void MacroAssembler::fld_x(AddressLiteral src) {
+ Assembler::fld_x(as_Address(src));
+}
+
+void MacroAssembler::fldcw(AddressLiteral src) {
+ Assembler::fldcw(as_Address(src));
+}
+
+void MacroAssembler::fpop() {
+ ffree();
+ fincstp();
+}
+
+void MacroAssembler::fremr(Register tmp) {
+ save_rax(tmp);
+ { Label L;
+ bind(L);
+ fprem();
+ fwait(); fnstsw_ax();
+#ifdef _LP64
+ testl(rax, 0x400);
+ jcc(Assembler::notEqual, L);
+#else
+ sahf();
+ jcc(Assembler::parity, L);
+#endif // _LP64
+ }
+ restore_rax(tmp);
+ // Result is in ST0.
+ // Note: fxch & fpop to get rid of ST1
+ // (otherwise FPU stack could overflow eventually)
+ fxch(1);
+ fpop();
+}
+
+
+void MacroAssembler::incrementl(AddressLiteral dst) {
+ if (reachable(dst)) {
+ incrementl(as_Address(dst));
} else {
- addl(reg, offset);
+ lea(rscratch1, dst);
+ incrementl(Address(rscratch1, 0));
}
+}
- bind (_is_positive);
- sarl(reg, shift_value);
+void MacroAssembler::incrementl(ArrayAddress dst) {
+ incrementl(as_Address(dst));
}
-void MacroAssembler::round_to_l(Register reg, int modulus) {
- addl(reg, modulus - 1);
- andl(reg, -modulus);
+void MacroAssembler::incrementl(Register reg, int value) {
+ if (value == min_jint) {addl(reg, value) ; return; }
+ if (value < 0) { decrementl(reg, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { incl(reg) ; return; }
+ /* else */ { addl(reg, value) ; return; }
}
-void MacroAssembler::round_to_q(Register reg, int modulus) {
- addq(reg, modulus - 1);
- andq(reg, -modulus);
+void MacroAssembler::incrementl(Address dst, int value) {
+ if (value == min_jint) {addl(dst, value) ; return; }
+ if (value < 0) { decrementl(dst, -value); return; }
+ if (value == 0) { ; return; }
+ if (value == 1 && UseIncDec) { incl(dst) ; return; }
+ /* else */ { addl(dst, value) ; return; }
}
-void MacroAssembler::verify_oop(Register reg, const char* s) {
- if (!VerifyOops) {
- return;
+void MacroAssembler::jump(AddressLiteral dst) {
+ if (reachable(dst)) {
+ jmp_literal(dst.target(), dst.rspec());
+ } else {
+ lea(rscratch1, dst);
+ jmp(rscratch1);
}
+}
- // Pass register number to verify_oop_subroutine
- char* b = new char[strlen(s) + 50];
- sprintf(b, "verify_oop: %s: %s", reg->name(), s);
+void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
+ if (reachable(dst)) {
+ InstructionMark im(this);
+ relocate(dst.reloc());
+ const int short_size = 2;
+ const int long_size = 6;
+ int offs = (intptr_t)dst.target() - ((intptr_t)_code_pos);
+ if (dst.reloc() == relocInfo::none && is8bit(offs - short_size)) {
+ // 0111 tttn #8-bit disp
+ emit_byte(0x70 | cc);
+ emit_byte((offs - short_size) & 0xFF);
+ } else {
+ // 0000 1111 1000 tttn #32-bit disp
+ emit_byte(0x0F);
+ emit_byte(0x80 | cc);
+ emit_long(offs - long_size);
+ }
+ } else {
+#ifdef ASSERT
+ warning("reversing conditional branch");
+#endif /* ASSERT */
+ Label skip;
+ jccb(reverse[cc], skip);
+ lea(rscratch1, dst);
+ Assembler::jmp(rscratch1);
+ bind(skip);
+ }
+}
- pushq(rax); // save rax, restored by receiver
+void MacroAssembler::ldmxcsr(AddressLiteral src) {
+ if (reachable(src)) {
+ Assembler::ldmxcsr(as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ Assembler::ldmxcsr(Address(rscratch1, 0));
+ }
+}
- // pass args on stack, only touch rax
- pushq(reg);
- // avoid using pushptr, as it modifies scratch registers
- // and our contract is not to modify anything
- ExternalAddress buffer((address)b);
- movptr(rax, buffer.addr());
- pushq(rax);
+int MacroAssembler::load_signed_byte(Register dst, Address src) {
+ int off;
+ if (LP64_ONLY(true ||) VM_Version::is_P6()) {
+ off = offset();
+ movsbl(dst, src); // movsxb
+ } else {
+ off = load_unsigned_byte(dst, src);
+ shll(dst, 24);
+ sarl(dst, 24);
+ }
+ return off;
+}
- // call indirectly to solve generation ordering problem
- movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
- call(rax); // no alignment requirement
- // everything popped by receiver
+// word => int32 which seems bad for 64bit
+int MacroAssembler::load_signed_word(Register dst, Address src) {
+ int off;
+ if (LP64_ONLY(true ||) VM_Version::is_P6()) {
+ // This is dubious to me since it seems safe to do a signed 16 => 64 bit
+ // version but this is what 64bit has always done. This seems to imply
+ // that users are only using 32bits worth.
+ off = offset();
+ movswl(dst, src); // movsxw
+ } else {
+ off = load_unsigned_word(dst, src);
+ shll(dst, 16);
+ sarl(dst, 16);
+ }
+ return off;
}
-void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
- if (!VerifyOops) return;
- // Pass register number to verify_oop_subroutine
- char* b = new char[strlen(s) + 50];
- sprintf(b, "verify_oop_addr: %s", s);
- pushq(rax); // save rax
- movq(addr, rax);
- pushq(rax); // pass register argument
+int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
+ // According to Intel Doc. AP-526 ("Zero-Extension of Short", p. 16,
+ // and "3.9 Partial Register Penalties", p. 22).
+ int off;
+ if (LP64_ONLY(true || ) VM_Version::is_P6() || src.uses(dst)) {
+ off = offset();
+ movzbl(dst, src); // movzxb
+ } else {
+ xorl(dst, dst);
+ off = offset();
+ movb(dst, src);
+ }
+ return off;
+}
+int MacroAssembler::load_unsigned_word(Register dst, Address src) {
+ // According to Intel Doc. AP-526 ("Zero-Extension of Short", p. 16,
+ // and "3.9 Partial Register Penalties", p. 22).
+ int off;
+ if (LP64_ONLY(true ||) VM_Version::is_P6() || src.uses(dst)) {
+ off = offset();
+ movzwl(dst, src); // movzxw
+ } else {
+ xorl(dst, dst);
+ off = offset();
+ movw(dst, src);
+ }
+ return off;
+}
- // avoid using pushptr, as it modifies scratch registers
- // and our contract is not to modify anything
- ExternalAddress buffer((address)b);
- movptr(rax, buffer.addr());
- pushq(rax);
+void MacroAssembler::mov32(AddressLiteral dst, Register src) {
+ if (reachable(dst)) {
+ movl(as_Address(dst), src);
+ } else {
+ lea(rscratch1, dst);
+ movl(Address(rscratch1, 0), src);
+ }
+}
- // call indirectly to solve generation ordering problem
- movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
- call(rax); // no alignment requirement
- // everything popped by receiver
+void MacroAssembler::mov32(Register dst, AddressLiteral src) {
+ if (reachable(src)) {
+ movl(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ movl(dst, Address(rscratch1, 0));
+ }
}
+// C++ bool manipulation
-void MacroAssembler::stop(const char* msg) {
- address rip = pc();
- pushaq(); // get regs on stack
- lea(c_rarg0, ExternalAddress((address) msg));
- lea(c_rarg1, InternalAddress(rip));
- movq(c_rarg2, rsp); // pass pointer to regs array
- andq(rsp, -16); // align stack as required by ABI
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
- hlt();
+void MacroAssembler::movbool(Register dst, Address src) {
+ if(sizeof(bool) == 1)
+ movb(dst, src);
+ else if(sizeof(bool) == 2)
+ movw(dst, src);
+ else if(sizeof(bool) == 4)
+ movl(dst, src);
+ else
+ // unsupported
+ ShouldNotReachHere();
}
-void MacroAssembler::warn(const char* msg) {
- pushq(r12);
- movq(r12, rsp);
- andq(rsp, -16); // align stack as required by push_CPU_state and call
-
- push_CPU_state(); // keeps alignment at 16 bytes
- lea(c_rarg0, ExternalAddress((address) msg));
- call_VM_leaf(CAST_FROM_FN_PTR(address, warning), c_rarg0);
- pop_CPU_state();
+void MacroAssembler::movbool(Address dst, bool boolconst) {
+ if(sizeof(bool) == 1)
+ movb(dst, (int) boolconst);
+ else if(sizeof(bool) == 2)
+ movw(dst, (int) boolconst);
+ else if(sizeof(bool) == 4)
+ movl(dst, (int) boolconst);
+ else
+ // unsupported
+ ShouldNotReachHere();
+}
- movq(rsp, r12);
- popq(r12);
+void MacroAssembler::movbool(Address dst, Register src) {
+ if(sizeof(bool) == 1)
+ movb(dst, src);
+ else if(sizeof(bool) == 2)
+ movw(dst, src);
+ else if(sizeof(bool) == 4)
+ movl(dst, src);
+ else
+ // unsupported
+ ShouldNotReachHere();
}
-#ifndef PRODUCT
-extern "C" void findpc(intptr_t x);
-#endif
+void MacroAssembler::movbyte(ArrayAddress dst, int src) {
+ movb(as_Address(dst), src);
+}
-void MacroAssembler::debug(char* msg, int64_t pc, int64_t regs[]) {
- // In order to get locks to work, we need to fake a in_VM state
- if (ShowMessageBoxOnError ) {
- JavaThread* thread = JavaThread::current();
- JavaThreadState saved_state = thread->thread_state();
- thread->set_thread_state(_thread_in_vm);
-#ifndef PRODUCT
- if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
- ttyLocker ttyl;
- BytecodeCounter::print();
+void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ if (UseXmmLoadAndClearUpper) {
+ movsd (dst, as_Address(src));
+ } else {
+ movlpd(dst, as_Address(src));
}
-#endif
- // To see where a verify_oop failed, get $ebx+40/X for this frame.
- // XXX correct this offset for amd64
- // This is the value of eip which points to where verify_oop will return.
- if (os::message_box(msg, "Execution stopped, print registers?")) {
- ttyLocker ttyl;
- tty->print_cr("rip = 0x%016lx", pc);
-#ifndef PRODUCT
- tty->cr();
- findpc(pc);
- tty->cr();
-#endif
- tty->print_cr("rax = 0x%016lx", regs[15]);
- tty->print_cr("rbx = 0x%016lx", regs[12]);
- tty->print_cr("rcx = 0x%016lx", regs[14]);
- tty->print_cr("rdx = 0x%016lx", regs[13]);
- tty->print_cr("rdi = 0x%016lx", regs[8]);
- tty->print_cr("rsi = 0x%016lx", regs[9]);
- tty->print_cr("rbp = 0x%016lx", regs[10]);
- tty->print_cr("rsp = 0x%016lx", regs[11]);
- tty->print_cr("r8 = 0x%016lx", regs[7]);
- tty->print_cr("r9 = 0x%016lx", regs[6]);
- tty->print_cr("r10 = 0x%016lx", regs[5]);
- tty->print_cr("r11 = 0x%016lx", regs[4]);
- tty->print_cr("r12 = 0x%016lx", regs[3]);
- tty->print_cr("r13 = 0x%016lx", regs[2]);
- tty->print_cr("r14 = 0x%016lx", regs[1]);
- tty->print_cr("r15 = 0x%016lx", regs[0]);
- BREAKPOINT;
+ } else {
+ lea(rscratch1, src);
+ if (UseXmmLoadAndClearUpper) {
+ movsd (dst, Address(rscratch1, 0));
+ } else {
+ movlpd(dst, Address(rscratch1, 0));
}
- ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
+ }
+}
+
+void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ movss(dst, as_Address(src));
} else {
- ttyLocker ttyl;
- ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n",
- msg);
+ lea(rscratch1, src);
+ movss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::movptr(Register dst, Register src) {
+ LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
+}
+
+void MacroAssembler::movptr(Register dst, Address src) {
+ LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
+}
+
+// src should NEVER be a real pointer. Use AddressLiteral for true pointers
+void MacroAssembler::movptr(Register dst, intptr_t src) {
+ LP64_ONLY(mov64(dst, src)) NOT_LP64(movl(dst, src));
+}
+
+void MacroAssembler::movptr(Address dst, Register src) {
+ LP64_ONLY(movq(dst, src)) NOT_LP64(movl(dst, src));
+}
+
+void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ movss(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ movss(dst, Address(rscratch1, 0));
+ }
+}
+
+void MacroAssembler::null_check(Register reg, int offset) {
+ if (needs_explicit_null_check(offset)) {
+ // provoke OS NULL exception if reg = NULL by
+ // accessing M[reg] w/o changing any (non-CC) registers
+ // NOTE: cmpl is plenty here to provoke a segv
+ cmpptr(rax, Address(reg, 0));
+ // Note: should probably use testl(rax, Address(reg, 0));
+ // may be shorter code (however, this version of
+ // testl needs to be implemented first)
+ } else {
+ // nothing to do, (later) access of M[reg + offset]
+ // will provoke OS NULL exception if reg = NULL
}
}
void MacroAssembler::os_breakpoint() {
- // instead of directly emitting a breakpoint, call os:breakpoint for
- // better debugability
- // This shouldn't need alignment, it's an empty function
+  // instead of directly emitting a breakpoint, call os::breakpoint for better debuggability
+ // (e.g., MSVC can't call ps() otherwise)
call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
+void MacroAssembler::pop_CPU_state() {
+ pop_FPU_state();
+ pop_IU_state();
+}
+
+void MacroAssembler::pop_FPU_state() {
+ NOT_LP64(frstor(Address(rsp, 0));)
+ LP64_ONLY(fxrstor(Address(rsp, 0));)
+ addptr(rsp, FPUStateSizeInWords * wordSize);
+}
+
+void MacroAssembler::pop_IU_state() {
+ popa();
+ LP64_ONLY(addq(rsp, 8));
+ popf();
+}
+
+// Save Integer and Float state
+// Warning: Stack must be 16 byte aligned (64bit)
+void MacroAssembler::push_CPU_state() {
+ push_IU_state();
+ push_FPU_state();
+}
+
+void MacroAssembler::push_FPU_state() {
+ subptr(rsp, FPUStateSizeInWords * wordSize);
+#ifndef _LP64
+ fnsave(Address(rsp, 0));
+ fwait();
+#else
+ fxsave(Address(rsp, 0));
+#endif // LP64
+}
+
+void MacroAssembler::push_IU_state() {
+ // Push flags first because pusha kills them
+ pushf();
+ // Make sure rsp stays 16-byte aligned
+ LP64_ONLY(subq(rsp, 8));
+ pusha();
+}
+
+void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
+ // determine java_thread register
+ if (!java_thread->is_valid()) {
+ java_thread = rdi;
+ get_thread(java_thread);
+ }
+ // we must set sp to zero to clear frame
+ movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), (int32_t)NULL_WORD);
+ if (clear_fp) {
+ movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), (int32_t)NULL_WORD);
+ }
+
+ if (clear_pc)
+ movptr(Address(java_thread, JavaThread::last_Java_pc_offset()), (int32_t)NULL_WORD);
+
+}
+
+void MacroAssembler::restore_rax(Register tmp) {
+ if (tmp == noreg) pop(rax);
+ else if (tmp != rax) mov(rax, tmp);
+}
+
+void MacroAssembler::round_to(Register reg, int modulus) {
+ addptr(reg, modulus - 1);
+ andptr(reg, -modulus);
+}
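+
+// Worked example (illustrative only): for modulus = 8 and reg = 13, addptr gives 20
+// and andptr with -8 (0xFFFFFFF8) gives 16, i.e. the value rounded up to the next
+// multiple of 8. modulus must be a power of two for this mask trick to be correct.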
+
+void MacroAssembler::save_rax(Register tmp) {
+ if (tmp == noreg) push(rax);
+ else if (tmp != rax) mov(tmp, rax);
+}
+
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
// offset to write to within the page. This minimizes bus traffic
// due to cache line collision.
-void MacroAssembler::serialize_memory(Register thread,
- Register tmp) {
-
+void MacroAssembler::serialize_memory(Register thread, Register tmp) {
movl(tmp, thread);
shrl(tmp, os::get_serialize_page_shift_count());
andl(tmp, (os::vm_page_size() - sizeof(int)));
@@ -4768,68 +6429,154 @@ void MacroAssembler::serialize_memory(Register thread,
movptr(ArrayAddress(page, index), tmp);
}
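+
+// Illustrative arithmetic (assuming a 4K serialization page): the thread pointer is
+// shifted right by the serialize page shift and masked with (vm_page_size() - sizeof(int)),
+// yielding a 4-byte-aligned, thread-specific offset within the page, so concurrent
+// threads tend to write to different cache lines.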
-void MacroAssembler::verify_tlab() {
-#ifdef ASSERT
- if (UseTLAB) {
- Label next, ok;
- Register t1 = rsi;
+// Calls to C land
+//
+// When entering C land, the rbp and rsp of the last Java frame have to be recorded
+// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
+// has to be reset to 0. This is required to allow proper stack traversal.
+void MacroAssembler::set_last_Java_frame(Register java_thread,
+ Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc) {
+ // determine java_thread register
+ if (!java_thread->is_valid()) {
+ java_thread = rdi;
+ get_thread(java_thread);
+ }
+ // determine last_java_sp register
+ if (!last_java_sp->is_valid()) {
+ last_java_sp = rsp;
+ }
- pushq(t1);
+ // last_java_fp is optional
- movq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
- cmpq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_start_offset())));
- jcc(Assembler::aboveEqual, next);
- stop("assert(top >= start)");
- should_not_reach_here();
+ if (last_java_fp->is_valid()) {
+ movptr(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
+ }
- bind(next);
- movq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
- cmpq(t1, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
- jcc(Assembler::aboveEqual, ok);
- stop("assert(top <= end)");
- should_not_reach_here();
+ // last_java_pc is optional
- bind(ok);
+ if (last_java_pc != NULL) {
+ lea(Address(java_thread,
+ JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
+ InternalAddress(last_java_pc));
- popq(t1);
}
-#endif
+ movptr(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
}
-// Defines obj, preserves var_size_in_bytes
-void MacroAssembler::eden_allocate(Register obj,
- Register var_size_in_bytes,
- int con_size_in_bytes,
- Register t1,
- Label& slow_case) {
- assert(obj == rax, "obj must be in rax for cmpxchg");
- assert_different_registers(obj, var_size_in_bytes, t1);
- Register end = t1;
- Label retry;
- bind(retry);
- ExternalAddress heap_top((address) Universe::heap()->top_addr());
- movptr(obj, heap_top);
- if (var_size_in_bytes == noreg) {
- leaq(end, Address(obj, con_size_in_bytes));
+void MacroAssembler::shlptr(Register dst, int imm8) {
+ LP64_ONLY(shlq(dst, imm8)) NOT_LP64(shll(dst, imm8));
+}
+
+void MacroAssembler::shrptr(Register dst, int imm8) {
+ LP64_ONLY(shrq(dst, imm8)) NOT_LP64(shrl(dst, imm8));
+}
+
+void MacroAssembler::sign_extend_byte(Register reg) {
+ if (LP64_ONLY(true ||) (VM_Version::is_P6() && reg->has_byte_register())) {
+ movsbl(reg, reg); // movsxb
} else {
- leaq(end, Address(obj, var_size_in_bytes, Address::times_1));
+ shll(reg, 24);
+ sarl(reg, 24);
}
- // if end < obj then we wrapped around => object too long => slow case
- cmpq(end, obj);
- jcc(Assembler::below, slow_case);
- cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
+}
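+
+// Illustrative only: the shll/sarl fallback sign-extends the low byte without
+// requiring a byte register, e.g. 0x000000ff << 24 = 0xff000000, then an
+// arithmetic shift right by 24 yields 0xffffffff (-1).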
- jcc(Assembler::above, slow_case);
- // Compare obj with the top addr, and if still equal, store the new
- // top addr in end at the address of the top addr pointer. Sets ZF
- // if was equal, and clears it otherwise. Use lock prefix for
- // atomicity on MPs.
- if (os::is_MP()) {
- lock();
+void MacroAssembler::sign_extend_short(Register reg) {
+ if (LP64_ONLY(true ||) VM_Version::is_P6()) {
+ movswl(reg, reg); // movsxw
+ } else {
+ shll(reg, 16);
+ sarl(reg, 16);
}
- cmpxchgptr(end, heap_top);
- // if someone beat us on the allocation, try again, otherwise continue
- jcc(Assembler::notEqual, retry);
+}
+
+void MacroAssembler::store_check(Register obj) {
+ // Does a store check for the oop in register obj. The content of
+ // register obj is destroyed afterwards.
+ store_check_part_1(obj);
+ store_check_part_2(obj);
+}
+
+void MacroAssembler::store_check(Register obj, Address dst) {
+ store_check(obj);
+}
+
+
+// split the store check operation so that other instructions can be scheduled in between
+void MacroAssembler::store_check_part_1(Register obj) {
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+ shrptr(obj, CardTableModRefBS::card_shift);
+}
+
+void MacroAssembler::store_check_part_2(Register obj) {
+ BarrierSet* bs = Universe::heap()->barrier_set();
+ assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
+ CardTableModRefBS* ct = (CardTableModRefBS*)bs;
+ assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
+
+ // The calculation for byte_map_base is as follows:
+ // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
+ // So this essentially converts an address to a displacement and
+ // it will never need to be relocated. On 64bit however the value may be too
+ // large for a 32bit displacement
+
+ intptr_t disp = (intptr_t) ct->byte_map_base;
+ if (is_simm32(disp)) {
+ Address cardtable(noreg, obj, Address::times_1, disp);
+ movb(cardtable, 0);
+ } else {
+ // By doing it as an ExternalAddress disp could be converted to a rip-relative
+ // displacement and done in a single instruction given favorable mapping and
+ // a smarter version of as_Address. Worst case it is two instructions which
+  // is no worse off than loading disp into a register and doing as a simple
+ // Address() as above.
+ // We can't do as ExternalAddress as the only style since if disp == 0 we'll
+  // assert since NULL isn't acceptable in a relocInfo (see 6644928). In any case
+ // in some cases we'll get a single instruction version.
+
+ ExternalAddress cardtable((address)disp);
+ Address index(noreg, obj, Address::times_1);
+ movb(as_Address(ArrayAddress(cardtable, index)), 0);
+ }
+}
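+
+// Illustrative sketch of the card-marking math (values are assumptions, not taken
+// from this change): with 512-byte cards, card_shift is 9, so after
+// store_check_part_1 'obj' holds addr >> 9; byte_map_base + (addr >> 9) is the
+// card byte, which the movb above dirties by storing 0.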
+
+void MacroAssembler::subptr(Register dst, int32_t imm32) {
+ LP64_ONLY(subq(dst, imm32)) NOT_LP64(subl(dst, imm32));
+}
+
+void MacroAssembler::subptr(Register dst, Register src) {
+ LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src));
+}
+
+void MacroAssembler::test32(Register src1, AddressLiteral src2) {
+ // src2 must be rval
+
+ if (reachable(src2)) {
+ testl(src1, as_Address(src2));
+ } else {
+ lea(rscratch1, src2);
+ testl(src1, Address(rscratch1, 0));
+ }
+}
+
+// C++ bool manipulation
+void MacroAssembler::testbool(Register dst) {
+ if(sizeof(bool) == 1)
+ testb(dst, 0xff);
+ else if(sizeof(bool) == 2) {
+ // testw implementation needed for two byte bools
+ ShouldNotReachHere();
+ } else if(sizeof(bool) == 4)
+ testl(dst, dst);
+ else
+ // unsupported
+ ShouldNotReachHere();
+}
+
+void MacroAssembler::testptr(Register dst, Register src) {
+ LP64_ONLY(testq(dst, src)) NOT_LP64(testl(dst, src));
}
// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
@@ -4842,39 +6589,40 @@ void MacroAssembler::tlab_allocate(Register obj,
assert_different_registers(obj, t1, t2);
assert_different_registers(obj, var_size_in_bytes, t1);
Register end = t2;
+ Register thread = NOT_LP64(t1) LP64_ONLY(r15_thread);
verify_tlab();
- movq(obj, Address(r15_thread, JavaThread::tlab_top_offset()));
+ NOT_LP64(get_thread(thread));
+
+ movptr(obj, Address(thread, JavaThread::tlab_top_offset()));
if (var_size_in_bytes == noreg) {
- leaq(end, Address(obj, con_size_in_bytes));
+ lea(end, Address(obj, con_size_in_bytes));
} else {
- leaq(end, Address(obj, var_size_in_bytes, Address::times_1));
+ lea(end, Address(obj, var_size_in_bytes, Address::times_1));
}
- cmpq(end, Address(r15_thread, JavaThread::tlab_end_offset()));
+ cmpptr(end, Address(thread, JavaThread::tlab_end_offset()));
jcc(Assembler::above, slow_case);
// update the tlab top pointer
- movq(Address(r15_thread, JavaThread::tlab_top_offset()), end);
+ movptr(Address(thread, JavaThread::tlab_top_offset()), end);
// recover var_size_in_bytes if necessary
if (var_size_in_bytes == end) {
- subq(var_size_in_bytes, obj);
+ subptr(var_size_in_bytes, obj);
}
verify_tlab();
}
-// Preserves rbx and rdx.
+// Preserves rbx, and rdx.
void MacroAssembler::tlab_refill(Label& retry,
Label& try_eden,
Label& slow_case) {
Register top = rax;
- Register t1 = rcx;
- Register t2 = rsi;
- Register t3 = r10;
- Register thread_reg = r15_thread;
- assert_different_registers(top, thread_reg, t1, t2, t3,
- /* preserve: */ rbx, rdx);
+ Register t1 = rcx;
+ Register t2 = rsi;
+ Register thread_reg = NOT_LP64(rdi) LP64_ONLY(r15_thread);
+ assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
Label do_refill, discard_tlab;
if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
@@ -4882,58 +6630,52 @@ void MacroAssembler::tlab_refill(Label& retry,
jmp(slow_case);
}
- movq(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
- movq(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
+ NOT_LP64(get_thread(thread_reg));
+
+ movptr(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
+ movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
// calculate amount of free space
- subq(t1, top);
- shrq(t1, LogHeapWordSize);
+ subptr(t1, top);
+ shrptr(t1, LogHeapWordSize);
// Retain tlab and allocate object in shared space if
// the amount free in the tlab is too large to discard.
- cmpq(t1, Address(thread_reg, // size_t
- in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
+ cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
jcc(Assembler::lessEqual, discard_tlab);
// Retain
- mov64(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment());
- addq(Address(thread_reg, // size_t
- in_bytes(JavaThread::tlab_refill_waste_limit_offset())),
- t2);
+ // %%% yuck as movptr...
+ movptr(t2, (int32_t) ThreadLocalAllocBuffer::refill_waste_limit_increment());
+ addptr(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
if (TLABStats) {
// increment number of slow_allocations
- addl(Address(thread_reg, // unsigned int
- in_bytes(JavaThread::tlab_slow_allocations_offset())),
- 1);
+ addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
}
jmp(try_eden);
bind(discard_tlab);
if (TLABStats) {
// increment number of refills
- addl(Address(thread_reg, // unsigned int
- in_bytes(JavaThread::tlab_number_of_refills_offset())),
- 1);
+ addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
// accumulate wastage -- t1 is amount free in tlab
- addl(Address(thread_reg, // unsigned int
- in_bytes(JavaThread::tlab_fast_refill_waste_offset())),
- t1);
+ addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
}
// if tlab is currently allocated (top or end != null) then
// fill [top, end + alignment_reserve) with array object
- testq(top, top);
+  testptr(top, top);
jcc(Assembler::zero, do_refill);
// set up the mark word
- mov64(t3, (int64_t) markOopDesc::prototype()->copy_set_hash(0x2));
- movq(Address(top, oopDesc::mark_offset_in_bytes()), t3);
+ movptr(Address(top, oopDesc::mark_offset_in_bytes()), (intptr_t)markOopDesc::prototype()->copy_set_hash(0x2));
// set the length to the remaining space
- subq(t1, typeArrayOopDesc::header_size(T_INT));
- addq(t1, (int)ThreadLocalAllocBuffer::alignment_reserve());
- shlq(t1, log2_intptr(HeapWordSize / sizeof(jint)));
- movq(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
+ subptr(t1, typeArrayOopDesc::header_size(T_INT));
+ addptr(t1, (int32_t)ThreadLocalAllocBuffer::alignment_reserve());
+ shlptr(t1, log2_intptr(HeapWordSize/sizeof(jint)));
+ movptr(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
// set klass to intArrayKlass
+  // dubious reloc: why not an oop reloc?
movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
// store klass last. concurrent gcs assumes klass length is valid if
// klass field is not null.
@@ -4941,8 +6683,8 @@ void MacroAssembler::tlab_refill(Label& retry,
// refill the tlab with an eden allocation
bind(do_refill);
- movq(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
- shlq(t1, LogHeapWordSize);
+ movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
+ shlptr(t1, LogHeapWordSize);
// add object_size ??
eden_allocate(top, t1, 0, t2, slow_case);
@@ -4952,228 +6694,637 @@ void MacroAssembler::tlab_refill(Label& retry,
Label ok;
Register tsize = rsi;
assert_different_registers(tsize, thread_reg, t1);
- pushq(tsize);
- movq(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
- shlq(tsize, LogHeapWordSize);
- cmpq(t1, tsize);
+ push(tsize);
+ movptr(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
+ shlptr(tsize, LogHeapWordSize);
+ cmpptr(t1, tsize);
jcc(Assembler::equal, ok);
stop("assert(t1 != tlab size)");
should_not_reach_here();
bind(ok);
- popq(tsize);
+ pop(tsize);
}
#endif
- movq(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
- movq(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
- addq(top, t1);
- subq(top, (int)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
- movq(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
+ movptr(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
+ movptr(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
+ addptr(top, t1);
+ subptr(top, (int32_t)ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
+ movptr(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
verify_tlab();
jmp(retry);
}
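+
+// Illustrative note (not part of this change): the discarded TLAB is turned into a
+// filler int[] so the heap stays parseable; its element count is the free space in
+// words, minus the int-array header, plus the alignment reserve, scaled by
+// HeapWordSize/sizeof(jint) (e.g. x2 on a 64-bit VM where HeapWordSize is 8).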
+static const double pi_4 = 0.7853981633974483;
-int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
- bool swap_reg_contains_mark,
- Label& done, Label* slow_case,
- BiasedLockingCounters* counters) {
- assert(UseBiasedLocking, "why call this otherwise?");
- assert(swap_reg == rax, "swap_reg must be rax for cmpxchgq");
- assert(tmp_reg != noreg, "tmp_reg must be supplied");
- assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
- Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
- Address saved_mark_addr(lock_reg, 0);
+void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
+ // A hand-coded argument reduction for values in fabs(pi/4, pi/2)
+ // was attempted in this code; unfortunately it appears that the
+ // switch to 80-bit precision and back causes this to be
+ // unprofitable compared with simply performing a runtime call if
+ // the argument is out of the (-pi/4, pi/4) range.
- if (PrintBiasedLockingStatistics && counters == NULL)
- counters = BiasedLocking::counters();
+ Register tmp = noreg;
+ if (!VM_Version::supports_cmov()) {
+    // fcmp needs a temporary, so preserve rbx
+ tmp = rbx;
+ push(tmp);
+ }
- // Biased locking
- // See whether the lock is currently biased toward our thread and
- // whether the epoch is still valid
- // Note that the runtime guarantees sufficient alignment of JavaThread
- // pointers to allow age to be placed into low bits
- // First check to see whether biasing is even enabled for this object
- Label cas_label;
- int null_check_offset = -1;
- if (!swap_reg_contains_mark) {
- null_check_offset = offset();
- movq(swap_reg, mark_addr);
+ Label slow_case, done;
+
+ // x ?<= pi/4
+ fld_d(ExternalAddress((address)&pi_4));
+ fld_s(1); // Stack: X PI/4 X
+ fabs(); // Stack: |X| PI/4 X
+ fcmp(tmp);
+ jcc(Assembler::above, slow_case);
+
+ // fastest case: -pi/4 <= x <= pi/4
+ switch(trig) {
+ case 's':
+ fsin();
+ break;
+ case 'c':
+ fcos();
+ break;
+ case 't':
+ ftan();
+ break;
+ default:
+ assert(false, "bad intrinsic");
+ break;
}
- movq(tmp_reg, swap_reg);
- andq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpq(tmp_reg, markOopDesc::biased_lock_pattern);
- jcc(Assembler::notEqual, cas_label);
- // The bias pattern is present in the object's header. Need to check
- // whether the bias owner and the epoch are both still current.
- load_prototype_header(tmp_reg, obj_reg);
- orq(tmp_reg, r15_thread);
- xorq(tmp_reg, swap_reg);
- andq(tmp_reg, ~((int) markOopDesc::age_mask_in_place));
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
+ jmp(done);
+
+ // slow case: runtime call
+ bind(slow_case);
+ // Preserve registers across runtime call
+ pusha();
+ int incoming_argument_and_return_value_offset = -1;
+ if (num_fpu_regs_in_use > 1) {
+ // Must preserve all other FPU regs (could alternatively convert
+ // SharedRuntime::dsin and dcos into assembly routines known not to trash
+ // FPU state, but can not trust C compiler)
+ NEEDS_CLEANUP;
+ // NOTE that in this case we also push the incoming argument to
+ // the stack and restore it later; we also use this stack slot to
+ // hold the return value from dsin or dcos.
+ for (int i = 0; i < num_fpu_regs_in_use; i++) {
+ subptr(rsp, sizeof(jdouble));
+ fstp_d(Address(rsp, 0));
+ }
+ incoming_argument_and_return_value_offset = sizeof(jdouble)*(num_fpu_regs_in_use-1);
+ fld_d(Address(rsp, incoming_argument_and_return_value_offset));
}
- jcc(Assembler::equal, done);
+ subptr(rsp, sizeof(jdouble));
+ fstp_d(Address(rsp, 0));
+#ifdef _LP64
+ movdbl(xmm0, Address(rsp, 0));
+#endif // _LP64
- Label try_revoke_bias;
- Label try_rebias;
+ // NOTE: we must not use call_VM_leaf here because that requires a
+ // complete interpreter frame in debug mode -- same bug as 4387334
+ // MacroAssembler::call_VM_leaf_base is perfectly safe and will
+ // do proper 64bit abi
+
+ NEEDS_CLEANUP;
+ // Need to add stack banging before this runtime call if it needs to
+ // be taken; however, there is no generic stack banging routine at
+ // the MacroAssembler level
+ switch(trig) {
+ case 's':
+ {
+ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dsin), 0);
+ }
+ break;
+ case 'c':
+ {
+ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dcos), 0);
+ }
+ break;
+ case 't':
+ {
+ MacroAssembler::call_VM_leaf_base(CAST_FROM_FN_PTR(address, SharedRuntime::dtan), 0);
+ }
+ break;
+ default:
+ assert(false, "bad intrinsic");
+ break;
+ }
+#ifdef _LP64
+ movsd(Address(rsp, 0), xmm0);
+ fld_d(Address(rsp, 0));
+#endif // _LP64
+ addptr(rsp, sizeof(jdouble));
+ if (num_fpu_regs_in_use > 1) {
+ // Must save return value to stack and then restore entire FPU stack
+ fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
+ for (int i = 0; i < num_fpu_regs_in_use; i++) {
+ fld_d(Address(rsp, 0));
+ addptr(rsp, sizeof(jdouble));
+ }
+ }
+ popa();
- // At this point we know that the header has the bias pattern and
- // that we are not the bias owner in the current epoch. We need to
- // figure out more details about the state of the header in order to
- // know what operations can be legally performed on the object's
- // header.
+ // Come here with result in F-TOS
+ bind(done);
- // If the low three bits in the xor result aren't clear, that means
- // the prototype header is no longer biased and we have to revoke
- // the bias on this object.
- testq(tmp_reg, markOopDesc::biased_lock_mask_in_place);
- jcc(Assembler::notZero, try_revoke_bias);
+ if (tmp != noreg) {
+ pop(tmp);
+ }
+}
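+
+// Usage sketch (illustrative): trigfunc('s', 1) emits an inline fsin for arguments
+// with |x| <= pi/4 and otherwise falls back to a runtime call to SharedRuntime::dsin,
+// preserving any live FPU registers around the call.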
- // Biasing is still enabled for this data type. See whether the
- // epoch of the current bias is still valid, meaning that the epoch
- // bits of the mark word are equal to the epoch bits of the
- // prototype header. (Note that the prototype header's epoch bits
- // only change at a safepoint.) If not, attempt to rebias the object
- // toward the current thread. Note that we must be absolutely sure
- // that the current epoch is invalid in order to do this because
- // otherwise the manipulations it performs on the mark word are
- // illegal.
- testq(tmp_reg, markOopDesc::epoch_mask_in_place);
- jcc(Assembler::notZero, try_rebias);
- // The epoch of the current bias is still valid but we know nothing
- // about the owner; it might be set or it might be clear. Try to
- // acquire the bias of the object using an atomic operation. If this
- // fails we will go in to the runtime to revoke the object's bias.
- // Note that we first construct the presumed unbiased header so we
- // don't accidentally blow away another thread's valid bias.
- andq(swap_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
- movq(tmp_reg, swap_reg);
- orq(tmp_reg, r15_thread);
- if (os::is_MP()) {
- lock();
+void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
+ ucomisd(dst, as_Address(src));
+}
+
+void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
+ ucomiss(dst, as_Address(src));
+}
+
+void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ xorpd(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ xorpd(dst, Address(rscratch1, 0));
}
- cmpxchgq(tmp_reg, Address(obj_reg, 0));
- // If the biasing toward our thread failed, this means that
- // another thread succeeded in biasing it toward itself and we
- // need to revoke that bias. The revocation will occur in the
- // interpreter runtime in the slow case.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address) counters->anonymously_biased_lock_entry_count_addr()));
+}
+
+void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
+ if (reachable(src)) {
+ xorps(dst, as_Address(src));
+ } else {
+ lea(rscratch1, src);
+ xorps(dst, Address(rscratch1, 0));
}
- if (slow_case != NULL) {
- jcc(Assembler::notZero, *slow_case);
+}
+
+void MacroAssembler::verify_oop(Register reg, const char* s) {
+ if (!VerifyOops) return;
+
+ // Pass register number to verify_oop_subroutine
+ char* b = new char[strlen(s) + 50];
+ sprintf(b, "verify_oop: %s: %s", reg->name(), s);
+ push(rax); // save rax,
+ push(reg); // pass register argument
+ ExternalAddress buffer((address) b);
+ // avoid using pushptr, as it modifies scratch registers
+ // and our contract is not to modify anything
+ movptr(rax, buffer.addr());
+ push(rax);
+ // call indirectly to solve generation ordering problem
+ movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
+ call(rax);
+}
+
+
+void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
+ if (!VerifyOops) return;
+
+ // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
+ // Pass register number to verify_oop_subroutine
+ char* b = new char[strlen(s) + 50];
+ sprintf(b, "verify_oop_addr: %s", s);
+
+ push(rax); // save rax,
+ // addr may contain rsp so we will have to adjust it based on the push
+ // we just did
+ // NOTE: 64bit seemed to have had a bug in that it did movq(addr, rax); which
+ // stores rax into addr which is backwards of what was intended.
+ if (addr.uses(rsp)) {
+ lea(rax, addr);
+ pushptr(Address(rax, BytesPerWord));
+ } else {
+ pushptr(addr);
}
- jmp(done);
- bind(try_rebias);
- // At this point we know the epoch has expired, meaning that the
- // current "bias owner", if any, is actually invalid. Under these
- // circumstances _only_, we are allowed to use the current header's
- // value as the comparison value when doing the cas to acquire the
- // bias in the current epoch. In other words, we allow transfer of
- // the bias from one thread to another directly in this situation.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- load_prototype_header(tmp_reg, obj_reg);
- orq(tmp_reg, r15_thread);
- if (os::is_MP()) {
- lock();
+ ExternalAddress buffer((address) b);
+ // pass msg argument
+ // avoid using pushptr, as it modifies scratch registers
+ // and our contract is not to modify anything
+ movptr(rax, buffer.addr());
+ push(rax);
+
+ // call indirectly to solve generation ordering problem
+ movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
+ call(rax);
+  // Caller pops the arguments and restores rax from the stack
+}
+
+void MacroAssembler::verify_tlab() {
+#ifdef ASSERT
+ if (UseTLAB && VerifyOops) {
+ Label next, ok;
+ Register t1 = rsi;
+ Register thread_reg = NOT_LP64(rbx) LP64_ONLY(r15_thread);
+
+ push(t1);
+ NOT_LP64(push(thread_reg));
+ NOT_LP64(get_thread(thread_reg));
+
+ movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
+ cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
+ jcc(Assembler::aboveEqual, next);
+ stop("assert(top >= start)");
+ should_not_reach_here();
+
+ bind(next);
+ movptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
+ cmpptr(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
+ jcc(Assembler::aboveEqual, ok);
+ stop("assert(top <= end)");
+ should_not_reach_here();
+
+ bind(ok);
+ NOT_LP64(pop(thread_reg));
+ pop(t1);
}
- cmpxchgq(tmp_reg, Address(obj_reg, 0));
- // If the biasing toward our thread failed, then another thread
- // succeeded in biasing it toward itself and we need to revoke that
- // bias. The revocation will occur in the runtime in the slow case.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address) counters->rebiased_lock_entry_count_addr()));
+#endif
+}
+
+class ControlWord {
+ public:
+ int32_t _value;
+
+ int rounding_control() const { return (_value >> 10) & 3 ; }
+ int precision_control() const { return (_value >> 8) & 3 ; }
+ bool precision() const { return ((_value >> 5) & 1) != 0; }
+ bool underflow() const { return ((_value >> 4) & 1) != 0; }
+ bool overflow() const { return ((_value >> 3) & 1) != 0; }
+ bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
+ bool denormalized() const { return ((_value >> 1) & 1) != 0; }
+ bool invalid() const { return ((_value >> 0) & 1) != 0; }
+
+ void print() const {
+ // rounding control
+ const char* rc;
+ switch (rounding_control()) {
+ case 0: rc = "round near"; break;
+ case 1: rc = "round down"; break;
+ case 2: rc = "round up "; break;
+ case 3: rc = "chop "; break;
+ };
+ // precision control
+ const char* pc;
+ switch (precision_control()) {
+ case 0: pc = "24 bits "; break;
+ case 1: pc = "reserved"; break;
+ case 2: pc = "53 bits "; break;
+ case 3: pc = "64 bits "; break;
+ };
+ // flags
+ char f[9];
+ f[0] = ' ';
+ f[1] = ' ';
+ f[2] = (precision ()) ? 'P' : 'p';
+ f[3] = (underflow ()) ? 'U' : 'u';
+ f[4] = (overflow ()) ? 'O' : 'o';
+ f[5] = (zero_divide ()) ? 'Z' : 'z';
+ f[6] = (denormalized()) ? 'D' : 'd';
+ f[7] = (invalid ()) ? 'I' : 'i';
+ f[8] = '\x0';
+ // output
+ printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
}
- if (slow_case != NULL) {
- jcc(Assembler::notZero, *slow_case);
+
+};
+
+class StatusWord {
+ public:
+ int32_t _value;
+
+ bool busy() const { return ((_value >> 15) & 1) != 0; }
+ bool C3() const { return ((_value >> 14) & 1) != 0; }
+ bool C2() const { return ((_value >> 10) & 1) != 0; }
+ bool C1() const { return ((_value >> 9) & 1) != 0; }
+ bool C0() const { return ((_value >> 8) & 1) != 0; }
+ int top() const { return (_value >> 11) & 7 ; }
+ bool error_status() const { return ((_value >> 7) & 1) != 0; }
+ bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
+ bool precision() const { return ((_value >> 5) & 1) != 0; }
+ bool underflow() const { return ((_value >> 4) & 1) != 0; }
+ bool overflow() const { return ((_value >> 3) & 1) != 0; }
+ bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
+ bool denormalized() const { return ((_value >> 1) & 1) != 0; }
+ bool invalid() const { return ((_value >> 0) & 1) != 0; }
+
+ void print() const {
+ // condition codes
+ char c[5];
+ c[0] = (C3()) ? '3' : '-';
+ c[1] = (C2()) ? '2' : '-';
+ c[2] = (C1()) ? '1' : '-';
+ c[3] = (C0()) ? '0' : '-';
+ c[4] = '\x0';
+ // flags
+ char f[9];
+ f[0] = (error_status()) ? 'E' : '-';
+ f[1] = (stack_fault ()) ? 'S' : '-';
+ f[2] = (precision ()) ? 'P' : '-';
+ f[3] = (underflow ()) ? 'U' : '-';
+ f[4] = (overflow ()) ? 'O' : '-';
+ f[5] = (zero_divide ()) ? 'Z' : '-';
+ f[6] = (denormalized()) ? 'D' : '-';
+ f[7] = (invalid ()) ? 'I' : '-';
+ f[8] = '\x0';
+ // output
+ printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
}
- jmp(done);
- bind(try_revoke_bias);
- // The prototype mark in the klass doesn't have the bias bit set any
- // more, indicating that objects of this data type are not supposed
- // to be biased any more. We are going to try to reset the mark of
- // this object to the prototype value and fall through to the
- // CAS-based locking scheme. Note that if our CAS fails, it means
- // that another thread raced us for the privilege of revoking the
- // bias of this particular object, so it's okay to continue in the
- // normal locking code.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- load_prototype_header(tmp_reg, obj_reg);
- if (os::is_MP()) {
- lock();
+};
+
+class TagWord {
+ public:
+ int32_t _value;
+
+ int tag_at(int i) const { return (_value >> (i*2)) & 3; }
+
+ void print() const {
+ printf("%04x", _value & 0xFFFF);
}
- cmpxchgq(tmp_reg, Address(obj_reg, 0));
- // Fall through to the normal CAS-based lock, because no matter what
- // the result of the above CAS, some thread must have succeeded in
- // removing the bias bit from the object's header.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address) counters->revoked_lock_entry_count_addr()));
+
+};
+
+class FPU_Register {
+ public:
+ int32_t _m0;
+ int32_t _m1;
+ int16_t _ex;
+
+ bool is_indefinite() const {
+ return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
}
- bind(cas_label);
+ void print() const {
+ char sign = (_ex < 0) ? '-' : '+';
+ const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
+ printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
+ };
- return null_check_offset;
-}
+};
+class FPU_State {
+ public:
+ enum {
+ register_size = 10,
+ number_of_registers = 8,
+ register_mask = 7
+ };
+
+ ControlWord _control_word;
+ StatusWord _status_word;
+ TagWord _tag_word;
+ int32_t _error_offset;
+ int32_t _error_selector;
+ int32_t _data_offset;
+ int32_t _data_selector;
+ int8_t _register[register_size * number_of_registers];
+
+ int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
+ FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
+
+ const char* tag_as_string(int tag) const {
+ switch (tag) {
+ case 0: return "valid";
+ case 1: return "zero";
+ case 2: return "special";
+ case 3: return "empty";
+ }
+    ShouldNotReachHere();
+ return NULL;
+ }
+
+ void print() const {
+ // print computation registers
+ { int t = _status_word.top();
+ for (int i = 0; i < number_of_registers; i++) {
+ int j = (i - t) & register_mask;
+ printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
+ st(j)->print();
+ printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
+ }
+ }
+ printf("\n");
+ // print control registers
+ printf("ctrl = "); _control_word.print(); printf("\n");
+ printf("stat = "); _status_word .print(); printf("\n");
+ printf("tags = "); _tag_word .print(); printf("\n");
+ }
-void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
- assert(UseBiasedLocking, "why call this otherwise?");
+};
- // Check for biased locking unlock case, which is a no-op
- // Note: we do not have to check the thread ID for two reasons.
- // First, the interpreter checks for IllegalMonitorStateException at
- // a higher level. Second, if the bias was revoked while we held the
- // lock, the object could not be rebiased toward another thread, so
- // the bias bit would be clear.
- movq(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andq(temp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpq(temp_reg, markOopDesc::biased_lock_pattern);
- jcc(Assembler::equal, done);
+class Flag_Register {
+ public:
+ int32_t _value;
+
+ bool overflow() const { return ((_value >> 11) & 1) != 0; }
+ bool direction() const { return ((_value >> 10) & 1) != 0; }
+ bool sign() const { return ((_value >> 7) & 1) != 0; }
+ bool zero() const { return ((_value >> 6) & 1) != 0; }
+ bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
+ bool parity() const { return ((_value >> 2) & 1) != 0; }
+ bool carry() const { return ((_value >> 0) & 1) != 0; }
+
+ void print() const {
+ // flags
+ char f[8];
+ f[0] = (overflow ()) ? 'O' : '-';
+ f[1] = (direction ()) ? 'D' : '-';
+ f[2] = (sign ()) ? 'S' : '-';
+ f[3] = (zero ()) ? 'Z' : '-';
+ f[4] = (auxiliary_carry()) ? 'A' : '-';
+ f[5] = (parity ()) ? 'P' : '-';
+ f[6] = (carry ()) ? 'C' : '-';
+ f[7] = '\x0';
+ // output
+ printf("%08x flags = %s", _value, f);
+ }
+
+};
+
+class IU_Register {
+ public:
+ int32_t _value;
+
+ void print() const {
+ printf("%08x %11d", _value, _value);
+ }
+
+};
+
+class IU_State {
+ public:
+ Flag_Register _eflags;
+ IU_Register _rdi;
+ IU_Register _rsi;
+ IU_Register _rbp;
+ IU_Register _rsp;
+ IU_Register _rbx;
+ IU_Register _rdx;
+ IU_Register _rcx;
+ IU_Register _rax;
+
+ void print() const {
+ // computation registers
+ printf("rax, = "); _rax.print(); printf("\n");
+ printf("rbx, = "); _rbx.print(); printf("\n");
+ printf("rcx = "); _rcx.print(); printf("\n");
+ printf("rdx = "); _rdx.print(); printf("\n");
+ printf("rdi = "); _rdi.print(); printf("\n");
+ printf("rsi = "); _rsi.print(); printf("\n");
+ printf("rbp, = "); _rbp.print(); printf("\n");
+ printf("rsp = "); _rsp.print(); printf("\n");
+ printf("\n");
+ // control registers
+ printf("flgs = "); _eflags.print(); printf("\n");
+ }
+};
+
+
+class CPU_State {
+ public:
+ FPU_State _fpu_state;
+ IU_State _iu_state;
+
+ void print() const {
+ printf("--------------------------------------------------\n");
+ _iu_state .print();
+ printf("\n");
+ _fpu_state.print();
+ printf("--------------------------------------------------\n");
+ }
+
+};
+
+
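+// Note (assumption, for readability only): the CPU_State layout above is meant to
+// mirror the order in which push_CPU_state() saves state, so the struct can be
+// overlaid on the saved area handed to _print_CPU_state / _verify_FPU.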
+static void _print_CPU_state(CPU_State* state) {
+ state->print();
+};
+
+
+void MacroAssembler::print_CPU_state() {
+ push_CPU_state();
+ push(rsp); // pass CPU state
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
+ addptr(rsp, wordSize); // discard argument
+ pop_CPU_state();
}
+static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
+ static int counter = 0;
+ FPU_State* fs = &state->_fpu_state;
+ counter++;
+ // For leaf calls, only verify that the top few elements remain empty.
+ // We only need 1 empty at the top for C2 code.
+ if( stack_depth < 0 ) {
+ if( fs->tag_for_st(7) != 3 ) {
+ printf("FPR7 not empty\n");
+ state->print();
+ assert(false, "error");
+ return false;
+ }
+ return true; // All other stack states do not matter
+ }
+
+ assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
+ "bad FPU control word");
+
+ // compute stack depth
+ int i = 0;
+ while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
+ int d = i;
+ while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
+ // verify findings
+ if (i != FPU_State::number_of_registers) {
+ // stack not contiguous
+ printf("%s: stack not contiguous at ST%d\n", s, i);
+ state->print();
+ assert(false, "error");
+ return false;
+ }
+ // check if computed stack depth corresponds to expected stack depth
+ if (stack_depth < 0) {
+ // expected stack depth is -stack_depth or less
+ if (d > -stack_depth) {
+ // too many elements on the stack
+ printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
+ state->print();
+ assert(false, "error");
+ return false;
+ }
+ } else {
+ // expected stack depth is stack_depth
+ if (d != stack_depth) {
+ // wrong stack depth
+ printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
+ state->print();
+ assert(false, "error");
+ return false;
+ }
+ }
+ // everything is cool
+ return true;
+}
+
+
+void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
+ if (!VerifyFPU) return;
+ push_CPU_state();
+ push(rsp); // pass CPU state
+ ExternalAddress msg((address) s);
+ // pass message string s
+ pushptr(msg.addr());
+ push(stack_depth); // pass stack depth
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
+ addptr(rsp, 3 * wordSize); // discard arguments
+ // check for error
+ { Label L;
+ testl(rax, rax);
+ jcc(Assembler::notZero, L);
+ int3(); // break if error condition
+ bind(L);
+ }
+ pop_CPU_state();
+}
+
void MacroAssembler::load_klass(Register dst, Register src) {
+#ifdef _LP64
if (UseCompressedOops) {
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
decode_heap_oop_not_null(dst);
- } else {
- movq(dst, Address(src, oopDesc::klass_offset_in_bytes()));
- }
+ } else
+#endif
+ movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
}
void MacroAssembler::load_prototype_header(Register dst, Register src) {
+#ifdef _LP64
if (UseCompressedOops) {
movl(dst, Address(src, oopDesc::klass_offset_in_bytes()));
movq(dst, Address(r12_heapbase, dst, Address::times_8, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
- } else {
- movq(dst, Address(src, oopDesc::klass_offset_in_bytes()));
- movq(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
- }
+ } else
+#endif
+ {
+ movptr(dst, Address(src, oopDesc::klass_offset_in_bytes()));
+ movptr(dst, Address(dst, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ }
}
void MacroAssembler::store_klass(Register dst, Register src) {
+#ifdef _LP64
if (UseCompressedOops) {
encode_heap_oop_not_null(src);
movl(Address(dst, oopDesc::klass_offset_in_bytes()), src);
- } else {
- movq(Address(dst, oopDesc::klass_offset_in_bytes()), src);
- }
+ } else
+#endif
+ movptr(Address(dst, oopDesc::klass_offset_in_bytes()), src);
}
+#ifdef _LP64
void MacroAssembler::store_klass_gap(Register dst, Register src) {
if (UseCompressedOops) {
// Store to klass gap in destination
@@ -5206,12 +7357,12 @@ void MacroAssembler::encode_heap_oop(Register r) {
#ifdef ASSERT
if (CheckCompressedOops) {
Label ok;
- pushq(rscratch1); // cmpptr trashes rscratch1
+ push(rscratch1); // cmpptr trashes rscratch1
cmpptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
jcc(Assembler::equal, ok);
stop("MacroAssembler::encode_heap_oop: heap base corrupted?");
bind(ok);
- popq(rscratch1);
+ pop(rscratch1);
}
#endif
verify_oop(r, "broken oop in encode_heap_oop");
@@ -5261,13 +7412,13 @@ void MacroAssembler::decode_heap_oop(Register r) {
#ifdef ASSERT
if (CheckCompressedOops) {
Label ok;
- pushq(rscratch1);
+ push(rscratch1);
cmpptr(r12_heapbase,
ExternalAddress((address)Universe::heap_base_addr()));
jcc(Assembler::equal, ok);
stop("MacroAssembler::decode_heap_oop: heap base corrupted?");
bind(ok);
- popq(rscratch1);
+ pop(rscratch1);
}
#endif
@@ -5307,14 +7458,15 @@ void MacroAssembler::set_narrow_oop(Register dst, jobject obj) {
assert(oop_recorder() != NULL, "this assembler needs an OopRecorder");
int oop_index = oop_recorder()->find_index(obj);
RelocationHolder rspec = oop_Relocation::spec(oop_index);
-
- // movl dst,obj
- InstructionMark im(this);
- int encode = prefix_and_encode(dst->encoding());
- emit_byte(0xB8 | encode);
- emit_data(oop_index, rspec, narrow_oop_operand);
+ mov_literal32(dst, oop_index, rspec, narrow_oop_operand);
}
+void MacroAssembler::reinit_heapbase() {
+ if (UseCompressedOops) {
+ movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
+ }
+}
+#endif // _LP64
Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
switch (cond) {
@@ -5339,23 +7491,6 @@ Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond)
ShouldNotReachHere(); return Assembler::overflow;
}
-
-void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
- Condition negated_cond = negate_condition(cond);
- Label L;
- jcc(negated_cond, L);
- atomic_incl(counter_addr);
- bind(L);
-}
-
-void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
- pushfq();
- if (os::is_MP())
- lock();
- incrementl(counter_addr);
- popfq();
-}
-
SkipIfEqual::SkipIfEqual(
MacroAssembler* masm, const bool* flag_addr, bool value) {
_masm = masm;
@@ -5366,28 +7501,3 @@ SkipIfEqual::SkipIfEqual(
SkipIfEqual::~SkipIfEqual() {
_masm->bind(_label);
}
-
-void MacroAssembler::bang_stack_size(Register size, Register tmp) {
- movq(tmp, rsp);
- // Bang stack for total size given plus shadow page size.
- // Bang one page at a time because large size can bang beyond yellow and
- // red zones.
- Label loop;
- bind(loop);
- movl(Address(tmp, (-os::vm_page_size())), size );
- subq(tmp, os::vm_page_size());
- subl(size, os::vm_page_size());
- jcc(Assembler::greater, loop);
-
- // Bang down shadow pages too.
- // The -1 because we already subtracted 1 page.
- for (int i = 0; i< StackShadowPages-1; i++) {
- movq(Address(tmp, (-i*os::vm_page_size())), size );
- }
-}
-
-void MacroAssembler::reinit_heapbase() {
- if (UseCompressedOops) {
- movptr(r12_heapbase, ExternalAddress((address)Universe::heap_base_addr()));
- }
-}
diff --git a/src/cpu/x86/vm/assembler_x86_32.hpp b/src/cpu/x86/vm/assembler_x86.hpp
index 958844995..d9637ffc9 100644
--- a/src/cpu/x86/vm/assembler_x86_32.hpp
+++ b/src/cpu/x86/vm/assembler_x86.hpp
@@ -58,10 +58,10 @@ REGISTER_DECLARATION(Register, c_rarg1, rdx);
REGISTER_DECLARATION(Register, c_rarg2, r8);
REGISTER_DECLARATION(Register, c_rarg3, r9);
-REGISTER_DECLARATION(FloatRegister, c_farg0, xmm0);
-REGISTER_DECLARATION(FloatRegister, c_farg1, xmm1);
-REGISTER_DECLARATION(FloatRegister, c_farg2, xmm2);
-REGISTER_DECLARATION(FloatRegister, c_farg3, xmm3);
+REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
+REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
+REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
+REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
#else
@@ -72,14 +72,14 @@ REGISTER_DECLARATION(Register, c_rarg3, rcx);
REGISTER_DECLARATION(Register, c_rarg4, r8);
REGISTER_DECLARATION(Register, c_rarg5, r9);
-REGISTER_DECLARATION(FloatRegister, c_farg0, xmm0);
-REGISTER_DECLARATION(FloatRegister, c_farg1, xmm1);
-REGISTER_DECLARATION(FloatRegister, c_farg2, xmm2);
-REGISTER_DECLARATION(FloatRegister, c_farg3, xmm3);
-REGISTER_DECLARATION(FloatRegister, c_farg4, xmm4);
-REGISTER_DECLARATION(FloatRegister, c_farg5, xmm5);
-REGISTER_DECLARATION(FloatRegister, c_farg6, xmm6);
-REGISTER_DECLARATION(FloatRegister, c_farg7, xmm7);
+REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
+REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
+REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
+REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
+REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
+REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
+REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
+REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
#endif // _WIN64
@@ -112,20 +112,27 @@ REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
#endif /* _WIN64 */
REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
-REGISTER_DECLARATION(FloatRegister, j_farg0, xmm0);
-REGISTER_DECLARATION(FloatRegister, j_farg1, xmm1);
-REGISTER_DECLARATION(FloatRegister, j_farg2, xmm2);
-REGISTER_DECLARATION(FloatRegister, j_farg3, xmm3);
-REGISTER_DECLARATION(FloatRegister, j_farg4, xmm4);
-REGISTER_DECLARATION(FloatRegister, j_farg5, xmm5);
-REGISTER_DECLARATION(FloatRegister, j_farg6, xmm6);
-REGISTER_DECLARATION(FloatRegister, j_farg7, xmm7);
+REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
+REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
+REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
+REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
+REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
+REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
+REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
+REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
REGISTER_DECLARATION(Register, rscratch1, r10); // volatile
REGISTER_DECLARATION(Register, rscratch2, r11); // volatile
+REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
+#else
+// rscratch1 will appear in 32bit code that is dead but of course must compile
+// Using noreg ensures that if the dead code is incorrectly live and executed it
+// will cause an assertion failure
+#define rscratch1 noreg
+
#endif // _LP64
// Address is an abstraction used to represent a memory location
@@ -143,7 +150,8 @@ class Address VALUE_OBJ_CLASS_SPEC {
times_1 = 0,
times_2 = 1,
times_4 = 2,
- times_8 = 3
+ times_8 = 3,
+ times_ptr = LP64_ONLY(times_8) NOT_LP64(times_4)
};
private:
@@ -153,12 +161,15 @@ class Address VALUE_OBJ_CLASS_SPEC {
int _disp;
RelocationHolder _rspec;
- // Easily misused constructor make them private
-#ifndef _LP64
- Address(address loc, RelocationHolder spec);
-#endif // _LP64
+ // Easily misused constructors make them private
+ // %%% can we make these go away?
+ NOT_LP64(Address(address loc, RelocationHolder spec);)
+ Address(int disp, address loc, relocInfo::relocType rtype);
+ Address(int disp, address loc, RelocationHolder spec);
public:
+
+ int disp() { return _disp; }
// creation
Address()
: _base(noreg),
@@ -358,11 +369,7 @@ class ArrayAddress VALUE_OBJ_CLASS_SPEC {
};
-#ifndef _LP64
-const int FPUStateSizeInWords = 27;
-#else
-const int FPUStateSizeInWords = 512 / wordSize;
-#endif // _LP64
+const int FPUStateSizeInWords = NOT_LP64(27) LP64_ONLY( 512 / wordSize);
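+// Illustrative sizing note (assumption, not in the original change): 27 words is the
+// 108-byte FNSAVE image used on 32-bit, while 512/wordSize covers the 512-byte
+// FXSAVE image used on 64-bit.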
// The Intel x86/Amd64 Assembler: Pure assembler doing NO optimizations on the instruction
// level (e.g. mov rax, 0 is not translated into xor rax, rax!); i.e., what you write
@@ -371,62 +378,7 @@ const int FPUStateSizeInWords = 512 / wordSize;
class Assembler : public AbstractAssembler {
friend class AbstractAssembler; // for the non-virtual hack
friend class LIR_Assembler; // as_Address()
-
- protected:
- #ifdef ASSERT
- void check_relocation(RelocationHolder const& rspec, int format);
- #endif
-
- inline void emit_long64(jlong x);
-
- void emit_data(jint data, relocInfo::relocType rtype, int format /* = 0 */);
- void emit_data(jint data, RelocationHolder const& rspec, int format /* = 0 */);
- void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
- void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
-
- // Helper functions for groups of instructions
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
-
- void emit_arith(int op1, int op2, Register dst, int imm32);
- // only x86??
- void emit_arith(int op1, int op2, Register dst, jobject obj);
- void emit_arith(int op1, int op2, Register dst, Register src);
-
- void emit_operand(Register reg,
- Register base, Register index, Address::ScaleFactor scale,
- int disp,
- RelocationHolder const& rspec);
- void emit_operand(Register reg, Address adr);
-
- // Immediate-to-memory forms
- void emit_arith_operand(int op1, Register rm, Address adr, int imm32);
-
- void emit_farith(int b1, int b2, int i);
-
- // macroassembler?? QQQ
- bool reachable(AddressLiteral adr) { return true; }
-
- // These are all easily abused and hence protected
-
- // Make these disappear in 64bit mode since they would never be correct
-#ifndef _LP64
- void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);
- void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);
-
- void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);
- void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);
-
- void push_literal32(int32_t imm32, RelocationHolder const& rspec);
-#endif // _LP64
-
- // These are unique in that we are ensured by the caller that the 32bit
- // relative in these instructions will always be able to reach the potentially
- // 64bit address described by entry. Since they can take a 64bit address they
- // don't have the 32 suffix like the other instructions in this class.
-
- void call_literal(address entry, RelocationHolder const& rspec);
- void jmp_literal(address entry, RelocationHolder const& rspec);
-
+ friend class StubGenerator;
public:
enum Condition { // The x86 condition codes used for conditional jumps/moves.
@@ -484,113 +436,135 @@ class Assembler : public AbstractAssembler {
enum WhichOperand {
// input to locate_operand, and format code for relocations
- imm32_operand = 0, // embedded 32-bit immediate operand
+ imm_operand = 0, // embedded 32-bit|64-bit immediate operand
disp32_operand = 1, // embedded 32-bit displacement or address
call32_operand = 2, // embedded 32-bit self-relative displacement
+#ifndef _LP64
_WhichOperand_limit = 3
+#else
+ narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop
+ _WhichOperand_limit = 4
+#endif
};
- public:
- // Creation
- Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
- // Decoding
- static address locate_operand(address inst, WhichOperand which);
- static address locate_next_instruction(address inst);
+  // NOTE: The general philosophy of the declarations here is that 64bit versions
+  // of instructions are freely declared without the need for wrapping them in an ifdef.
+  // (Some dangerous instructions are ifdef'd out of inappropriate jvms.)
+ // In the .cpp file the implementations are wrapped so that they are dropped out
+ // of the resulting jvm. This is done mostly to keep the footprint of KERNEL
+ // to the size it was prior to merging up the 32bit and 64bit assemblers.
+ //
+ // This does mean you'll get a linker/runtime error if you use a 64bit only instruction
+ // in a 32bit vm. This is somewhat unfortunate but keeps the ifdef noise down.
- // Stack
- void pushad();
- void popad();
+private:
- void pushfd();
- void popfd();
- void pushl(int imm32);
- void pushoop(jobject obj);
+ // 64bit prefixes
+ int prefix_and_encode(int reg_enc, bool byteinst = false);
+ int prefixq_and_encode(int reg_enc);
- void pushl(Register src);
- void pushl(Address src);
- // void pushl(Label& L, relocInfo::relocType rtype); ? needed?
+ int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false);
+ int prefixq_and_encode(int dst_enc, int src_enc);
- // dummy to prevent NULL being converted to Register
- void pushl(void* dummy);
+ void prefix(Register reg);
+ void prefix(Address adr);
+ void prefixq(Address adr);
- void popl(Register dst);
- void popl(Address dst);
-
- // Instruction prefixes
- void prefix(Prefix p);
+ void prefix(Address adr, Register reg, bool byteinst = false);
+ void prefixq(Address adr, Register reg);
- // Moves
- void movb(Register dst, Address src);
- void movb(Address dst, int imm8);
- void movb(Address dst, Register src);
+ void prefix(Address adr, XMMRegister reg);
- void movw(Address dst, int imm16);
- void movw(Register dst, Address src);
- void movw(Address dst, Register src);
+ void prefetch_prefix(Address src);
- // these are dummies used to catch attempting to convert NULL to Register
- void movl(Register dst, void* junk);
- void movl(Address dst, void* junk);
+ // Helper functions for groups of instructions
+ void emit_arith_b(int op1, int op2, Register dst, int imm8);
- void movl(Register dst, int imm32);
- void movl(Address dst, int imm32);
- void movl(Register dst, Register src);
- void movl(Register dst, Address src);
- void movl(Address dst, Register src);
+ void emit_arith(int op1, int op2, Register dst, int32_t imm32);
+ // only 32bit??
+ void emit_arith(int op1, int op2, Register dst, jobject obj);
+ void emit_arith(int op1, int op2, Register dst, Register src);
- void movsxb(Register dst, Address src);
- void movsxb(Register dst, Register src);
+ void emit_operand(Register reg,
+ Register base, Register index, Address::ScaleFactor scale,
+ int disp,
+ RelocationHolder const& rspec,
+ int rip_relative_correction = 0);
- void movsxw(Register dst, Address src);
- void movsxw(Register dst, Register src);
+ void emit_operand(Register reg, Address adr, int rip_relative_correction = 0);
- void movzxb(Register dst, Address src);
- void movzxb(Register dst, Register src);
+ // operands that only take the original 32bit registers
+ void emit_operand32(Register reg, Address adr);
- void movzxw(Register dst, Address src);
- void movzxw(Register dst, Register src);
+ void emit_operand(XMMRegister reg,
+ Register base, Register index, Address::ScaleFactor scale,
+ int disp,
+ RelocationHolder const& rspec);
- // Conditional moves (P6 only)
- void cmovl(Condition cc, Register dst, Register src);
- void cmovl(Condition cc, Register dst, Address src);
+ void emit_operand(XMMRegister reg, Address adr);
- // Prefetches (SSE, SSE2, 3DNOW only)
- void prefetcht0(Address src);
- void prefetcht1(Address src);
- void prefetcht2(Address src);
- void prefetchnta(Address src);
- void prefetchw(Address src);
- void prefetchr(Address src);
+ void emit_operand(MMXRegister reg, Address adr);
- // Arithmetics
- void adcl(Register dst, int imm32);
- void adcl(Register dst, Address src);
- void adcl(Register dst, Register src);
+ // workaround gcc (3.2.1-7) bug
+ void emit_operand(Address adr, MMXRegister reg);
- void addl(Address dst, int imm32);
- void addl(Address dst, Register src);
- void addl(Register dst, int imm32);
- void addl(Register dst, Address src);
- void addl(Register dst, Register src);
- void andl(Register dst, int imm32);
- void andl(Register dst, Address src);
- void andl(Register dst, Register src);
+ // Immediate-to-memory forms
+ void emit_arith_operand(int op1, Register rm, Address adr, int32_t imm32);
- void cmpb(Address dst, int imm8);
- void cmpw(Address dst, int imm16);
- void cmpl(Address dst, int imm32);
- void cmpl(Register dst, int imm32);
- void cmpl(Register dst, Register src);
- void cmpl(Register dst, Address src);
+ void emit_farith(int b1, int b2, int i);
- // this is a dummy used to catch attempting to convert NULL to Register
- void cmpl(Register dst, void* junk);
protected:
+ #ifdef ASSERT
+ void check_relocation(RelocationHolder const& rspec, int format);
+ #endif
+
+ inline void emit_long64(jlong x);
+
+ void emit_data(jint data, relocInfo::relocType rtype, int format);
+ void emit_data(jint data, RelocationHolder const& rspec, int format);
+ void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
+ void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
+
+
+ bool reachable(AddressLiteral adr) NOT_LP64({ return true;});
+
+ // These are all easily abused and hence protected
+
+ void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec, int format = 0);
+
+ // 32BIT ONLY SECTION
+#ifndef _LP64
+ // Make these disappear in 64bit mode since they would never be correct
+ void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
+ void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
+
+ void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
+
+ void push_literal32(int32_t imm32, RelocationHolder const& rspec); // 32BIT ONLY
+#else
+ // 64BIT ONLY SECTION
+ void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec); // 64BIT ONLY
+#endif // _LP64
+
+  // These are unique in that the caller ensures that the 32bit relative
+  // displacement in these instructions will always be able to reach the
+  // potentially 64bit address described by entry. Since they can take a 64bit
+  // address they don't have the 32 suffix like the other instructions in this class.
+
+ void call_literal(address entry, RelocationHolder const& rspec);
+ void jmp_literal(address entry, RelocationHolder const& rspec);
+
+ // Avoid using directly section
+ // Instructions in this section are actually usable by anyone without danger
+  // of failure but have performance issues that are addressed by enhanced
+  // instructions which will do the proper thing based on the particular cpu.
+ // We protect them because we don't trust you...
+
// Don't use next inc() and dec() methods directly. INC & DEC instructions
// could cause a partial flag stall since they don't set CF flag.
// Use MacroAssembler::decrement() & MacroAssembler::increment() methods
@@ -599,119 +573,394 @@ class Assembler : public AbstractAssembler {
void decl(Register dst);
void decl(Address dst);
+ void decq(Register dst);
+ void decq(Address dst);
void incl(Register dst);
void incl(Address dst);
+ void incq(Register dst);
+ void incq(Address dst);
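A quick illustration of the guidance above, as a sketch only (not part of this patch; it assumes the usual `__` shorthand for the current MacroAssembler and the register names declared elsewhere in this port):

    // __ incl(rcx);                       // discouraged: INC leaves CF untouched, risking a partial flag stall
    __ increment(rcx);                     // preferred: MacroAssembler picks a stall-free encoding per platform
    __ incrementl(Address(rsp, 0), 8);     // explicit 32bit form for a memory operand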
- public:
- void idivl(Register src);
- void cdql();
+ // New cpus require use of movsd and movss to avoid partial register stall
+ // when loading from memory. But for old Opteron use movlpd instead of movsd.
+ // The selection is done in MacroAssembler::movdbl() and movflt().
- void imull(Register dst, Register src);
- void imull(Register dst, Register src, int value);
+ // Move Scalar Single-Precision Floating-Point Values
+ void movss(XMMRegister dst, Address src);
+ void movss(XMMRegister dst, XMMRegister src);
+ void movss(Address dst, XMMRegister src);
- void leal(Register dst, Address src);
+ // Move Scalar Double-Precision Floating-Point Values
+ void movsd(XMMRegister dst, Address src);
+ void movsd(XMMRegister dst, XMMRegister src);
+ void movsd(Address dst, XMMRegister src);
+ void movlpd(XMMRegister dst, Address src);
- void mull(Address src);
- void mull(Register src);
+ // New cpus require use of movaps and movapd to avoid partial register stall
+ // when moving between registers.
+ void movaps(XMMRegister dst, XMMRegister src);
+ void movapd(XMMRegister dst, XMMRegister src);
- void negl(Register dst);
+ // End avoid using directly
- void notl(Register dst);
- void orl(Address dst, int imm32);
- void orl(Register dst, int imm32);
- void orl(Register dst, Address src);
- void orl(Register dst, Register src);
+ // Instruction prefixes
+ void prefix(Prefix p);
- void rcll(Register dst, int imm8);
+ public:
- void sarl(Register dst, int imm8);
- void sarl(Register dst);
+ // Creation
+ Assembler(CodeBuffer* code) : AbstractAssembler(code) {}
- void sbbl(Address dst, int imm32);
- void sbbl(Register dst, int imm32);
- void sbbl(Register dst, Address src);
- void sbbl(Register dst, Register src);
+ // Decoding
+ static address locate_operand(address inst, WhichOperand which);
+ static address locate_next_instruction(address inst);
- void shldl(Register dst, Register src);
+ // Utilities
- void shll(Register dst, int imm8);
- void shll(Register dst);
+#ifdef _LP64
+ static bool is_simm(int64_t x, int nbits) { return -( CONST64(1) << (nbits-1) ) <= x && x < ( CONST64(1) << (nbits-1) ); }
+ static bool is_simm32(int64_t x) { return x == (int64_t)(int32_t)x; }
+#else
+ static bool is_simm(int32_t x, int nbits) { return -( 1 << (nbits-1) ) <= x && x < ( 1 << (nbits-1) ); }
+ static bool is_simm32(int32_t x) { return true; }
+#endif // LP64
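For orientation, the predicates above accept exactly the values that survive sign-extension at the given width; a few illustrative values (not part of the patch):

    //   Assembler::is_simm(127, 8)                  -> true
    //   Assembler::is_simm(-128, 8)                 -> true
    //   Assembler::is_simm(128, 8)                  -> false
    //   Assembler::is_simm32((int64_t) 0x7FFFFFFF)  -> true
    //   Assembler::is_simm32((int64_t) 0x80000000)  -> false on LP64 (value changes under sign-extension)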
- void shrdl(Register dst, Register src);
+ // Generic instructions
+ // Does 32bit or 64bit as needed for the platform. In some sense these
+ // belong in macro assembler but there is no need for both varieties to exist
- void shrl(Register dst, int imm8);
- void shrl(Register dst);
+ void lea(Register dst, Address src);
- void subl(Address dst, int imm32);
- void subl(Address dst, Register src);
- void subl(Register dst, int imm32);
- void subl(Register dst, Address src);
- void subl(Register dst, Register src);
+ void mov(Register dst, Register src);
- void testb(Register dst, int imm8);
- void testl(Register dst, int imm32);
- void testl(Register dst, Address src);
- void testl(Register dst, Register src);
+ void pusha();
+ void popa();
- void xaddl(Address dst, Register src);
+ void pushf();
+ void popf();
- void xorl(Register dst, int imm32);
- void xorl(Register dst, Address src);
- void xorl(Register dst, Register src);
+ void push(int32_t imm32);
- // Miscellaneous
- void bswap(Register reg);
- void lock();
+ void push(Register src);
- void xchg (Register reg, Address adr);
- void xchgl(Register dst, Register src);
+ void pop(Register dst);
+
+ // These are dummies to prevent surprise implicit conversions to Register
+ void push(void* v);
+ void pop(void* v);
+
+
+ // These do register sized moves/scans
+ void rep_mov();
+ void rep_set();
+ void repne_scan();
+#ifdef _LP64
+ void repne_scanl();
+#endif
+
+ // Vanilla instructions in lexical order
+
+ void adcl(Register dst, int32_t imm32);
+ void adcl(Register dst, Address src);
+ void adcl(Register dst, Register src);
+
+ void adcq(Register dst, int32_t imm32);
+ void adcq(Register dst, Address src);
+ void adcq(Register dst, Register src);
+
+
+ void addl(Address dst, int32_t imm32);
+ void addl(Address dst, Register src);
+ void addl(Register dst, int32_t imm32);
+ void addl(Register dst, Address src);
+ void addl(Register dst, Register src);
+
+ void addq(Address dst, int32_t imm32);
+ void addq(Address dst, Register src);
+ void addq(Register dst, int32_t imm32);
+ void addq(Register dst, Address src);
+ void addq(Register dst, Register src);
- void cmpxchg (Register reg, Address adr);
- void cmpxchg8 (Address adr);
- void nop(int i = 1);
void addr_nop_4();
void addr_nop_5();
void addr_nop_7();
void addr_nop_8();
- void hlt();
- void ret(int imm16);
- void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
- void smovl();
- void rep_movl();
- void rep_set();
- void repne_scan();
- void setb(Condition cc, Register dst);
- void membar(); // Serializing memory-fence
- void cpuid();
- void cld();
- void std();
+ // Add Scalar Double-Precision Floating-Point Values
+ void addsd(XMMRegister dst, Address src);
+ void addsd(XMMRegister dst, XMMRegister src);
- void emit_raw (unsigned char);
+ // Add Scalar Single-Precision Floating-Point Values
+ void addss(XMMRegister dst, Address src);
+ void addss(XMMRegister dst, XMMRegister src);
+
+ void andl(Register dst, int32_t imm32);
+ void andl(Register dst, Address src);
+ void andl(Register dst, Register src);
+
+ void andq(Register dst, int32_t imm32);
+ void andq(Register dst, Address src);
+ void andq(Register dst, Register src);
+
+
+ // Bitwise Logical AND of Packed Double-Precision Floating-Point Values
+ void andpd(XMMRegister dst, Address src);
+ void andpd(XMMRegister dst, XMMRegister src);
+
+ void bswapl(Register reg);
+
+ void bswapq(Register reg);
- // Calls
void call(Label& L, relocInfo::relocType rtype);
void call(Register reg); // push pc; pc <- reg
void call(Address adr); // push pc; pc <- adr
- // Jumps
- void jmp(Address entry); // pc <- entry
- void jmp(Register entry); // pc <- entry
+ void cdql();
- // Label operations & relative jumps (PPUM Appendix D)
- void jmp(Label& L, relocInfo::relocType rtype = relocInfo::none); // unconditional jump to L
+ void cdqq();
- // Force an 8-bit jump offset
- // void jmpb(address entry);
+ void cld() { emit_byte(0xfc); }
+
+ void clflush(Address adr);
+
+ void cmovl(Condition cc, Register dst, Register src);
+ void cmovl(Condition cc, Register dst, Address src);
+
+ void cmovq(Condition cc, Register dst, Register src);
+ void cmovq(Condition cc, Register dst, Address src);
+
+
+ void cmpb(Address dst, int imm8);
+
+ void cmpl(Address dst, int32_t imm32);
+
+ void cmpl(Register dst, int32_t imm32);
+ void cmpl(Register dst, Register src);
+ void cmpl(Register dst, Address src);
+
+ void cmpq(Address dst, int32_t imm32);
+ void cmpq(Address dst, Register src);
+
+ void cmpq(Register dst, int32_t imm32);
+ void cmpq(Register dst, Register src);
+ void cmpq(Register dst, Address src);
+
+ // these are dummies used to catch attempting to convert NULL to Register
+ void cmpl(Register dst, void* junk); // dummy
+ void cmpq(Register dst, void* junk); // dummy
+
+ void cmpw(Address dst, int imm16);
+
+ void cmpxchg8 (Address adr);
+
+ void cmpxchgl(Register reg, Address adr);
+
+ void cmpxchgq(Register reg, Address adr);
+
+ // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
+ void comisd(XMMRegister dst, Address src);
+
+ // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
+ void comiss(XMMRegister dst, Address src);
+
+ // Identify processor type and features
+ void cpuid() {
+ emit_byte(0x0F);
+ emit_byte(0xA2);
+ }
+
+ // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
+ void cvtsd2ss(XMMRegister dst, XMMRegister src);
+
+ // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
+ void cvtsi2sdl(XMMRegister dst, Register src);
+ void cvtsi2sdq(XMMRegister dst, Register src);
+
+ // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
+ void cvtsi2ssl(XMMRegister dst, Register src);
+ void cvtsi2ssq(XMMRegister dst, Register src);
+
+ // Convert Packed Signed Doubleword Integers to Packed Double-Precision Floating-Point Value
+ void cvtdq2pd(XMMRegister dst, XMMRegister src);
+
+ // Convert Packed Signed Doubleword Integers to Packed Single-Precision Floating-Point Value
+ void cvtdq2ps(XMMRegister dst, XMMRegister src);
+
+ // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
+ void cvtss2sd(XMMRegister dst, XMMRegister src);
+
+ // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
+ void cvttsd2sil(Register dst, Address src);
+ void cvttsd2sil(Register dst, XMMRegister src);
+ void cvttsd2siq(Register dst, XMMRegister src);
+
+ // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
+ void cvttss2sil(Register dst, XMMRegister src);
+ void cvttss2siq(Register dst, XMMRegister src);
+
+ // Divide Scalar Double-Precision Floating-Point Values
+ void divsd(XMMRegister dst, Address src);
+ void divsd(XMMRegister dst, XMMRegister src);
+
+ // Divide Scalar Single-Precision Floating-Point Values
+ void divss(XMMRegister dst, Address src);
+ void divss(XMMRegister dst, XMMRegister src);
+
+ void emms();
+
+ void fabs();
+
+ void fadd(int i);
+
+ void fadd_d(Address src);
+ void fadd_s(Address src);
+
+ // "Alternate" versions of x87 instructions place result down in FPU
+ // stack instead of on TOS
+
+ void fadda(int i); // "alternate" fadd
+ void faddp(int i = 1);
+
+ void fchs();
+
+ void fcom(int i);
+
+ void fcomp(int i = 1);
+ void fcomp_d(Address src);
+ void fcomp_s(Address src);
+
+ void fcompp();
+
+ void fcos();
+
+ void fdecstp();
+
+ void fdiv(int i);
+ void fdiv_d(Address src);
+ void fdivr_s(Address src);
+ void fdiva(int i); // "alternate" fdiv
+ void fdivp(int i = 1);
+
+ void fdivr(int i);
+ void fdivr_d(Address src);
+ void fdiv_s(Address src);
+
+ void fdivra(int i); // "alternate" reversed fdiv
+
+ void fdivrp(int i = 1);
+
+ void ffree(int i = 0);
+
+ void fild_d(Address adr);
+ void fild_s(Address adr);
+
+ void fincstp();
+
+ void finit();
+
+ void fist_s (Address adr);
+ void fistp_d(Address adr);
+ void fistp_s(Address adr);
+
+ void fld1();
+
+ void fld_d(Address adr);
+ void fld_s(Address adr);
+ void fld_s(int index);
+ void fld_x(Address adr); // extended-precision (80-bit) format
+
+ void fldcw(Address src);
+
+ void fldenv(Address src);
+
+ void fldlg2();
+
+ void fldln2();
+
+ void fldz();
+
+ void flog();
+ void flog10();
+
+ void fmul(int i);
+
+ void fmul_d(Address src);
+ void fmul_s(Address src);
+
+ void fmula(int i); // "alternate" fmul
+
+ void fmulp(int i = 1);
+
+ void fnsave(Address dst);
+
+ void fnstcw(Address src);
+
+ void fnstsw_ax();
+
+ void fprem();
+ void fprem1();
+
+ void frstor(Address src);
+
+ void fsin();
+
+ void fsqrt();
+
+ void fst_d(Address adr);
+ void fst_s(Address adr);
+
+ void fstp_d(Address adr);
+ void fstp_d(int index);
+ void fstp_s(Address adr);
+ void fstp_x(Address adr); // extended-precision (80-bit) format
+
+ void fsub(int i);
+ void fsub_d(Address src);
+ void fsub_s(Address src);
+
+ void fsuba(int i); // "alternate" fsub
+
+ void fsubp(int i = 1);
+
+ void fsubr(int i);
+ void fsubr_d(Address src);
+ void fsubr_s(Address src);
+
+ void fsubra(int i); // "alternate" reversed fsub
+
+ void fsubrp(int i = 1);
+
+ void ftan();
+
+ void ftst();
+
+ void fucomi(int i = 1);
+ void fucomip(int i = 1);
+
+ void fwait();
+
+ void fxch(int i = 1);
+
+ void fxrstor(Address src);
+
+ void fxsave(Address dst);
+
+ void fyl2x();
+
+ void hlt();
+
+ void idivl(Register src);
+
+ void idivq(Register src);
+
+ void imull(Register dst, Register src);
+ void imull(Register dst, Register src, int value);
+
+ void imulq(Register dst, Register src);
+ void imulq(Register dst, Register src, int value);
- // Unconditional 8-bit offset jump to L.
- // WARNING: be very careful using this for forward jumps. If the label is
- // not bound within an 8-bit offset of this instruction, a run-time error
- // will occur.
- void jmpb(Label& L);
// jcc is the generic conditional branch generator to run-
// time routines, jcc is used for branches to labels. jcc
@@ -737,250 +986,321 @@ class Assembler : public AbstractAssembler {
// will occur.
void jccb(Condition cc, Label& L);
- // Floating-point operations
- void fld1();
- void fldz();
+ void jmp(Address entry); // pc <- entry
- void fld_s(Address adr);
- void fld_s(int index);
- void fld_d(Address adr);
- void fld_x(Address adr); // extended-precision (80-bit) format
+ // Label operations & relative jumps (PPUM Appendix D)
+ void jmp(Label& L, relocInfo::relocType rtype = relocInfo::none); // unconditional jump to L
- void fst_s(Address adr);
- void fst_d(Address adr);
+ void jmp(Register entry); // pc <- entry
- void fstp_s(Address adr);
- void fstp_d(Address adr);
- void fstp_d(int index);
- void fstp_x(Address adr); // extended-precision (80-bit) format
+ // Unconditional 8-bit offset jump to L.
+ // WARNING: be very careful using this for forward jumps. If the label is
+ // not bound within an 8-bit offset of this instruction, a run-time error
+ // will occur.
+ void jmpb(Label& L);
- void fild_s(Address adr);
- void fild_d(Address adr);
+ void ldmxcsr( Address src );
- void fist_s (Address adr);
- void fistp_s(Address adr);
- void fistp_d(Address adr);
+ void leal(Register dst, Address src);
- void fabs();
- void fchs();
+ void leaq(Register dst, Address src);
- void flog();
- void flog10();
+ void lfence() {
+ emit_byte(0x0F);
+ emit_byte(0xAE);
+ emit_byte(0xE8);
+ }
- void fldln2();
- void fyl2x();
- void fldlg2();
+ void lock();
- void fcos();
- void fsin();
- void ftan();
- void fsqrt();
+ enum Membar_mask_bits {
+ StoreStore = 1 << 3,
+ LoadStore = 1 << 2,
+ StoreLoad = 1 << 1,
+ LoadLoad = 1 << 0
+ };
- // "Alternate" versions of instructions place result down in FPU
- // stack instead of on TOS
- void fadd_s(Address src);
- void fadd_d(Address src);
- void fadd(int i);
- void fadda(int i); // "alternate" fadd
+ // Serializes memory.
+ void membar(Membar_mask_bits order_constraint) {
+ // We only have to handle StoreLoad and LoadLoad
+ if (order_constraint & StoreLoad) {
+ // MFENCE subsumes LFENCE
+ mfence();
+ } /* [jk] not needed currently: else if (order_constraint & LoadLoad) {
+ lfence();
+ } */
+ }
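A typical call site for the mask bits above, sketched here only (assumes the usual `__` shorthand for the current MacroAssembler; not part of the patch):

    __ membar(Assembler::Membar_mask_bits(Assembler::StoreLoad | Assembler::StoreStore));
    // Only the StoreLoad bit emits anything (an mfence); the remaining orderings are
    // already provided by the x86 memory model, which is why membar() tests just that bit.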
- void fsub_s(Address src);
- void fsub_d(Address src);
- void fsubr_s(Address src);
- void fsubr_d(Address src);
+ void mfence();
- void fmul_s(Address src);
- void fmul_d(Address src);
- void fmul(int i);
- void fmula(int i); // "alternate" fmul
+ // Moves
- void fdiv_s(Address src);
- void fdiv_d(Address src);
- void fdivr_s(Address src);
- void fdivr_d(Address src);
+ void mov64(Register dst, int64_t imm64);
- void fsub(int i);
- void fsuba(int i); // "alternate" fsub
- void fsubr(int i);
- void fsubra(int i); // "alternate" reversed fsub
- void fdiv(int i);
- void fdiva(int i); // "alternate" fdiv
- void fdivr(int i);
- void fdivra(int i); // "alternate" reversed fdiv
+ void movb(Address dst, Register src);
+ void movb(Address dst, int imm8);
+ void movb(Register dst, Address src);
- void faddp(int i = 1);
- void fsubp(int i = 1);
- void fsubrp(int i = 1);
- void fmulp(int i = 1);
- void fdivp(int i = 1);
- void fdivrp(int i = 1);
- void fprem();
- void fprem1();
+ void movdl(XMMRegister dst, Register src);
+ void movdl(Register dst, XMMRegister src);
- void fxch(int i = 1);
- void fincstp();
- void fdecstp();
- void ffree(int i = 0);
+ // Move Double Quadword
+ void movdq(XMMRegister dst, Register src);
+ void movdq(Register dst, XMMRegister src);
- void fcomp_s(Address src);
- void fcomp_d(Address src);
- void fcom(int i);
- void fcomp(int i = 1);
- void fcompp();
+ // Move Aligned Double Quadword
+ void movdqa(Address dst, XMMRegister src);
+ void movdqa(XMMRegister dst, Address src);
+ void movdqa(XMMRegister dst, XMMRegister src);
- void fucomi(int i = 1);
- void fucomip(int i = 1);
+ void movl(Register dst, int32_t imm32);
+ void movl(Address dst, int32_t imm32);
+ void movl(Register dst, Register src);
+ void movl(Register dst, Address src);
+ void movl(Address dst, Register src);
- void ftst();
- void fnstsw_ax();
- void fwait();
- void finit();
- void fldcw(Address src);
- void fnstcw(Address src);
+ // These dummies prevent using movl from converting a zero (like NULL) into Register
+ // by giving the compiler two choices it can't resolve
- void fnsave(Address dst);
- void frstor(Address src);
- void fldenv(Address src);
+ void movl(Address dst, void* junk);
+ void movl(Register dst, void* junk);
- void sahf();
+#ifdef _LP64
+ void movq(Register dst, Register src);
+ void movq(Register dst, Address src);
+ void movq(Address dst, Register src);
+#endif
- protected:
- void emit_sse_operand(XMMRegister reg, Address adr);
- void emit_sse_operand(Register reg, Address adr);
- void emit_sse_operand(XMMRegister dst, XMMRegister src);
- void emit_sse_operand(XMMRegister dst, Register src);
- void emit_sse_operand(Register dst, XMMRegister src);
+ void movq(Address dst, MMXRegister src );
+ void movq(MMXRegister dst, Address src );
- void emit_operand(MMXRegister reg, Address adr);
+#ifdef _LP64
+ // These dummies prevent using movq from converting a zero (like NULL) into Register
+ // by giving the compiler two choices it can't resolve
- public:
- // mmx operations
- void movq( MMXRegister dst, Address src );
- void movq( Address dst, MMXRegister src );
- void emms();
+ void movq(Address dst, void* dummy);
+ void movq(Register dst, void* dummy);
+#endif
- // xmm operations
- void addss(XMMRegister dst, Address src); // Add Scalar Single-Precision Floating-Point Values
- void addss(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, Address src); // Add Scalar Double-Precision Floating-Point Values
- void addsd(XMMRegister dst, XMMRegister src);
+ // Move Quadword
+ void movq(Address dst, XMMRegister src);
+ void movq(XMMRegister dst, Address src);
- void subss(XMMRegister dst, Address src); // Subtract Scalar Single-Precision Floating-Point Values
- void subss(XMMRegister dst, XMMRegister src);
- void subsd(XMMRegister dst, Address src); // Subtract Scalar Double-Precision Floating-Point Values
- void subsd(XMMRegister dst, XMMRegister src);
+ void movsbl(Register dst, Address src);
+ void movsbl(Register dst, Register src);
- void mulss(XMMRegister dst, Address src); // Multiply Scalar Single-Precision Floating-Point Values
- void mulss(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, Address src); // Multiply Scalar Double-Precision Floating-Point Values
- void mulsd(XMMRegister dst, XMMRegister src);
+#ifdef _LP64
+ // Move signed 32bit immediate to 64bit extending sign
+ void movslq(Address dst, int32_t imm64);
+ void movslq(Register dst, int32_t imm64);
- void divss(XMMRegister dst, Address src); // Divide Scalar Single-Precision Floating-Point Values
- void divss(XMMRegister dst, XMMRegister src);
- void divsd(XMMRegister dst, Address src); // Divide Scalar Double-Precision Floating-Point Values
- void divsd(XMMRegister dst, XMMRegister src);
+ void movslq(Register dst, Address src);
+ void movslq(Register dst, Register src);
+ void movslq(Register dst, void* src); // Dummy declaration to cause NULL to be ambiguous
+#endif
- void sqrtss(XMMRegister dst, Address src); // Compute Square Root of Scalar Single-Precision Floating-Point Value
- void sqrtss(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, Address src); // Compute Square Root of Scalar Double-Precision Floating-Point Value
- void sqrtsd(XMMRegister dst, XMMRegister src);
+ void movswl(Register dst, Address src);
+ void movswl(Register dst, Register src);
- void pxor(XMMRegister dst, Address src); // Xor Packed Byte Integer Values
- void pxor(XMMRegister dst, XMMRegister src); // Xor Packed Byte Integer Values
+ void movw(Address dst, int imm16);
+ void movw(Register dst, Address src);
+ void movw(Address dst, Register src);
- void comiss(XMMRegister dst, Address src); // Ordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
- void comiss(XMMRegister dst, XMMRegister src);
- void comisd(XMMRegister dst, Address src); // Ordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
- void comisd(XMMRegister dst, XMMRegister src);
+ void movzbl(Register dst, Address src);
+ void movzbl(Register dst, Register src);
- void ucomiss(XMMRegister dst, Address src); // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
- void ucomiss(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, Address src); // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
- void ucomisd(XMMRegister dst, XMMRegister src);
+ void movzwl(Register dst, Address src);
+ void movzwl(Register dst, Register src);
- void cvtss2sd(XMMRegister dst, Address src); // Convert Scalar Single-Precision Floating-Point Value to Scalar Double-Precision Floating-Point Value
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, Address src); // Convert Scalar Double-Precision Floating-Point Value to Scalar Single-Precision Floating-Point Value
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
- void cvtdq2pd(XMMRegister dst, XMMRegister src);
- void cvtdq2ps(XMMRegister dst, XMMRegister src);
+ void mull(Address src);
+ void mull(Register src);
- void cvtsi2ss(XMMRegister dst, Address src); // Convert Doubleword Integer to Scalar Single-Precision Floating-Point Value
- void cvtsi2ss(XMMRegister dst, Register src);
- void cvtsi2sd(XMMRegister dst, Address src); // Convert Doubleword Integer to Scalar Double-Precision Floating-Point Value
- void cvtsi2sd(XMMRegister dst, Register src);
+ // Multiply Scalar Double-Precision Floating-Point Values
+ void mulsd(XMMRegister dst, Address src);
+ void mulsd(XMMRegister dst, XMMRegister src);
- void cvtss2si(Register dst, Address src); // Convert Scalar Single-Precision Floating-Point Value to Doubleword Integer
- void cvtss2si(Register dst, XMMRegister src);
- void cvtsd2si(Register dst, Address src); // Convert Scalar Double-Precision Floating-Point Value to Doubleword Integer
- void cvtsd2si(Register dst, XMMRegister src);
+ // Multiply Scalar Single-Precision Floating-Point Values
+ void mulss(XMMRegister dst, Address src);
+ void mulss(XMMRegister dst, XMMRegister src);
- void cvttss2si(Register dst, Address src); // Convert with Truncation Scalar Single-Precision Floating-Point Value to Doubleword Integer
- void cvttss2si(Register dst, XMMRegister src);
- void cvttsd2si(Register dst, Address src); // Convert with Truncation Scalar Double-Precision Floating-Point Value to Doubleword Integer
- void cvttsd2si(Register dst, XMMRegister src);
+ void negl(Register dst);
- protected: // Avoid using the next instructions directly.
- // New cpus require use of movsd and movss to avoid partial register stall
- // when loading from memory. But for old Opteron use movlpd instead of movsd.
- // The selection is done in MacroAssembler::movdbl() and movflt().
- void movss(XMMRegister dst, Address src); // Move Scalar Single-Precision Floating-Point Values
- void movss(XMMRegister dst, XMMRegister src);
- void movss(Address dst, XMMRegister src);
- void movsd(XMMRegister dst, Address src); // Move Scalar Double-Precision Floating-Point Values
- void movsd(XMMRegister dst, XMMRegister src);
- void movsd(Address dst, XMMRegister src);
- void movlpd(XMMRegister dst, Address src);
- // New cpus require use of movaps and movapd to avoid partial register stall
- // when moving between registers.
- void movaps(XMMRegister dst, XMMRegister src);
- void movapd(XMMRegister dst, XMMRegister src);
- public:
+#ifdef _LP64
+ void negq(Register dst);
+#endif
- void andps(XMMRegister dst, Address src); // Bitwise Logical AND of Packed Single-Precision Floating-Point Values
- void andps(XMMRegister dst, XMMRegister src);
- void andpd(XMMRegister dst, Address src); // Bitwise Logical AND of Packed Double-Precision Floating-Point Values
- void andpd(XMMRegister dst, XMMRegister src);
+ void nop(int i = 1);
- void andnps(XMMRegister dst, Address src); // Bitwise Logical AND NOT of Packed Single-Precision Floating-Point Values
- void andnps(XMMRegister dst, XMMRegister src);
- void andnpd(XMMRegister dst, Address src); // Bitwise Logical AND NOT of Packed Double-Precision Floating-Point Values
- void andnpd(XMMRegister dst, XMMRegister src);
+ void notl(Register dst);
- void orps(XMMRegister dst, Address src); // Bitwise Logical OR of Packed Single-Precision Floating-Point Values
- void orps(XMMRegister dst, XMMRegister src);
- void orpd(XMMRegister dst, Address src); // Bitwise Logical OR of Packed Double-Precision Floating-Point Values
- void orpd(XMMRegister dst, XMMRegister src);
+#ifdef _LP64
+ void notq(Register dst);
+#endif
- void xorps(XMMRegister dst, Address src); // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
- void xorps(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, Address src); // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
- void xorpd(XMMRegister dst, XMMRegister src);
+ void orl(Address dst, int32_t imm32);
+ void orl(Register dst, int32_t imm32);
+ void orl(Register dst, Address src);
+ void orl(Register dst, Register src);
- void movq(XMMRegister dst, Address src); // Move Quadword
- void movq(XMMRegister dst, XMMRegister src);
- void movq(Address dst, XMMRegister src);
+ void orq(Address dst, int32_t imm32);
+ void orq(Register dst, int32_t imm32);
+ void orq(Register dst, Address src);
+ void orq(Register dst, Register src);
- void movd(XMMRegister dst, Address src); // Move Doubleword
- void movd(XMMRegister dst, Register src);
- void movd(Register dst, XMMRegister src);
- void movd(Address dst, XMMRegister src);
+ void popl(Address dst);
- void movdqa(XMMRegister dst, Address src); // Move Aligned Double Quadword
- void movdqa(XMMRegister dst, XMMRegister src);
- void movdqa(Address dst, XMMRegister src);
+#ifdef _LP64
+ void popq(Address dst);
+#endif
- void pshufd(XMMRegister dst, XMMRegister src, int mode); // Shuffle Packed Doublewords
+ // Prefetches (SSE, SSE2, 3DNOW only)
+
+ void prefetchnta(Address src);
+ void prefetchr(Address src);
+ void prefetcht0(Address src);
+ void prefetcht1(Address src);
+ void prefetcht2(Address src);
+ void prefetchw(Address src);
+
+ // Shuffle Packed Doublewords
+ void pshufd(XMMRegister dst, XMMRegister src, int mode);
void pshufd(XMMRegister dst, Address src, int mode);
- void pshuflw(XMMRegister dst, XMMRegister src, int mode); // Shuffle Packed Low Words
+
+ // Shuffle Packed Low Words
+ void pshuflw(XMMRegister dst, XMMRegister src, int mode);
void pshuflw(XMMRegister dst, Address src, int mode);
- void psrlq(XMMRegister dst, int shift); // Shift Right Logical Quadword Immediate
+ // Shift Right Logical Quadword Immediate
+ void psrlq(XMMRegister dst, int shift);
- void punpcklbw(XMMRegister dst, XMMRegister src); // Interleave Low Bytes
- void punpcklbw(XMMRegister dst, Address src);
+ // Interleave Low Bytes
+ void punpcklbw(XMMRegister dst, XMMRegister src);
+
+ void pushl(Address src);
+
+ void pushq(Address src);
+
+ // Xor Packed Byte Integer Values
+ void pxor(XMMRegister dst, Address src);
+ void pxor(XMMRegister dst, XMMRegister src);
+
+ void rcll(Register dst, int imm8);
+
+ void rclq(Register dst, int imm8);
+
+ void ret(int imm16);
+
+ void sahf();
+
+ void sarl(Register dst, int imm8);
+ void sarl(Register dst);
+
+ void sarq(Register dst, int imm8);
+ void sarq(Register dst);
+
+ void sbbl(Address dst, int32_t imm32);
+ void sbbl(Register dst, int32_t imm32);
+ void sbbl(Register dst, Address src);
+ void sbbl(Register dst, Register src);
+
+ void sbbq(Address dst, int32_t imm32);
+ void sbbq(Register dst, int32_t imm32);
+ void sbbq(Register dst, Address src);
+ void sbbq(Register dst, Register src);
+
+ void setb(Condition cc, Register dst);
+
+ void shldl(Register dst, Register src);
+
+ void shll(Register dst, int imm8);
+ void shll(Register dst);
+
+ void shlq(Register dst, int imm8);
+ void shlq(Register dst);
+
+ void shrdl(Register dst, Register src);
+
+ void shrl(Register dst, int imm8);
+ void shrl(Register dst);
+
+ void shrq(Register dst, int imm8);
+ void shrq(Register dst);
+
+ void smovl(); // QQQ generic?
+
+ // Compute Square Root of Scalar Double-Precision Floating-Point Value
+ void sqrtsd(XMMRegister dst, Address src);
+ void sqrtsd(XMMRegister dst, XMMRegister src);
+
+ void std() { emit_byte(0xfd); }
- void ldmxcsr( Address src );
void stmxcsr( Address dst );
+
+ void subl(Address dst, int32_t imm32);
+ void subl(Address dst, Register src);
+ void subl(Register dst, int32_t imm32);
+ void subl(Register dst, Address src);
+ void subl(Register dst, Register src);
+
+ void subq(Address dst, int32_t imm32);
+ void subq(Address dst, Register src);
+ void subq(Register dst, int32_t imm32);
+ void subq(Register dst, Address src);
+ void subq(Register dst, Register src);
+
+
+ // Subtract Scalar Double-Precision Floating-Point Values
+ void subsd(XMMRegister dst, Address src);
+ void subsd(XMMRegister dst, XMMRegister src);
+
+ // Subtract Scalar Single-Precision Floating-Point Values
+ void subss(XMMRegister dst, Address src);
+ void subss(XMMRegister dst, XMMRegister src);
+
+ void testb(Register dst, int imm8);
+
+ void testl(Register dst, int32_t imm32);
+ void testl(Register dst, Register src);
+ void testl(Register dst, Address src);
+
+ void testq(Register dst, int32_t imm32);
+ void testq(Register dst, Register src);
+
+
+ // Unordered Compare Scalar Double-Precision Floating-Point Values and set EFLAGS
+ void ucomisd(XMMRegister dst, Address src);
+ void ucomisd(XMMRegister dst, XMMRegister src);
+
+ // Unordered Compare Scalar Single-Precision Floating-Point Values and set EFLAGS
+ void ucomiss(XMMRegister dst, Address src);
+ void ucomiss(XMMRegister dst, XMMRegister src);
+
+ void xaddl(Address dst, Register src);
+
+ void xaddq(Address dst, Register src);
+
+ void xchgl(Register reg, Address adr);
+ void xchgl(Register dst, Register src);
+
+ void xchgq(Register reg, Address adr);
+ void xchgq(Register dst, Register src);
+
+ void xorl(Register dst, int32_t imm32);
+ void xorl(Register dst, Address src);
+ void xorl(Register dst, Register src);
+
+ void xorq(Register dst, Address src);
+ void xorq(Register dst, Register src);
+
+ // Bitwise Logical XOR of Packed Double-Precision Floating-Point Values
+ void xorpd(XMMRegister dst, Address src);
+ void xorpd(XMMRegister dst, XMMRegister src);
+
+ // Bitwise Logical XOR of Packed Single-Precision Floating-Point Values
+ void xorps(XMMRegister dst, Address src);
+ void xorps(XMMRegister dst, XMMRegister src);
+
+ void set_byte_if_not_zero(Register dst); // sets reg to 1 if not zero, otherwise 0
};
@@ -1077,10 +1397,22 @@ class MacroAssembler: public Assembler {
void extend_sign(Register hi, Register lo);
// Support for inc/dec with optimal instruction selection depending on value
- void increment(Register reg, int value = 1);
- void decrement(Register reg, int value = 1);
- void increment(Address dst, int value = 1);
- void decrement(Address dst, int value = 1);
+
+ void increment(Register reg, int value = 1) { LP64_ONLY(incrementq(reg, value)) NOT_LP64(incrementl(reg, value)) ; }
+ void decrement(Register reg, int value = 1) { LP64_ONLY(decrementq(reg, value)) NOT_LP64(decrementl(reg, value)) ; }
+
+ void decrementl(Address dst, int value = 1);
+ void decrementl(Register reg, int value = 1);
+
+ void decrementq(Register reg, int value = 1);
+ void decrementq(Address dst, int value = 1);
+
+ void incrementl(Address dst, int value = 1);
+ void incrementl(Register reg, int value = 1);
+
+ void incrementq(Register reg, int value = 1);
+ void incrementq(Address dst, int value = 1);
+
// Support optimal SSE move instructions.
void movflt(XMMRegister dst, XMMRegister src) {
@@ -1104,9 +1436,8 @@ class MacroAssembler: public Assembler {
}
void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
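In generated code the MacroAssembler forms are the ones to call, since they wrap the movsd/movlpd/movss selection mentioned above. A sketch, assuming the usual `__` shorthand and the (XMMRegister, Address) overload from the full header (not part of the patch):

    __ movdbl(xmm0, Address(rsp, 0));   // picks movsd or movlpd depending on the cpu
    __ movflt(xmm1, xmm2);              // register-to-register, presumably via movaps per the note above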
- void increment(AddressLiteral dst);
- void increment(ArrayAddress dst);
-
+ void incrementl(AddressLiteral dst);
+ void incrementl(ArrayAddress dst);
// Alignment
void align(int modulus);
@@ -1128,25 +1459,70 @@ class MacroAssembler: public Assembler {
// They make sure that the stack linkage is setup correctly. call_VM's correspond
// to ENTRY/ENTRY_X entry points while call_VM_leaf's correspond to LEAF entry points.
- void call_VM(Register oop_result, address entry_point, bool check_exceptions = true);
- void call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions = true);
- void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
- void call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
-
- void call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments = 0, bool check_exceptions = true);
- void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions = true);
- void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions = true);
- void call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions = true);
- void call_VM_leaf(address entry_point, int number_of_arguments = 0);
- void call_VM_leaf(address entry_point, Register arg_1);
- void call_VM_leaf(address entry_point, Register arg_1, Register arg_2);
- void call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3);
+ void call_VM(Register oop_result,
+ address entry_point,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1, Register arg_2,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ address entry_point,
+ Register arg_1, Register arg_2, Register arg_3,
+ bool check_exceptions = true);
+
+ // Overloadings with last_Java_sp
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ int number_of_arguments = 0,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+               Register arg_1,
+               bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1, Register arg_2,
+ bool check_exceptions = true);
+ void call_VM(Register oop_result,
+ Register last_java_sp,
+ address entry_point,
+ Register arg_1, Register arg_2, Register arg_3,
+ bool check_exceptions = true);
+
+ void call_VM_leaf(address entry_point,
+ int number_of_arguments = 0);
+ void call_VM_leaf(address entry_point,
+ Register arg_1);
+ void call_VM_leaf(address entry_point,
+ Register arg_1, Register arg_2);
+ void call_VM_leaf(address entry_point,
+ Register arg_1, Register arg_2, Register arg_3);
// last Java Frame (fills frame anchor)
- void set_last_Java_frame(Register thread, Register last_java_sp, Register last_java_fp, address last_java_pc);
+ void set_last_Java_frame(Register thread,
+ Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc);
+
+ // thread in the default location (r15_thread on 64bit)
+ void set_last_Java_frame(Register last_java_sp,
+ Register last_java_fp,
+ address last_java_pc);
+
void reset_last_Java_frame(Register thread, bool clear_fp, bool clear_pc);
+ // thread in the default location (r15_thread on 64bit)
+ void reset_last_Java_frame(bool clear_fp, bool clear_pc);
+
// Stores
void store_check(Register obj); // store check for obj - register is destroyed afterwards
void store_check(Register obj, Address dst); // same as above, dst is exact store location (reg. is destroyed)
@@ -1165,18 +1541,48 @@ class MacroAssembler: public Assembler {
void movbool(Address dst, Register src);
void testbool(Register dst);
- // Int division/reminder for Java
+ // oop manipulations
+ void load_klass(Register dst, Register src);
+ void store_klass(Register dst, Register src);
+
+ void load_prototype_header(Register dst, Register src);
+
+#ifdef _LP64
+ void store_klass_gap(Register dst, Register src);
+
+ void load_heap_oop(Register dst, Address src);
+ void store_heap_oop(Address dst, Register src);
+ void encode_heap_oop(Register r);
+ void decode_heap_oop(Register r);
+ void encode_heap_oop_not_null(Register r);
+ void decode_heap_oop_not_null(Register r);
+ void encode_heap_oop_not_null(Register dst, Register src);
+ void decode_heap_oop_not_null(Register dst, Register src);
+
+ void set_narrow_oop(Register dst, jobject obj);
+
+ // if heap base register is used - reinit it with the correct value
+ void reinit_heapbase();
+#endif // _LP64
+
+ // Int division/remainder for Java
// (as idivl, but checks for special case as described in JVM spec.)
// returns idivl instruction offset for implicit exception handling
int corrected_idivl(Register reg);
+ // Long division/remainder for Java
+ // (as idivq, but checks for special case as described in JVM spec.)
+ // returns idivq instruction offset for implicit exception handling
+ int corrected_idivq(Register reg);
+
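For reference, the special case mentioned above is quotient overflow (an illustrative note, not part of the patch):

    //   min_jint  / -1  == min_jint    per the JVM spec, but a raw idivl raises #DE on the overflow
    //   min_jlong / -1  == min_jlong   likewise for idivq
    // so the corrected_ variants presumably test for that dividend/divisor pair before dividing.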
void int3();
+ // Long operation macros for a 32bit cpu
// Long negation for Java
void lneg(Register hi, Register lo);
// Long multiplication for Java
- // (destroys contents of rax, rbx, rcx and rdx)
+ // (destroys contents of eax, ebx, ecx and edx)
void lmul(int x_rsp_offset, int y_rsp_offset); // rdx:rax = x * y
// Long shifts for Java
@@ -1188,6 +1594,16 @@ class MacroAssembler: public Assembler {
// (semantics as described in JVM spec.)
void lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo); // x_hi = lcmp(x, y)
+
+ // misc
+
+ // Sign extension
+ void sign_extend_short(Register reg);
+ void sign_extend_byte(Register reg);
+
+ // Division by power of 2, rounding towards 0
+ void division_with_shift(Register reg, int shift_value);
+
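Why a correction is needed at all (illustrative arithmetic, not part of this patch): an arithmetic shift rounds towards minus infinity, so negative dividends have to be biased by divisor-1 first. For a divide by 8:

    //   -9 >> 3        == -2    // sar alone rounds towards -infinity
    //   (-9 + 7) >> 3  == -1    // biased first: rounds towards 0, matching Java semantics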
// Compares the top-most stack entries on the FPU stack and sets the eflags as follows:
//
// CF (corresponds to C0) if x < y
@@ -1255,13 +1671,6 @@ class MacroAssembler: public Assembler {
void push_CPU_state();
void pop_CPU_state();
- // Sign extension
- void sign_extend_short(Register reg);
- void sign_extend_byte(Register reg);
-
- // Division by power of 2, rounding towards 0
- void division_with_shift(Register reg, int shift_value);
-
// Round up to a power of two
void round_to(Register reg, int modulus);
@@ -1291,17 +1700,31 @@ class MacroAssembler: public Assembler {
void set_word_if_not_zero(Register reg); // sets reg to 1 if not zero, otherwise 0
// Debugging
- void verify_oop(Register reg, const char* s = "broken oop"); // only if +VerifyOops
+
+ // only if +VerifyOops
+ void verify_oop(Register reg, const char* s = "broken oop");
void verify_oop_addr(Address addr, const char * s = "broken oop addr");
- void verify_FPU(int stack_depth, const char* s = "illegal FPU state"); // only if +VerifyFPU
- void stop(const char* msg); // prints msg, dumps registers and stops execution
- void warn(const char* msg); // prints msg and continues
- static void debug(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
+ // only if +VerifyFPU
+ void verify_FPU(int stack_depth, const char* s = "illegal FPU state");
+
+ // prints msg, dumps registers and stops execution
+ void stop(const char* msg);
+
+ // prints msg and continues
+ void warn(const char* msg);
+
+ static void debug32(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg);
+ static void debug64(char* msg, int64_t pc, int64_t regs[]);
+
void os_breakpoint();
+
void untested() { stop("untested"); }
+
  void unimplemented(const char* what = "") { char* b = new char[1024]; jio_snprintf(b, 1024, "unimplemented: %s", what); stop(b); }
+
void should_not_reach_here() { stop("should not reach here"); }
+
void print_CPU_state();
// Stack overflow checking
@@ -1348,9 +1771,20 @@ class MacroAssembler: public Assembler {
// Arithmetics
- void cmp8(AddressLiteral src1, int8_t imm);
- // QQQ renamed to drag out the casting of address to int32_t/intptr_t
+ void addptr(Address dst, int32_t src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)) ; }
+ void addptr(Address dst, Register src);
+
+ void addptr(Register dst, Address src) { LP64_ONLY(addq(dst, src)) NOT_LP64(addl(dst, src)); }
+ void addptr(Register dst, int32_t src);
+ void addptr(Register dst, Register src);
+
+ void andptr(Register dst, int32_t src);
+ void andptr(Register src1, Register src2) { LP64_ONLY(andq(src1, src2)) NOT_LP64(andl(src1, src2)) ; }
+
+ void cmp8(AddressLiteral src1, int imm);
+
+ // renamed to drag out the casting of address to int32_t/intptr_t
void cmp32(Register src1, int32_t imm);
void cmp32(AddressLiteral src1, int32_t imm);
@@ -1359,16 +1793,63 @@ class MacroAssembler: public Assembler {
void cmp32(Register src1, Address src2);
+#ifndef _LP64
+ void cmpoop(Address dst, jobject obj);
+ void cmpoop(Register dst, jobject obj);
+#endif // _LP64
+
  // NOTE src2 must be the lval. This is NOT a mem-mem compare
void cmpptr(Address src1, AddressLiteral src2);
void cmpptr(Register src1, AddressLiteral src2);
- void cmpoop(Address dst, jobject obj);
- void cmpoop(Register dst, jobject obj);
+ void cmpptr(Register src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+ void cmpptr(Register src1, Address src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+ // void cmpptr(Address src1, Register src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+
+ void cmpptr(Register src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+ void cmpptr(Address src1, int32_t src2) { LP64_ONLY(cmpq(src1, src2)) NOT_LP64(cmpl(src1, src2)) ; }
+
+  // cmp64 to avoid hiding cmpq
+ void cmp64(Register src1, AddressLiteral src);
+
+ void cmpxchgptr(Register reg, Address adr);
+
+ void locked_cmpxchgptr(Register reg, AddressLiteral adr);
+
+
+ void imulptr(Register dst, Register src) { LP64_ONLY(imulq(dst, src)) NOT_LP64(imull(dst, src)); }
+
+
+ void negptr(Register dst) { LP64_ONLY(negq(dst)) NOT_LP64(negl(dst)); }
+
+ void notptr(Register dst) { LP64_ONLY(notq(dst)) NOT_LP64(notl(dst)); }
+
+ void shlptr(Register dst, int32_t shift);
+ void shlptr(Register dst) { LP64_ONLY(shlq(dst)) NOT_LP64(shll(dst)); }
+
+ void shrptr(Register dst, int32_t shift);
+ void shrptr(Register dst) { LP64_ONLY(shrq(dst)) NOT_LP64(shrl(dst)); }
+
+ void sarptr(Register dst) { LP64_ONLY(sarq(dst)) NOT_LP64(sarl(dst)); }
+ void sarptr(Register dst, int32_t src) { LP64_ONLY(sarq(dst, src)) NOT_LP64(sarl(dst, src)); }
+
+ void subptr(Address dst, int32_t src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
+
+ void subptr(Register dst, Address src) { LP64_ONLY(subq(dst, src)) NOT_LP64(subl(dst, src)); }
+ void subptr(Register dst, int32_t src);
+ void subptr(Register dst, Register src);
+
+
+ void sbbptr(Address dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
+ void sbbptr(Register dst, int32_t src) { LP64_ONLY(sbbq(dst, src)) NOT_LP64(sbbl(dst, src)); }
+
+ void xchgptr(Register src1, Register src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
+ void xchgptr(Register src1, Address src2) { LP64_ONLY(xchgq(src1, src2)) NOT_LP64(xchgl(src1, src2)) ; }
+
+ void xaddptr(Address src1, Register src2) { LP64_ONLY(xaddq(src1, src2)) NOT_LP64(xaddl(src1, src2)) ; }
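These ptr-sized wrappers let a single call site assemble correctly for either word size through the LP64_ONLY/NOT_LP64 macros. A sketch of how they are meant to read (assumes the usual `__` shorthand, the wordSize constant, and the movptr/addptr declarations later in this file; not part of the patch):

    __ movptr(rax, Address(rbx, 0));   // movq on LP64, movl on 32bit
    __ addptr(rax, wordSize);          // addq / addl
    __ cmpptr(rax, rcx);               // cmpq / cmpl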
- void cmpxchgptr(Register reg, AddressLiteral adr);
// Helper functions for statistics gathering.
// Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
@@ -1378,8 +1859,21 @@ class MacroAssembler: public Assembler {
void lea(Register dst, AddressLiteral adr);
void lea(Address dst, AddressLiteral adr);
+ void lea(Register dst, Address adr) { Assembler::lea(dst, adr); }
+
+ void leal32(Register dst, Address src) { leal(dst, src); }
+
+ void test32(Register src1, AddressLiteral src2);
- void test32(Register dst, AddressLiteral src);
+ void orptr(Register dst, Address src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
+ void orptr(Register dst, Register src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
+ void orptr(Register dst, int32_t src) { LP64_ONLY(orq(dst, src)) NOT_LP64(orl(dst, src)); }
+
+ void testptr(Register src, int32_t imm32) { LP64_ONLY(testq(src, imm32)) NOT_LP64(testl(src, imm32)); }
+ void testptr(Register src1, Register src2);
+
+ void xorptr(Register dst, Register src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
+ void xorptr(Register dst, Address src) { LP64_ONLY(xorq(dst, src)) NOT_LP64(xorl(dst, src)); }
// Calls
@@ -1431,11 +1925,19 @@ class MacroAssembler: public Assembler {
void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
void ldmxcsr(AddressLiteral src);
+private:
+ // these are private because users should be doing movflt/movdbl
+
void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
void movss(XMMRegister dst, AddressLiteral src);
+ void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
+ void movlpd(XMMRegister dst, AddressLiteral src);
+
+public:
+
void movsd(XMMRegister dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(Address dst, XMMRegister src) { Assembler::movsd(dst, src); }
void movsd(XMMRegister dst, Address src) { Assembler::movsd(dst, src); }
@@ -1461,6 +1963,11 @@ class MacroAssembler: public Assembler {
// Data
+ void cmov(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); }
+
+ void cmovptr(Condition cc, Register dst, Address src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); }
+ void cmovptr(Condition cc, Register dst, Register src) { LP64_ONLY(cmovq(cc, dst, src)) NOT_LP64(cmovl(cc, dst, src)); }
+
void movoop(Register dst, jobject obj);
void movoop(Address dst, jobject obj);
@@ -1468,17 +1975,48 @@ class MacroAssembler: public Assembler {
// can this do an lea?
void movptr(Register dst, ArrayAddress src);
+ void movptr(Register dst, Address src);
+
void movptr(Register dst, AddressLiteral src);
+ void movptr(Register dst, intptr_t src);
+ void movptr(Register dst, Register src);
+ void movptr(Address dst, intptr_t src);
+
+ void movptr(Address dst, Register src);
+
+#ifdef _LP64
+  // Generally the next two are only used for moving NULL, although there are
+  // situations in initializing the mark word where they could be used. They
+  // are dangerous.
+
+  // They only exist on LP64, where int32_t and intptr_t are distinct types;
+  // on 32bit they would collide with the intptr_t overloads and make the
+  // declarations ambiguous.
+
+ void movptr(Address dst, int32_t imm32);
+ void movptr(Register dst, int32_t imm32);
+#endif // _LP64
+
// to avoid hiding movl
void mov32(AddressLiteral dst, Register src);
void mov32(Register dst, AddressLiteral src);
+
// to avoid hiding movb
void movbyte(ArrayAddress dst, int src);
// Can push value or effective address
void pushptr(AddressLiteral src);
+ void pushptr(Address src) { LP64_ONLY(pushq(src)) NOT_LP64(pushl(src)); }
+ void popptr(Address src) { LP64_ONLY(popq(src)) NOT_LP64(popl(src)); }
+
+ void pushoop(jobject obj);
+
+  // sign extend as needed: from a 32bit (l) value to a ptr sized element
+ void movl2ptr(Register dst, Address src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(movl(dst, src)); }
+ void movl2ptr(Register dst, Register src) { LP64_ONLY(movslq(dst, src)) NOT_LP64(if (dst != src) movl(dst, src)); }
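A note on the helper just above (not part of the patch): movl2ptr widens a 32bit value to pointer width, so on LP64 the sign bit propagates; on 32bit it degenerates to a plain movl (or nothing when dst == src). For example:

    //   movl2ptr(rdx, rcx) with ecx == 0xFFFFFFFF (-1)  ->  rdx == 0xFFFFFFFFFFFFFFFF on LP64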
+
+
#undef VIRTUAL
};
diff --git a/src/cpu/x86/vm/assembler_x86_64.inline.hpp b/src/cpu/x86/vm/assembler_x86.inline.hpp
index 3a705ea3e..27c2b6add 100644
--- a/src/cpu/x86/vm/assembler_x86_64.inline.hpp
+++ b/src/cpu/x86/vm/assembler_x86.inline.hpp
@@ -1,5 +1,5 @@
/*
- * Copyright 2003-2005 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -22,12 +22,6 @@
*
*/
-inline void Assembler::emit_long64(jlong x) {
- *(jlong*) _code_pos = x;
- _code_pos += sizeof(jlong);
- code_section()->set_end(_code_pos);
-}
-
inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
unsigned char op = branch[0];
assert(op == 0xE8 /* call */ ||
@@ -69,18 +63,25 @@ inline void MacroAssembler::pd_print_patched_instruction(address branch) {
}
#endif // ndef PRODUCT
-inline void MacroAssembler::movptr(Address dst, intptr_t src) {
-#ifdef _LP64
- Assembler::mov64(dst, src);
-#else
- Assembler::movl(dst, src);
-#endif // _LP64
-}
+#ifndef _LP64
+inline int Assembler::prefix_and_encode(int reg_enc, bool byteinst) { return reg_enc; }
+inline int Assembler::prefixq_and_encode(int reg_enc) { return reg_enc; }
-inline void MacroAssembler::movptr(Register dst, intptr_t src) {
-#ifdef _LP64
- Assembler::mov64(dst, src);
+inline int Assembler::prefix_and_encode(int dst_enc, int src_enc, bool byteinst) { return dst_enc << 3 | src_enc; }
+inline int Assembler::prefixq_and_encode(int dst_enc, int src_enc) { return dst_enc << 3 | src_enc; }
+
+inline void Assembler::prefix(Register reg) {}
+inline void Assembler::prefix(Address adr) {}
+inline void Assembler::prefixq(Address adr) {}
+
+inline void Assembler::prefix(Address adr, Register reg, bool byteinst) {}
+inline void Assembler::prefixq(Address adr, Register reg) {}
+
+inline void Assembler::prefix(Address adr, XMMRegister reg) {}
#else
- Assembler::movl(dst, src);
-#endif // _LP64
+inline void Assembler::emit_long64(jlong x) {
+ *(jlong*) _code_pos = x;
+ _code_pos += sizeof(jlong);
+ code_section()->set_end(_code_pos);
}
+#endif // _LP64
diff --git a/src/cpu/x86/vm/assembler_x86_32.cpp b/src/cpu/x86/vm/assembler_x86_32.cpp
deleted file mode 100644
index 91c2e9ab1..000000000
--- a/src/cpu/x86/vm/assembler_x86_32.cpp
+++ /dev/null
@@ -1,5001 +0,0 @@
-/*
- * Copyright 1997-2008 Sun Microsystems, Inc. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_assembler_x86_32.cpp.incl"
-
-// Implementation of AddressLiteral
-
-AddressLiteral::AddressLiteral(address target, relocInfo::relocType rtype) {
- _is_lval = false;
- _target = target;
- switch (rtype) {
- case relocInfo::oop_type:
- // Oops are a special case. Normally they would be their own section
- // but in cases like icBuffer they are literals in the code stream that
- // we don't have a section for. We use none so that we get a literal address
- // which is always patchable.
- break;
- case relocInfo::external_word_type:
- _rspec = external_word_Relocation::spec(target);
- break;
- case relocInfo::internal_word_type:
- _rspec = internal_word_Relocation::spec(target);
- break;
- case relocInfo::opt_virtual_call_type:
- _rspec = opt_virtual_call_Relocation::spec();
- break;
- case relocInfo::static_call_type:
- _rspec = static_call_Relocation::spec();
- break;
- case relocInfo::runtime_call_type:
- _rspec = runtime_call_Relocation::spec();
- break;
- case relocInfo::poll_type:
- case relocInfo::poll_return_type:
- _rspec = Relocation::spec_simple(rtype);
- break;
- case relocInfo::none:
- break;
- default:
- ShouldNotReachHere();
- break;
- }
-}
-
-// Implementation of Address
-
-Address Address::make_array(ArrayAddress adr) {
-#ifdef _LP64
- // Not implementable on 64bit machines
- // Should have been handled higher up the call chain.
- ShouldNotReachHere();
-#else
- AddressLiteral base = adr.base();
- Address index = adr.index();
- assert(index._disp == 0, "must not have disp"); // maybe it can?
- Address array(index._base, index._index, index._scale, (intptr_t) base.target());
- array._rspec = base._rspec;
- return array;
-#endif // _LP64
-}
-
-#ifndef _LP64
-
-// exceedingly dangerous constructor
-Address::Address(address loc, RelocationHolder spec) {
- _base = noreg;
- _index = noreg;
- _scale = no_scale;
- _disp = (intptr_t) loc;
- _rspec = spec;
-}
-#endif // _LP64
-
-// Convert the raw encoding form into the form expected by the constructor for
-// Address. An index of 4 (rsp) corresponds to having no index, so convert
-// that to noreg for the Address constructor.
-Address Address::make_raw(int base, int index, int scale, int disp) {
- bool valid_index = index != rsp->encoding();
- if (valid_index) {
- Address madr(as_Register(base), as_Register(index), (Address::ScaleFactor)scale, in_ByteSize(disp));
- return madr;
- } else {
- Address madr(as_Register(base), noreg, Address::no_scale, in_ByteSize(disp));
- return madr;
- }
-}
-
-// Implementation of Assembler
-
-int AbstractAssembler::code_fill_byte() {
- return (u_char)'\xF4'; // hlt
-}
-
-// make this go away someday
-void Assembler::emit_data(jint data, relocInfo::relocType rtype, int format) {
- if (rtype == relocInfo::none)
- emit_long(data);
- else emit_data(data, Relocation::spec_simple(rtype), format);
-}
-
-
-void Assembler::emit_data(jint data, RelocationHolder const& rspec, int format) {
- assert(imm32_operand == 0, "default format must be imm32 in this file");
- assert(inst_mark() != NULL, "must be inside InstructionMark");
- if (rspec.type() != relocInfo::none) {
- #ifdef ASSERT
- check_relocation(rspec, format);
- #endif
- // Do not use AbstractAssembler::relocate, which is not intended for
- // embedded words. Instead, relocate to the enclosing instruction.
-
- // Hack: call32 is too wide for the mask, so use disp32.
- if (format == call32_operand)
- code_section()->relocate(inst_mark(), rspec, disp32_operand);
- else
- code_section()->relocate(inst_mark(), rspec, format);
- }
- emit_long(data);
-}
-
-
-void Assembler::emit_arith_b(int op1, int op2, Register dst, int imm8) {
- assert(dst->has_byte_register(), "must have byte register");
- assert(isByte(op1) && isByte(op2), "wrong opcode");
- assert(isByte(imm8), "not a byte");
- assert((op1 & 0x01) == 0, "should be 8bit operation");
- emit_byte(op1);
- emit_byte(op2 | dst->encoding());
- emit_byte(imm8);
-}
-
-
-void Assembler::emit_arith(int op1, int op2, Register dst, int imm32) {
- assert(isByte(op1) && isByte(op2), "wrong opcode");
- assert((op1 & 0x01) == 1, "should be 32bit operation");
- assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
- if (is8bit(imm32)) {
- emit_byte(op1 | 0x02); // set sign bit
- emit_byte(op2 | dst->encoding());
- emit_byte(imm32 & 0xFF);
- } else {
- emit_byte(op1);
- emit_byte(op2 | dst->encoding());
- emit_long(imm32);
- }
-}
-
-// immediate-to-memory forms
-void Assembler::emit_arith_operand(int op1, Register rm, Address adr, int imm32) {
- assert((op1 & 0x01) == 1, "should be 32bit operation");
- assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
- if (is8bit(imm32)) {
- emit_byte(op1 | 0x02); // set sign bit
- emit_operand(rm,adr);
- emit_byte(imm32 & 0xFF);
- } else {
- emit_byte(op1);
- emit_operand(rm,adr);
- emit_long(imm32);
- }
-}
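
Both arithmetic emitters above choose the sign-extended imm8 encoding (op1 | 0x02) when the immediate fits in a signed byte. A minimal standalone sketch of that decision follows, assuming the usual round-trip test; is8bit_model is an invented name, the real code uses is8bit().

#include <cstdint>
#include <cstdio>

// Sketch of the imm8-vs-imm32 choice used by emit_arith/emit_arith_operand:
// an immediate qualifies for the sign-extended 8-bit form exactly when it
// survives a round trip through int8_t. (Invented helper name, illustration only.)
static bool is8bit_model(int32_t imm) {
  return imm == static_cast<int8_t>(imm);
}

int main() {
  std::printf("%d %d %d\n", is8bit_model(5), is8bit_model(-1), is8bit_model(300)); // prints: 1 1 0
  return 0;
}
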
-
-void Assembler::emit_arith(int op1, int op2, Register dst, jobject obj) {
- assert(isByte(op1) && isByte(op2), "wrong opcode");
- assert((op1 & 0x01) == 1, "should be 32bit operation");
- assert((op1 & 0x02) == 0, "sign-extension bit should not be set");
- InstructionMark im(this);
- emit_byte(op1);
- emit_byte(op2 | dst->encoding());
- emit_data((int)obj, relocInfo::oop_type, 0);
-}
-
-
-void Assembler::emit_arith(int op1, int op2, Register dst, Register src) {
- assert(isByte(op1) && isByte(op2), "wrong opcode");
- emit_byte(op1);
- emit_byte(op2 | dst->encoding() << 3 | src->encoding());
-}
-
-
-void Assembler::emit_operand(Register reg,
- Register base,
- Register index,
- Address::ScaleFactor scale,
- int disp,
- RelocationHolder const& rspec) {
-
- relocInfo::relocType rtype = (relocInfo::relocType) rspec.type();
- if (base->is_valid()) {
- if (index->is_valid()) {
- assert(scale != Address::no_scale, "inconsistent address");
- // [base + index*scale + disp]
- if (disp == 0 && rtype == relocInfo::none && base != rbp) {
- // [base + index*scale]
- // [00 reg 100][ss index base]
- assert(index != rsp, "illegal addressing mode");
- emit_byte(0x04 | reg->encoding() << 3);
- emit_byte(scale << 6 | index->encoding() << 3 | base->encoding());
- } else if (is8bit(disp) && rtype == relocInfo::none) {
- // [base + index*scale + imm8]
- // [01 reg 100][ss index base] imm8
- assert(index != rsp, "illegal addressing mode");
- emit_byte(0x44 | reg->encoding() << 3);
- emit_byte(scale << 6 | index->encoding() << 3 | base->encoding());
- emit_byte(disp & 0xFF);
- } else {
- // [base + index*scale + imm32]
- // [10 reg 100][ss index base] imm32
- assert(index != rsp, "illegal addressing mode");
- emit_byte(0x84 | reg->encoding() << 3);
- emit_byte(scale << 6 | index->encoding() << 3 | base->encoding());
- emit_data(disp, rspec, disp32_operand);
- }
- } else if (base == rsp) {
- // [esp + disp]
- if (disp == 0 && rtype == relocInfo::none) {
- // [esp]
- // [00 reg 100][00 100 100]
- emit_byte(0x04 | reg->encoding() << 3);
- emit_byte(0x24);
- } else if (is8bit(disp) && rtype == relocInfo::none) {
- // [esp + imm8]
- // [01 reg 100][00 100 100] imm8
- emit_byte(0x44 | reg->encoding() << 3);
- emit_byte(0x24);
- emit_byte(disp & 0xFF);
- } else {
- // [esp + imm32]
- // [10 reg 100][00 100 100] imm32
- emit_byte(0x84 | reg->encoding() << 3);
- emit_byte(0x24);
- emit_data(disp, rspec, disp32_operand);
- }
- } else {
- // [base + disp]
- assert(base != rsp, "illegal addressing mode");
- if (disp == 0 && rtype == relocInfo::none && base != rbp) {
- // [base]
- // [00 reg base]
- assert(base != rbp, "illegal addressing mode");
- emit_byte(0x00 | reg->encoding() << 3 | base->encoding());
- } else if (is8bit(disp) && rtype == relocInfo::none) {
- // [base + imm8]
- // [01 reg base] imm8
- emit_byte(0x40 | reg->encoding() << 3 | base->encoding());
- emit_byte(disp & 0xFF);
- } else {
- // [base + imm32]
- // [10 reg base] imm32
- emit_byte(0x80 | reg->encoding() << 3 | base->encoding());
- emit_data(disp, rspec, disp32_operand);
- }
- }
- } else {
- if (index->is_valid()) {
- assert(scale != Address::no_scale, "inconsistent address");
- // [index*scale + disp]
- // [00 reg 100][ss index 101] imm32
- assert(index != rsp, "illegal addressing mode");
- emit_byte(0x04 | reg->encoding() << 3);
- emit_byte(scale << 6 | index->encoding() << 3 | 0x05);
- emit_data(disp, rspec, disp32_operand);
- } else {
- // [disp]
- // [00 reg 101] imm32
- emit_byte(0x05 | reg->encoding() << 3);
- emit_data(disp, rspec, disp32_operand);
- }
- }
-}
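
The branches above implement the IA-32 ModRM/SIB encoding: the mod field selects the displacement width and r/m = 100 requests a SIB byte. As a standalone illustration of one common case handled above, [base + index*scale + disp8] (the register numbers and helper name below are assumptions made only for this sketch, not HotSpot code):

#include <cstdint>
#include <cstdio>
#include <vector>

// Illustration of the "[01 reg 100][ss index base] imm8" case handled above:
// mod=01 means a disp8 follows, r/m=100 means a SIB byte follows.
// encode_base_index_disp8 is an invented helper for this sketch only.
static std::vector<uint8_t> encode_base_index_disp8(int reg, int base, int index,
                                                    int scale, int8_t disp8) {
  std::vector<uint8_t> bytes;
  bytes.push_back(0x44 | (reg & 7) << 3);                            // ModRM: 0x40 (mod=01) | reg<<3 | 0x04 (SIB follows)
  bytes.push_back((scale & 3) << 6 | (index & 7) << 3 | (base & 7)); // SIB: ss index base
  bytes.push_back(static_cast<uint8_t>(disp8));                      // 8-bit displacement
  return bytes;
}

int main() {
  // Operand bytes of  movl 8(%esi,%edi,4), %eax : reg=eax(0), base=esi(6), index=edi(7), ss=2 (factor 4)
  for (uint8_t b : encode_base_index_disp8(0, 6, 7, 2, 8)) std::printf("%02X ", b);
  std::printf("\n"); // prints: 44 BE 08
  return 0;
}
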
-
-// Secret local extension to Assembler::WhichOperand:
-#define end_pc_operand (_WhichOperand_limit)
-
-address Assembler::locate_operand(address inst, WhichOperand which) {
- // Decode the given instruction, and return the address of
- // an embedded 32-bit operand word.
-
- // If "which" is disp32_operand, selects the displacement portion
- // of an effective address specifier.
- // If "which" is imm32_operand, selects the trailing immediate constant.
- // If "which" is call32_operand, selects the displacement of a call or jump.
- // Caller is responsible for ensuring that there is such an operand,
- // and that it is 32 bits wide.
-
- // If "which" is end_pc_operand, find the end of the instruction.
-
- address ip = inst;
-
- debug_only(bool has_imm32 = false);
- int tail_size = 0; // other random bytes (#32, #16, etc.) at end of insn
-
- again_after_prefix:
- switch (0xFF & *ip++) {
-
- // These convenience macros generate groups of "case" labels for the switch.
- #define REP4(x) (x)+0: case (x)+1: case (x)+2: case (x)+3
- #define REP8(x) (x)+0: case (x)+1: case (x)+2: case (x)+3: \
- case (x)+4: case (x)+5: case (x)+6: case (x)+7
- #define REP16(x) REP8((x)+0): \
- case REP8((x)+8)
-
- case CS_segment:
- case SS_segment:
- case DS_segment:
- case ES_segment:
- case FS_segment:
- case GS_segment:
- assert(ip == inst+1, "only one prefix allowed");
- goto again_after_prefix;
-
- case 0xFF: // pushl a; decl a; incl a; call a; jmp a
- case 0x88: // movb a, r
- case 0x89: // movl a, r
- case 0x8A: // movb r, a
- case 0x8B: // movl r, a
- case 0x8F: // popl a
- break;
-
- case 0x68: // pushl #32(oop?)
- if (which == end_pc_operand) return ip + 4;
- assert(which == imm32_operand, "pushl has no disp32");
- return ip; // not produced by emit_operand
-
- case 0x66: // movw ... (size prefix)
- switch (0xFF & *ip++) {
- case 0x8B: // movw r, a
- case 0x89: // movw a, r
- break;
- case 0xC7: // movw a, #16
- tail_size = 2; // the imm16
- break;
- case 0x0F: // several SSE/SSE2 variants
- ip--; // reparse the 0x0F
- goto again_after_prefix;
- default:
- ShouldNotReachHere();
- }
- break;
-
- case REP8(0xB8): // movl r, #32(oop?)
- if (which == end_pc_operand) return ip + 4;
- assert(which == imm32_operand || which == disp32_operand, "");
- return ip;
-
- case 0x69: // imul r, a, #32
- case 0xC7: // movl a, #32(oop?)
- tail_size = 4;
- debug_only(has_imm32 = true); // has both kinds of operands!
- break;
-
- case 0x0F: // movx..., etc.
- switch (0xFF & *ip++) {
- case 0x12: // movlps
- case 0x28: // movaps
- case 0x2E: // ucomiss
- case 0x2F: // comiss
- case 0x54: // andps
- case 0x55: // andnps
- case 0x56: // orps
- case 0x57: // xorps
- case 0x6E: // movd
- case 0x7E: // movd
- case 0xAE: // ldmxcsr a
- // The amd64 side says these have both operands, but that doesn't
- // appear to be true.
- // debug_only(has_imm32 = true); // has both kinds of operands!
- break;
-
- case 0xAD: // shrd r, a, %cl
- case 0xAF: // imul r, a
- case 0xBE: // movsxb r, a
- case 0xBF: // movsxw r, a
- case 0xB6: // movzxb r, a
- case 0xB7: // movzxw r, a
- case REP16(0x40): // cmovl cc, r, a
- case 0xB0: // cmpxchgb
- case 0xB1: // cmpxchg
- case 0xC1: // xaddl
- case 0xC7: // cmpxchg8
- case REP16(0x90): // setcc a
- // fall out of the switch to decode the address
- break;
- case 0xAC: // shrd r, a, #8
- tail_size = 1; // the imm8
- break;
- case REP16(0x80): // jcc rdisp32
- if (which == end_pc_operand) return ip + 4;
- assert(which == call32_operand, "jcc has no disp32 or imm32");
- return ip;
- default:
- ShouldNotReachHere();
- }
- break;
-
- case 0x81: // addl a, #32; addl r, #32
- // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
- // in the case of cmpl, the imm32 might be an oop
- tail_size = 4;
- debug_only(has_imm32 = true); // has both kinds of operands!
- break;
-
- case 0x85: // test r/m, r
- break;
-
- case 0x83: // addl a, #8; addl r, #8
- // also: orl, adcl, sbbl, andl, subl, xorl, cmpl
- tail_size = 1;
- break;
-
- case 0x9B:
- switch (0xFF & *ip++) {
- case 0xD9: // fnstcw a
- break;
- default:
- ShouldNotReachHere();
- }
- break;
-
- case REP4(0x00): // addb a, r; addl a, r; addb r, a; addl r, a
- case REP4(0x10): // adc...
- case REP4(0x20): // and...
- case REP4(0x30): // xor...
- case REP4(0x08): // or...
- case REP4(0x18): // sbb...
- case REP4(0x28): // sub...
- case REP4(0x38): // cmp...
- case 0xF7: // mull a
- case 0x8D: // leal r, a
- case 0x87: // xchg r, a
- break;
-
- case 0xC1: // sal a, #8; sar a, #8; shl a, #8; shr a, #8
- case 0xC6: // movb a, #8
- case 0x80: // cmpb a, #8
- case 0x6B: // imul r, a, #8
- tail_size = 1; // the imm8
- break;
-
- case 0xE8: // call rdisp32
- case 0xE9: // jmp rdisp32
- if (which == end_pc_operand) return ip + 4;
- assert(which == call32_operand, "call has no disp32 or imm32");
- return ip;
-
- case 0xD1: // sal a, 1; sar a, 1; shl a, 1; shr a, 1
- case 0xD3: // sal a, %cl; sar a, %cl; shl a, %cl; shr a, %cl
- case 0xD9: // fld_s a; fst_s a; fstp_s a; fldcw a
- case 0xDD: // fld_d a; fst_d a; fstp_d a
- case 0xDB: // fild_s a; fistp_s a; fld_x a; fstp_x a
- case 0xDF: // fild_d a; fistp_d a
- case 0xD8: // fadd_s a; fsubr_s a; fmul_s a; fdivr_s a; fcomp_s a
- case 0xDC: // fadd_d a; fsubr_d a; fmul_d a; fdivr_d a; fcomp_d a
- case 0xDE: // faddp_d a; fsubrp_d a; fmulp_d a; fdivrp_d a; fcompp_d a
- break;
-
- case 0xF3: // For SSE
- case 0xF2: // For SSE2
- ip++; ip++;
- break;
-
- default:
- ShouldNotReachHere();
-
- #undef REP8
- #undef REP16
- }
-
- assert(which != call32_operand, "instruction is not a call, jmp, or jcc");
- assert(which != imm32_operand || has_imm32, "instruction has no imm32 field");
-
- // parse the output of emit_operand
- int op2 = 0xFF & *ip++;
- int base = op2 & 0x07;
- int op3 = -1;
- const int b100 = 4;
- const int b101 = 5;
- if (base == b100 && (op2 >> 6) != 3) {
- op3 = 0xFF & *ip++;
- base = op3 & 0x07; // refetch the base
- }
- // now ip points at the disp (if any)
-
- switch (op2 >> 6) {
- case 0:
- // [00 reg 100][ss index base]
- // [00 reg 100][00 100 rsp]
- // [00 reg base]
- // [00 reg 100][ss index 101][disp32]
- // [00 reg 101] [disp32]
-
- if (base == b101) {
- if (which == disp32_operand)
- return ip; // caller wants the disp32
- ip += 4; // skip the disp32
- }
- break;
-
- case 1:
- // [01 reg 100][ss index base][disp8]
- // [01 reg 100][00 100 rsp][disp8]
- // [01 reg base] [disp8]
- ip += 1; // skip the disp8
- break;
-
- case 2:
- // [10 reg 100][ss index base][disp32]
- // [10 reg 100][00 100 rsp][disp32]
- // [10 reg base] [disp32]
- if (which == disp32_operand)
- return ip; // caller wants the disp32
- ip += 4; // skip the disp32
- break;
-
- case 3:
- // [11 reg base] (not a memory addressing mode)
- break;
- }
-
- if (which == end_pc_operand) {
- return ip + tail_size;
- }
-
- assert(which == imm32_operand, "instruction has only an imm32 field");
- return ip;
-}
-
-address Assembler::locate_next_instruction(address inst) {
- // Secretly share code with locate_operand:
- return locate_operand(inst, end_pc_operand);
-}
-
-
-#ifdef ASSERT
-void Assembler::check_relocation(RelocationHolder const& rspec, int format) {
- address inst = inst_mark();
- assert(inst != NULL && inst < pc(), "must point to beginning of instruction");
- address opnd;
-
- Relocation* r = rspec.reloc();
- if (r->type() == relocInfo::none) {
- return;
- } else if (r->is_call() || format == call32_operand) {
- // assert(format == imm32_operand, "cannot specify a nonzero format");
- opnd = locate_operand(inst, call32_operand);
- } else if (r->is_data()) {
- assert(format == imm32_operand || format == disp32_operand, "format ok");
- opnd = locate_operand(inst, (WhichOperand)format);
- } else {
- assert(format == imm32_operand, "cannot specify a format");
- return;
- }
- assert(opnd == pc(), "must put operand where relocs can find it");
-}
-#endif
-
-
-
-void Assembler::emit_operand(Register reg, Address adr) {
- emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
-}
-
-
-void Assembler::emit_farith(int b1, int b2, int i) {
- assert(isByte(b1) && isByte(b2), "wrong opcode");
- assert(0 <= i && i < 8, "illegal stack offset");
- emit_byte(b1);
- emit_byte(b2 + i);
-}
-
-
-void Assembler::pushad() {
- emit_byte(0x60);
-}
-
-void Assembler::popad() {
- emit_byte(0x61);
-}
-
-void Assembler::pushfd() {
- emit_byte(0x9C);
-}
-
-void Assembler::popfd() {
- emit_byte(0x9D);
-}
-
-void Assembler::pushl(int imm32) {
- emit_byte(0x68);
- emit_long(imm32);
-}
-
-#ifndef _LP64
-void Assembler::push_literal32(int32_t imm32, RelocationHolder const& rspec) {
- InstructionMark im(this);
- emit_byte(0x68);
- emit_data(imm32, rspec, 0);
-}
-#endif // _LP64
-
-void Assembler::pushl(Register src) {
- emit_byte(0x50 | src->encoding());
-}
-
-
-void Assembler::pushl(Address src) {
- InstructionMark im(this);
- emit_byte(0xFF);
- emit_operand(rsi, src);
-}
-
-void Assembler::popl(Register dst) {
- emit_byte(0x58 | dst->encoding());
-}
-
-
-void Assembler::popl(Address dst) {
- InstructionMark im(this);
- emit_byte(0x8F);
- emit_operand(rax, dst);
-}
-
-
-void Assembler::prefix(Prefix p) {
- a_byte(p);
-}
-
-
-void Assembler::movb(Register dst, Address src) {
- assert(dst->has_byte_register(), "must have byte register");
- InstructionMark im(this);
- emit_byte(0x8A);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movb(Address dst, int imm8) {
- InstructionMark im(this);
- emit_byte(0xC6);
- emit_operand(rax, dst);
- emit_byte(imm8);
-}
-
-
-void Assembler::movb(Address dst, Register src) {
- assert(src->has_byte_register(), "must have byte register");
- InstructionMark im(this);
- emit_byte(0x88);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movw(Address dst, int imm16) {
- InstructionMark im(this);
-
- emit_byte(0x66); // switch to 16-bit mode
- emit_byte(0xC7);
- emit_operand(rax, dst);
- emit_word(imm16);
-}
-
-
-void Assembler::movw(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x66);
- emit_byte(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movw(Address dst, Register src) {
- InstructionMark im(this);
- emit_byte(0x66);
- emit_byte(0x89);
- emit_operand(src, dst);
-}
-
-
-void Assembler::movl(Register dst, int imm32) {
- emit_byte(0xB8 | dst->encoding());
- emit_long(imm32);
-}
-
-#ifndef _LP64
-void Assembler::mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec) {
-
- InstructionMark im(this);
- emit_byte(0xB8 | dst->encoding());
- emit_data((int)imm32, rspec, 0);
-}
-#endif // _LP64
-
-void Assembler::movl(Register dst, Register src) {
- emit_byte(0x8B);
- emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
-}
-
-
-void Assembler::movl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x8B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movl(Address dst, int imm32) {
- InstructionMark im(this);
- emit_byte(0xC7);
- emit_operand(rax, dst);
- emit_long(imm32);
-}
-
-#ifndef _LP64
-void Assembler::mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec) {
- InstructionMark im(this);
- emit_byte(0xC7);
- emit_operand(rax, dst);
- emit_data((int)imm32, rspec, 0);
-}
-#endif // _LP64
-
-void Assembler::movl(Address dst, Register src) {
- InstructionMark im(this);
- emit_byte(0x89);
- emit_operand(src, dst);
-}
-
-void Assembler::movsxb(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xBE);
- emit_operand(dst, src);
-}
-
-void Assembler::movsxb(Register dst, Register src) {
- assert(src->has_byte_register(), "must have byte register");
- emit_byte(0x0F);
- emit_byte(0xBE);
- emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
-}
-
-
-void Assembler::movsxw(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xBF);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movsxw(Register dst, Register src) {
- emit_byte(0x0F);
- emit_byte(0xBF);
- emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
-}
-
-
-void Assembler::movzxb(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xB6);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxb(Register dst, Register src) {
- assert(src->has_byte_register(), "must have byte register");
- emit_byte(0x0F);
- emit_byte(0xB6);
- emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
-}
-
-
-void Assembler::movzxw(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xB7);
- emit_operand(dst, src);
-}
-
-
-void Assembler::movzxw(Register dst, Register src) {
- emit_byte(0x0F);
- emit_byte(0xB7);
- emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
-}
-
-
-void Assembler::cmovl(Condition cc, Register dst, Register src) {
- guarantee(VM_Version::supports_cmov(), "illegal instruction");
- emit_byte(0x0F);
- emit_byte(0x40 | cc);
- emit_byte(0xC0 | (dst->encoding() << 3) | src->encoding());
-}
-
-
-void Assembler::cmovl(Condition cc, Register dst, Address src) {
- guarantee(VM_Version::supports_cmov(), "illegal instruction");
- // The code below seems to be wrong - however the manual is inconclusive
- // do not use for now (remember to enable all callers when fixing this)
- Unimplemented();
- // wrong bytes?
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0x40 | cc);
- emit_operand(dst, src);
-}
-
-
-void Assembler::prefetcht0(Address src) {
- assert(VM_Version::supports_sse(), "must support");
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0x18);
- emit_operand(rcx, src); // 1, src
-}
-
-
-void Assembler::prefetcht1(Address src) {
- assert(VM_Version::supports_sse(), "must support");
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0x18);
- emit_operand(rdx, src); // 2, src
-}
-
-
-void Assembler::prefetcht2(Address src) {
- assert(VM_Version::supports_sse(), "must support");
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0x18);
- emit_operand(rbx, src); // 3, src
-}
-
-
-void Assembler::prefetchnta(Address src) {
- assert(VM_Version::supports_sse2(), "must support");
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0x18);
- emit_operand(rax, src); // 0, src
-}
-
-
-void Assembler::prefetchw(Address src) {
- assert(VM_Version::supports_3dnow(), "must support");
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0x0D);
- emit_operand(rcx, src); // 1, src
-}
-
-
-void Assembler::prefetchr(Address src) {
- assert(VM_Version::supports_3dnow(), "must support");
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0x0D);
- emit_operand(rax, src); // 0, src
-}
-
-
-void Assembler::adcl(Register dst, int imm32) {
- emit_arith(0x81, 0xD0, dst, imm32);
-}
-
-
-void Assembler::adcl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x13);
- emit_operand(dst, src);
-}
-
-
-void Assembler::adcl(Register dst, Register src) {
- emit_arith(0x13, 0xC0, dst, src);
-}
-
-
-void Assembler::addl(Address dst, int imm32) {
- InstructionMark im(this);
- emit_arith_operand(0x81,rax,dst,imm32);
-}
-
-
-void Assembler::addl(Address dst, Register src) {
- InstructionMark im(this);
- emit_byte(0x01);
- emit_operand(src, dst);
-}
-
-
-void Assembler::addl(Register dst, int imm32) {
- emit_arith(0x81, 0xC0, dst, imm32);
-}
-
-
-void Assembler::addl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x03);
- emit_operand(dst, src);
-}
-
-
-void Assembler::addl(Register dst, Register src) {
- emit_arith(0x03, 0xC0, dst, src);
-}
-
-
-void Assembler::andl(Register dst, int imm32) {
- emit_arith(0x81, 0xE0, dst, imm32);
-}
-
-
-void Assembler::andl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x23);
- emit_operand(dst, src);
-}
-
-
-void Assembler::andl(Register dst, Register src) {
- emit_arith(0x23, 0xC0, dst, src);
-}
-
-
-void Assembler::cmpb(Address dst, int imm8) {
- InstructionMark im(this);
- emit_byte(0x80);
- emit_operand(rdi, dst);
- emit_byte(imm8);
-}
-
-void Assembler::cmpw(Address dst, int imm16) {
- InstructionMark im(this);
- emit_byte(0x66);
- emit_byte(0x81);
- emit_operand(rdi, dst);
- emit_word(imm16);
-}
-
-void Assembler::cmpl(Address dst, int imm32) {
- InstructionMark im(this);
- emit_byte(0x81);
- emit_operand(rdi, dst);
- emit_long(imm32);
-}
-
-#ifndef _LP64
-void Assembler::cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec) {
- InstructionMark im(this);
- emit_byte(0x81);
- emit_byte(0xF8 | src1->encoding());
- emit_data(imm32, rspec, 0);
-}
-
-void Assembler::cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec) {
- InstructionMark im(this);
- emit_byte(0x81);
- emit_operand(rdi, src1);
- emit_data(imm32, rspec, 0);
-}
-#endif // _LP64
-
-
-void Assembler::cmpl(Register dst, int imm32) {
- emit_arith(0x81, 0xF8, dst, imm32);
-}
-
-
-void Assembler::cmpl(Register dst, Register src) {
- emit_arith(0x3B, 0xC0, dst, src);
-}
-
-
-void Assembler::cmpl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x3B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::decl(Register dst) {
- // Don't use it directly. Use MacroAssembler::decrement() instead.
- emit_byte(0x48 | dst->encoding());
-}
-
-
-void Assembler::decl(Address dst) {
- // Don't use it directly. Use MacroAssembler::decrement() instead.
- InstructionMark im(this);
- emit_byte(0xFF);
- emit_operand(rcx, dst);
-}
-
-
-void Assembler::idivl(Register src) {
- emit_byte(0xF7);
- emit_byte(0xF8 | src->encoding());
-}
-
-
-void Assembler::cdql() {
- emit_byte(0x99);
-}
-
-
-void Assembler::imull(Register dst, Register src) {
- emit_byte(0x0F);
- emit_byte(0xAF);
- emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
-}
-
-
-void Assembler::imull(Register dst, Register src, int value) {
- if (is8bit(value)) {
- emit_byte(0x6B);
- emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
- emit_byte(value);
- } else {
- emit_byte(0x69);
- emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
- emit_long(value);
- }
-}
-
-
-void Assembler::incl(Register dst) {
- // Don't use it directly. Use MacroAssembler::increment() instead.
- emit_byte(0x40 | dst->encoding());
-}
-
-
-void Assembler::incl(Address dst) {
- // Don't use it directly. Use MacroAssembler::increment() instead.
- InstructionMark im(this);
- emit_byte(0xFF);
- emit_operand(rax, dst);
-}
-
-
-void Assembler::leal(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x8D);
- emit_operand(dst, src);
-}
-
-void Assembler::mull(Address src) {
- InstructionMark im(this);
- emit_byte(0xF7);
- emit_operand(rsp, src);
-}
-
-
-void Assembler::mull(Register src) {
- emit_byte(0xF7);
- emit_byte(0xE0 | src->encoding());
-}
-
-
-void Assembler::negl(Register dst) {
- emit_byte(0xF7);
- emit_byte(0xD8 | dst->encoding());
-}
-
-
-void Assembler::notl(Register dst) {
- emit_byte(0xF7);
- emit_byte(0xD0 | dst->encoding());
-}
-
-
-void Assembler::orl(Address dst, int imm32) {
- InstructionMark im(this);
- emit_byte(0x81);
- emit_operand(rcx, dst);
- emit_long(imm32);
-}
-
-void Assembler::orl(Register dst, int imm32) {
- emit_arith(0x81, 0xC8, dst, imm32);
-}
-
-
-void Assembler::orl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x0B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::orl(Register dst, Register src) {
- emit_arith(0x0B, 0xC0, dst, src);
-}
-
-
-void Assembler::rcll(Register dst, int imm8) {
- assert(isShiftCount(imm8), "illegal shift count");
- if (imm8 == 1) {
- emit_byte(0xD1);
- emit_byte(0xD0 | dst->encoding());
- } else {
- emit_byte(0xC1);
- emit_byte(0xD0 | dst->encoding());
- emit_byte(imm8);
- }
-}
-
-
-void Assembler::sarl(Register dst, int imm8) {
- assert(isShiftCount(imm8), "illegal shift count");
- if (imm8 == 1) {
- emit_byte(0xD1);
- emit_byte(0xF8 | dst->encoding());
- } else {
- emit_byte(0xC1);
- emit_byte(0xF8 | dst->encoding());
- emit_byte(imm8);
- }
-}
-
-
-void Assembler::sarl(Register dst) {
- emit_byte(0xD3);
- emit_byte(0xF8 | dst->encoding());
-}
-
-
-void Assembler::sbbl(Address dst, int imm32) {
- InstructionMark im(this);
- emit_arith_operand(0x81,rbx,dst,imm32);
-}
-
-
-void Assembler::sbbl(Register dst, int imm32) {
- emit_arith(0x81, 0xD8, dst, imm32);
-}
-
-
-void Assembler::sbbl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x1B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::sbbl(Register dst, Register src) {
- emit_arith(0x1B, 0xC0, dst, src);
-}
-
-
-void Assembler::shldl(Register dst, Register src) {
- emit_byte(0x0F);
- emit_byte(0xA5);
- emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
-}
-
-
-void Assembler::shll(Register dst, int imm8) {
- assert(isShiftCount(imm8), "illegal shift count");
- if (imm8 == 1 ) {
- emit_byte(0xD1);
- emit_byte(0xE0 | dst->encoding());
- } else {
- emit_byte(0xC1);
- emit_byte(0xE0 | dst->encoding());
- emit_byte(imm8);
- }
-}
-
-
-void Assembler::shll(Register dst) {
- emit_byte(0xD3);
- emit_byte(0xE0 | dst->encoding());
-}
-
-
-void Assembler::shrdl(Register dst, Register src) {
- emit_byte(0x0F);
- emit_byte(0xAD);
- emit_byte(0xC0 | src->encoding() << 3 | dst->encoding());
-}
-
-
-void Assembler::shrl(Register dst, int imm8) {
- assert(isShiftCount(imm8), "illegal shift count");
- emit_byte(0xC1);
- emit_byte(0xE8 | dst->encoding());
- emit_byte(imm8);
-}
-
-
-void Assembler::shrl(Register dst) {
- emit_byte(0xD3);
- emit_byte(0xE8 | dst->encoding());
-}
-
-
-void Assembler::subl(Address dst, int imm32) {
- if (is8bit(imm32)) {
- InstructionMark im(this);
- emit_byte(0x83);
- emit_operand(rbp, dst);
- emit_byte(imm32 & 0xFF);
- } else {
- InstructionMark im(this);
- emit_byte(0x81);
- emit_operand(rbp, dst);
- emit_long(imm32);
- }
-}
-
-
-void Assembler::subl(Register dst, int imm32) {
- emit_arith(0x81, 0xE8, dst, imm32);
-}
-
-
-void Assembler::subl(Address dst, Register src) {
- InstructionMark im(this);
- emit_byte(0x29);
- emit_operand(src, dst);
-}
-
-
-void Assembler::subl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x2B);
- emit_operand(dst, src);
-}
-
-
-void Assembler::subl(Register dst, Register src) {
- emit_arith(0x2B, 0xC0, dst, src);
-}
-
-
-void Assembler::testb(Register dst, int imm8) {
- assert(dst->has_byte_register(), "must have byte register");
- emit_arith_b(0xF6, 0xC0, dst, imm8);
-}
-
-
-void Assembler::testl(Register dst, int imm32) {
- // not using emit_arith because test
- // doesn't support sign-extension of
- // 8-bit operands
- if (dst->encoding() == 0) {
- emit_byte(0xA9);
- } else {
- emit_byte(0xF7);
- emit_byte(0xC0 | dst->encoding());
- }
- emit_long(imm32);
-}
-
-
-void Assembler::testl(Register dst, Register src) {
- emit_arith(0x85, 0xC0, dst, src);
-}
-
-void Assembler::testl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x85);
- emit_operand(dst, src);
-}
-
-void Assembler::xaddl(Address dst, Register src) {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xC1);
- emit_operand(src, dst);
-}
-
-void Assembler::xorl(Register dst, int imm32) {
- emit_arith(0x81, 0xF0, dst, imm32);
-}
-
-
-void Assembler::xorl(Register dst, Address src) {
- InstructionMark im(this);
- emit_byte(0x33);
- emit_operand(dst, src);
-}
-
-
-void Assembler::xorl(Register dst, Register src) {
- emit_arith(0x33, 0xC0, dst, src);
-}
-
-
-void Assembler::bswap(Register reg) {
- emit_byte(0x0F);
- emit_byte(0xC8 | reg->encoding());
-}
-
-
-void Assembler::lock() {
- if (Atomics & 1) {
- // Diagnostic mode (Atomics & 1): emit a NOP in place of the LOCK prefix
- emit_byte(0x90) ;
- } else {
- emit_byte(0xF0);
- }
-}
-
-
-void Assembler::xchg(Register reg, Address adr) {
- InstructionMark im(this);
- emit_byte(0x87);
- emit_operand(reg, adr);
-}
-
-
-void Assembler::xchgl(Register dst, Register src) {
- emit_byte(0x87);
- emit_byte(0xc0 | dst->encoding() << 3 | src->encoding());
-}
-
-
- // The 32-bit cmpxchg compares the value at adr with the contents of rax;
- // if they are equal, reg is stored into adr, otherwise the value at adr is loaded into rax.
- // The ZF flag is set if the compared values were equal, and cleared otherwise.
-void Assembler::cmpxchg(Register reg, Address adr) {
- if (Atomics & 2) {
- // caveat: no instructionmark, so this isn't relocatable.
- // Emit a synthetic, non-atomic, CAS equivalent.
- // Beware. The synthetic form sets all ICCs, not just ZF.
- // cmpxchg r,[m] is equivalent to rax, = CAS (m, rax, r)
- cmpl (rax, adr) ;
- movl (rax, adr) ;
- if (reg != rax) {
- Label L ;
- jcc (Assembler::notEqual, L) ;
- movl (adr, reg) ;
- bind (L) ;
- }
- } else {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xB1);
- emit_operand(reg, adr);
- }
-}
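
To make the semantics described above concrete, here is a minimal C++ model of what cmpxchg reg, [adr] computes (illustration only; the hardware instruction does this atomically under a lock prefix, and the names below are invented for the sketch):

#include <cstdint>
#include <cstdio>

// Register-level model of cmpxchg reg, [adr]: eax is the accumulator,
// zf stands in for the ZF flag. Not atomic; for illustration only.
static void cmpxchg_model(uint32_t& eax, uint32_t& mem, uint32_t reg, bool& zf) {
  if (eax == mem) {
    mem = reg;   // equal: store the new value
    zf  = true;  // ZF set
  } else {
    eax = mem;   // not equal: load the current value into eax
    zf  = false; // ZF cleared
  }
}

int main() {
  uint32_t eax = 1, mem = 1; bool zf = false;
  cmpxchg_model(eax, mem, 42, zf);
  std::printf("mem=%u eax=%u zf=%d\n", mem, eax, zf); // prints: mem=42 eax=1 zf=1
  return 0;
}
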
-
- // The 64-bit cmpxchg8b compares the value at adr with the contents of rdx:rax;
- // if they are equal, rcx:rbx is stored into adr, otherwise the value at adr is loaded
- // into rdx:rax. The ZF flag is set if the compared values were equal, and cleared otherwise.
-void Assembler::cmpxchg8(Address adr) {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xc7);
- emit_operand(rcx, adr);
-}
-
-void Assembler::hlt() {
- emit_byte(0xF4);
-}
-
-
-void Assembler::addr_nop_4() {
- // 4 bytes: NOP DWORD PTR [EAX+0]
- emit_byte(0x0F);
- emit_byte(0x1F);
- emit_byte(0x40); // emit_rm(cbuf, 0x1, EAX_enc, EAX_enc);
- emit_byte(0); // 8-bit offset (1 byte)
-}
-
-void Assembler::addr_nop_5() {
- // 5 bytes: NOP DWORD PTR [EAX+EAX*0+0] 8-bit offset
- emit_byte(0x0F);
- emit_byte(0x1F);
- emit_byte(0x44); // emit_rm(cbuf, 0x1, EAX_enc, 0x4);
- emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
- emit_byte(0); // 8-bit offset (1 byte)
-}
-
-void Assembler::addr_nop_7() {
- // 7 bytes: NOP DWORD PTR [EAX+0] 32-bit offset
- emit_byte(0x0F);
- emit_byte(0x1F);
- emit_byte(0x80); // emit_rm(cbuf, 0x2, EAX_enc, EAX_enc);
- emit_long(0); // 32-bit offset (4 bytes)
-}
-
-void Assembler::addr_nop_8() {
- // 8 bytes: NOP DWORD PTR [EAX+EAX*0+0] 32-bit offset
- emit_byte(0x0F);
- emit_byte(0x1F);
- emit_byte(0x84); // emit_rm(cbuf, 0x2, EAX_enc, 0x4);
- emit_byte(0x00); // emit_rm(cbuf, 0x0, EAX_enc, EAX_enc);
- emit_long(0); // 32-bit offset (4 bytes)
-}
-
-void Assembler::nop(int i) {
- assert(i > 0, " ");
- if (UseAddressNop && VM_Version::is_intel()) {
- //
- // Using multi-byte NOPs "0x0F 0x1F [address]" for Intel
- // 1: 0x90
- // 2: 0x66 0x90
- // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
- // 4: 0x0F 0x1F 0x40 0x00
- // 5: 0x0F 0x1F 0x44 0x00 0x00
- // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
- // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
- // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-
- // The rest of the encoding is Intel-specific: don't use consecutive address nops
-
- // 12: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
- // 13: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
- // 14: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
- // 15: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x66 0x66 0x66 0x90
-
- while(i >= 15) {
- // For Intel, don't generate consecutive address nops (mix with regular nops)
- i -= 15;
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- addr_nop_8();
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x90); // nop
- }
- switch (i) {
- case 14:
- emit_byte(0x66); // size prefix
- case 13:
- emit_byte(0x66); // size prefix
- case 12:
- addr_nop_8();
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x90); // nop
- break;
- case 11:
- emit_byte(0x66); // size prefix
- case 10:
- emit_byte(0x66); // size prefix
- case 9:
- emit_byte(0x66); // size prefix
- case 8:
- addr_nop_8();
- break;
- case 7:
- addr_nop_7();
- break;
- case 6:
- emit_byte(0x66); // size prefix
- case 5:
- addr_nop_5();
- break;
- case 4:
- addr_nop_4();
- break;
- case 3:
- // Don't use "0x0F 0x1F 0x00" - need patching safe padding
- emit_byte(0x66); // size prefix
- case 2:
- emit_byte(0x66); // size prefix
- case 1:
- emit_byte(0x90); // nop
- break;
- default:
- assert(i == 0, " ");
- }
- return;
- }
- if (UseAddressNop && VM_Version::is_amd()) {
- //
- // Using multi-byte NOPs "0x0F 0x1F [address]" for AMD.
- // 1: 0x90
- // 2: 0x66 0x90
- // 3: 0x66 0x66 0x90 (don't use "0x0F 0x1F 0x00" - need patching safe padding)
- // 4: 0x0F 0x1F 0x40 0x00
- // 5: 0x0F 0x1F 0x44 0x00 0x00
- // 6: 0x66 0x0F 0x1F 0x44 0x00 0x00
- // 7: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
- // 8: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 9: 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 10: 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // 11: 0x66 0x66 0x66 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
-
- // The rest of the encoding is AMD-specific: use consecutive address nops
-
- // 12: 0x66 0x0F 0x1F 0x44 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
- // 13: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x66 0x0F 0x1F 0x44 0x00 0x00
- // 14: 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
- // 15: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x80 0x00 0x00 0x00 0x00
- // 16: 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00 0x0F 0x1F 0x84 0x00 0x00 0x00 0x00 0x00
- // Size prefixes (0x66) are added for larger sizes
-
- while(i >= 22) {
- i -= 11;
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- emit_byte(0x66); // size prefix
- addr_nop_8();
- }
- // Generate the first nop for sizes between 12 and 21
- switch (i) {
- case 21:
- i -= 1;
- emit_byte(0x66); // size prefix
- case 20:
- case 19:
- i -= 1;
- emit_byte(0x66); // size prefix
- case 18:
- case 17:
- i -= 1;
- emit_byte(0x66); // size prefix
- case 16:
- case 15:
- i -= 8;
- addr_nop_8();
- break;
- case 14:
- case 13:
- i -= 7;
- addr_nop_7();
- break;
- case 12:
- i -= 6;
- emit_byte(0x66); // size prefix
- addr_nop_5();
- break;
- default:
- assert(i < 12, " ");
- }
-
- // Generate the second nop for sizes between 1 and 11
- switch (i) {
- case 11:
- emit_byte(0x66); // size prefix
- case 10:
- emit_byte(0x66); // size prefix
- case 9:
- emit_byte(0x66); // size prefix
- case 8:
- addr_nop_8();
- break;
- case 7:
- addr_nop_7();
- break;
- case 6:
- emit_byte(0x66); // size prefix
- case 5:
- addr_nop_5();
- break;
- case 4:
- addr_nop_4();
- break;
- case 3:
- // Don't use "0x0F 0x1F 0x00" - need patching safe padding
- emit_byte(0x66); // size prefix
- case 2:
- emit_byte(0x66); // size prefix
- case 1:
- emit_byte(0x90); // nop
- break;
- default:
- assert(i == 0, " ");
- }
- return;
- }
-
- // Using nops with size prefixes "0x66 0x90".
- // From AMD Optimization Guide:
- // 1: 0x90
- // 2: 0x66 0x90
- // 3: 0x66 0x66 0x90
- // 4: 0x66 0x66 0x66 0x90
- // 5: 0x66 0x66 0x90 0x66 0x90
- // 6: 0x66 0x66 0x90 0x66 0x66 0x90
- // 7: 0x66 0x66 0x66 0x90 0x66 0x66 0x90
- // 8: 0x66 0x66 0x66 0x90 0x66 0x66 0x66 0x90
- // 9: 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
- // 10: 0x66 0x66 0x66 0x90 0x66 0x66 0x90 0x66 0x66 0x90
- //
- while(i > 12) {
- i -= 4;
- emit_byte(0x66); // size prefix
- emit_byte(0x66);
- emit_byte(0x66);
- emit_byte(0x90); // nop
- }
- // 1 - 12 nops
- if(i > 8) {
- if(i > 9) {
- i -= 1;
- emit_byte(0x66);
- }
- i -= 3;
- emit_byte(0x66);
- emit_byte(0x66);
- emit_byte(0x90);
- }
- // 1 - 8 nops
- if(i > 4) {
- if(i > 6) {
- i -= 1;
- emit_byte(0x66);
- }
- i -= 3;
- emit_byte(0x66);
- emit_byte(0x66);
- emit_byte(0x90);
- }
- switch (i) {
- case 4:
- emit_byte(0x66);
- case 3:
- emit_byte(0x66);
- case 2:
- emit_byte(0x66);
- case 1:
- emit_byte(0x90);
- break;
- default:
- assert(i == 0, " ");
- }
-}
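
When address nops are not used, the tail of nop() above pads with 0x66-prefixed 0x90 NOPs. A minimal standalone sketch of that padding idea follows (the chunking is simplified and does not reproduce the exact prefix balancing above; pad_with_prefixed_nops is an invented name):

#include <cstdio>
#include <vector>

// Simplified sketch of padding a byte count with 0x66-prefixed NOPs,
// at most three 0x66 prefixes per 0x90. Illustration only; the real
// nop() above balances the prefixes differently for some counts.
static std::vector<unsigned char> pad_with_prefixed_nops(int count) {
  std::vector<unsigned char> out;
  while (count > 0) {
    int chunk = count >= 4 ? 4 : count;            // up to "0x66 0x66 0x66 0x90"
    for (int i = 0; i < chunk - 1; i++) out.push_back(0x66);
    out.push_back(0x90);
    count -= chunk;
  }
  return out;
}

int main() {
  for (unsigned char b : pad_with_prefixed_nops(6)) std::printf("%02X ", b);
  std::printf("\n"); // prints: 66 66 66 90 66 90
  return 0;
}
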
-
-void Assembler::ret(int imm16) {
- if (imm16 == 0) {
- emit_byte(0xC3);
- } else {
- emit_byte(0xC2);
- emit_word(imm16);
- }
-}
-
-
-void Assembler::set_byte_if_not_zero(Register dst) {
- emit_byte(0x0F);
- emit_byte(0x95);
- emit_byte(0xE0 | dst->encoding());
-}
-
-
-// copies a single word from [esi] to [edi]
-void Assembler::smovl() {
- emit_byte(0xA5);
-}
-
- // copies rcx double words (m32) from [esi] to [edi]
-void Assembler::rep_movl() {
- emit_byte(0xF3);
- emit_byte(0xA5);
-}
-
-
- // stores the rax value into rcx double words (m32) starting at [edi]
-void Assembler::rep_set() {
- emit_byte(0xF3);
- emit_byte(0xAB);
-}
-
- // scans rcx double words (m32) at [edi] for an occurrence of rax
-void Assembler::repne_scan() {
- emit_byte(0xF2);
- emit_byte(0xAF);
-}
-
-
-void Assembler::setb(Condition cc, Register dst) {
- assert(0 <= cc && cc < 16, "illegal cc");
- emit_byte(0x0F);
- emit_byte(0x90 | cc);
- emit_byte(0xC0 | dst->encoding());
-}
-
-void Assembler::cld() {
- emit_byte(0xfc);
-}
-
-void Assembler::std() {
- emit_byte(0xfd);
-}
-
-void Assembler::emit_raw (unsigned char b) {
- emit_byte (b) ;
-}
-
-// Serializes memory.
-void Assembler::membar() {
- // Memory barriers are only needed on multiprocessors
- if (os::is_MP()) {
- if( VM_Version::supports_sse2() ) {
- emit_byte( 0x0F ); // MFENCE: faster, and clobbers no registers
- emit_byte( 0xAE );
- emit_byte( 0xF0 );
- } else {
- // All usable chips support "locked" instructions which suffice
- // as barriers, and are much faster than the alternative of
- // using the cpuid instruction. Here we use a locked add [esp],0,
- // which is conveniently otherwise a no-op except for clobbering
- // the flags (which we save and restore).
- pushfd(); // Save eflags register
- lock();
- addl(Address(rsp, 0), 0);// Assert the lock# signal here
- popfd(); // Restore eflags register
- }
- }
-}
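
On SSE2-capable chips membar() emits MFENCE; otherwise the locked add to [esp] acts as a full fence. As a point of comparison only (not HotSpot code), the same full-fence request at the C++ level is a sequentially consistent fence, which compilers typically lower to mfence or a locked read-modify-write on x86:

#include <atomic>

// C++-level analogue of the full fence emitted by membar(): a seq_cst fence.
// On x86 this usually compiles to mfence (or a locked RMW); shown only as a
// point of comparison, not as part of the VM.
void full_fence_example() {
  std::atomic_thread_fence(std::memory_order_seq_cst);
}
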
-
-// Identify processor type and features
-void Assembler::cpuid() {
- // Note: we can't assert VM_Version::supports_cpuid() here
- // because this instruction is used in the processor
- // identification code.
- emit_byte( 0x0F );
- emit_byte( 0xA2 );
-}
-
-void Assembler::call(Label& L, relocInfo::relocType rtype) {
- if (L.is_bound()) {
- const int long_size = 5;
- int offs = target(L) - pc();
- assert(offs <= 0, "assembler error");
- InstructionMark im(this);
- // 1110 1000 #32-bit disp
- emit_byte(0xE8);
- emit_data(offs - long_size, rtype, 0);
- } else {
- InstructionMark im(this);
- // 1110 1000 #32-bit disp
- L.add_patch_at(code(), locator());
- emit_byte(0xE8);
- emit_data(int(0), rtype, 0);
- }
-}
-
-void Assembler::call(Register dst) {
- emit_byte(0xFF);
- emit_byte(0xD0 | dst->encoding());
-}
-
-
-void Assembler::call(Address adr) {
- InstructionMark im(this);
- relocInfo::relocType rtype = adr.reloc();
- if (rtype != relocInfo::runtime_call_type) {
- emit_byte(0xFF);
- emit_operand(rdx, adr);
- } else {
- assert(false, "ack");
- }
-
-}
-
-void Assembler::call_literal(address dest, RelocationHolder const& rspec) {
- InstructionMark im(this);
- emit_byte(0xE8);
- intptr_t disp = dest - (_code_pos + sizeof(int32_t));
- assert(dest != NULL, "must have a target");
- emit_data(disp, rspec, call32_operand);
-
-}
-
-void Assembler::jmp(Register entry) {
- emit_byte(0xFF);
- emit_byte(0xE0 | entry->encoding());
-}
-
-
-void Assembler::jmp(Address adr) {
- InstructionMark im(this);
- emit_byte(0xFF);
- emit_operand(rsp, adr);
-}
-
-void Assembler::jmp_literal(address dest, RelocationHolder const& rspec) {
- InstructionMark im(this);
- emit_byte(0xE9);
- assert(dest != NULL, "must have a target");
- intptr_t disp = dest - (_code_pos + sizeof(int32_t));
- emit_data(disp, rspec.reloc(), call32_operand);
-}
-
-void Assembler::jmp(Label& L, relocInfo::relocType rtype) {
- if (L.is_bound()) {
- address entry = target(L);
- assert(entry != NULL, "jmp most probably wrong");
- InstructionMark im(this);
- const int short_size = 2;
- const int long_size = 5;
- intptr_t offs = entry - _code_pos;
- if (rtype == relocInfo::none && is8bit(offs - short_size)) {
- emit_byte(0xEB);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- emit_byte(0xE9);
- emit_long(offs - long_size);
- }
- } else {
- // By default, forward jumps are always 32-bit displacements, since
- // we can't yet know where the label will be bound. If you're sure that
- // the forward jump stays within the range of an 8-bit displacement,
- // use jmpb to force one.
- InstructionMark im(this);
- relocate(rtype);
- L.add_patch_at(code(), locator());
- emit_byte(0xE9);
- emit_long(0);
- }
-}
-
-void Assembler::jmpb(Label& L) {
- if (L.is_bound()) {
- const int short_size = 2;
- address entry = target(L);
- assert(is8bit((entry - _code_pos) + short_size),
- "Displacement too large for a short jmp");
- assert(entry != NULL, "jmp most probably wrong");
- intptr_t offs = entry - _code_pos;
- emit_byte(0xEB);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- InstructionMark im(this);
- L.add_patch_at(code(), locator());
- emit_byte(0xEB);
- emit_byte(0);
- }
-}
-
-void Assembler::jcc(Condition cc, Label& L, relocInfo::relocType rtype) {
- InstructionMark im(this);
- relocate(rtype);
- assert((0 <= cc) && (cc < 16), "illegal cc");
- if (L.is_bound()) {
- address dst = target(L);
- assert(dst != NULL, "jcc most probably wrong");
-
- const int short_size = 2;
- const int long_size = 6;
- int offs = (int)dst - ((int)_code_pos);
- if (rtype == relocInfo::none && is8bit(offs - short_size)) {
- // 0111 tttn #8-bit disp
- emit_byte(0x70 | cc);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- emit_byte(0x0F);
- emit_byte(0x80 | cc);
- emit_long(offs - long_size);
- }
- } else {
- // Note: we could eliminate conditional jumps to this jump if the condition
- // is the same; however, that seems to be a rather unlikely case.
- // Note: use jccb() if the label to be bound is very close, to get
- // an 8-bit displacement.
- L.add_patch_at(code(), locator());
- emit_byte(0x0F);
- emit_byte(0x80 | cc);
- emit_long(0);
- }
-}
-
-void Assembler::jccb(Condition cc, Label& L) {
- if (L.is_bound()) {
- const int short_size = 2;
- address entry = target(L);
- assert(is8bit((intptr_t)entry - ((intptr_t)_code_pos + short_size)),
- "Displacement too large for a short jmp");
- intptr_t offs = (intptr_t)entry - (intptr_t)_code_pos;
- // 0111 tttn #8-bit disp
- emit_byte(0x70 | cc);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- InstructionMark im(this);
- L.add_patch_at(code(), locator());
- emit_byte(0x70 | cc);
- emit_byte(0);
- }
-}
-
-// FPU instructions
-
-void Assembler::fld1() {
- emit_byte(0xD9);
- emit_byte(0xE8);
-}
-
-
-void Assembler::fldz() {
- emit_byte(0xD9);
- emit_byte(0xEE);
-}
-
-
-void Assembler::fld_s(Address adr) {
- InstructionMark im(this);
- emit_byte(0xD9);
- emit_operand(rax, adr);
-}
-
-
-void Assembler::fld_s (int index) {
- emit_farith(0xD9, 0xC0, index);
-}
-
-
-void Assembler::fld_d(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDD);
- emit_operand(rax, adr);
-}
-
-
-void Assembler::fld_x(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDB);
- emit_operand(rbp, adr);
-}
-
-
-void Assembler::fst_s(Address adr) {
- InstructionMark im(this);
- emit_byte(0xD9);
- emit_operand(rdx, adr);
-}
-
-
-void Assembler::fst_d(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDD);
- emit_operand(rdx, adr);
-}
-
-
-void Assembler::fstp_s(Address adr) {
- InstructionMark im(this);
- emit_byte(0xD9);
- emit_operand(rbx, adr);
-}
-
-
-void Assembler::fstp_d(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDD);
- emit_operand(rbx, adr);
-}
-
-
-void Assembler::fstp_x(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDB);
- emit_operand(rdi, adr);
-}
-
-
-void Assembler::fstp_d(int index) {
- emit_farith(0xDD, 0xD8, index);
-}
-
-
-void Assembler::fild_s(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDB);
- emit_operand(rax, adr);
-}
-
-
-void Assembler::fild_d(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDF);
- emit_operand(rbp, adr);
-}
-
-
-void Assembler::fistp_s(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDB);
- emit_operand(rbx, adr);
-}
-
-
-void Assembler::fistp_d(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDF);
- emit_operand(rdi, adr);
-}
-
-
-void Assembler::fist_s(Address adr) {
- InstructionMark im(this);
- emit_byte(0xDB);
- emit_operand(rdx, adr);
-}
-
-
-void Assembler::fabs() {
- emit_byte(0xD9);
- emit_byte(0xE1);
-}
-
-
-void Assembler::fldln2() {
- emit_byte(0xD9);
- emit_byte(0xED);
-}
-
-void Assembler::fyl2x() {
- emit_byte(0xD9);
- emit_byte(0xF1);
-}
-
-
-void Assembler::fldlg2() {
- emit_byte(0xD9);
- emit_byte(0xEC);
-}
-
-
-void Assembler::flog() {
- fldln2();
- fxch();
- fyl2x();
-}
-
-
-void Assembler::flog10() {
- fldlg2();
- fxch();
- fyl2x();
-}
-
-
-void Assembler::fsin() {
- emit_byte(0xD9);
- emit_byte(0xFE);
-}
-
-
-void Assembler::fcos() {
- emit_byte(0xD9);
- emit_byte(0xFF);
-}
-
-void Assembler::ftan() {
- emit_byte(0xD9);
- emit_byte(0xF2);
- emit_byte(0xDD);
- emit_byte(0xD8);
-}
-
-void Assembler::fsqrt() {
- emit_byte(0xD9);
- emit_byte(0xFA);
-}
-
-
-void Assembler::fchs() {
- emit_byte(0xD9);
- emit_byte(0xE0);
-}
-
-
-void Assembler::fadd_s(Address src) {
- InstructionMark im(this);
- emit_byte(0xD8);
- emit_operand(rax, src);
-}
-
-
-void Assembler::fadd_d(Address src) {
- InstructionMark im(this);
- emit_byte(0xDC);
- emit_operand(rax, src);
-}
-
-
-void Assembler::fadd(int i) {
- emit_farith(0xD8, 0xC0, i);
-}
-
-
-void Assembler::fadda(int i) {
- emit_farith(0xDC, 0xC0, i);
-}
-
-
-void Assembler::fsub_d(Address src) {
- InstructionMark im(this);
- emit_byte(0xDC);
- emit_operand(rsp, src);
-}
-
-
-void Assembler::fsub_s(Address src) {
- InstructionMark im(this);
- emit_byte(0xD8);
- emit_operand(rsp, src);
-}
-
-
-void Assembler::fsubr_s(Address src) {
- InstructionMark im(this);
- emit_byte(0xD8);
- emit_operand(rbp, src);
-}
-
-
-void Assembler::fsubr_d(Address src) {
- InstructionMark im(this);
- emit_byte(0xDC);
- emit_operand(rbp, src);
-}
-
-
-void Assembler::fmul_s(Address src) {
- InstructionMark im(this);
- emit_byte(0xD8);
- emit_operand(rcx, src);
-}
-
-
-void Assembler::fmul_d(Address src) {
- InstructionMark im(this);
- emit_byte(0xDC);
- emit_operand(rcx, src);
-}
-
-
-void Assembler::fmul(int i) {
- emit_farith(0xD8, 0xC8, i);
-}
-
-
-void Assembler::fmula(int i) {
- emit_farith(0xDC, 0xC8, i);
-}
-
-
-void Assembler::fdiv_s(Address src) {
- InstructionMark im(this);
- emit_byte(0xD8);
- emit_operand(rsi, src);
-}
-
-
-void Assembler::fdiv_d(Address src) {
- InstructionMark im(this);
- emit_byte(0xDC);
- emit_operand(rsi, src);
-}
-
-
-void Assembler::fdivr_s(Address src) {
- InstructionMark im(this);
- emit_byte(0xD8);
- emit_operand(rdi, src);
-}
-
-
-void Assembler::fdivr_d(Address src) {
- InstructionMark im(this);
- emit_byte(0xDC);
- emit_operand(rdi, src);
-}
-
-
-void Assembler::fsub(int i) {
- emit_farith(0xD8, 0xE0, i);
-}
-
-
-void Assembler::fsuba(int i) {
- emit_farith(0xDC, 0xE8, i);
-}
-
-
-void Assembler::fsubr(int i) {
- emit_farith(0xD8, 0xE8, i);
-}
-
-
-void Assembler::fsubra(int i) {
- emit_farith(0xDC, 0xE0, i);
-}
-
-
-void Assembler::fdiv(int i) {
- emit_farith(0xD8, 0xF0, i);
-}
-
-
-void Assembler::fdiva(int i) {
- emit_farith(0xDC, 0xF8, i);
-}
-
-
-void Assembler::fdivr(int i) {
- emit_farith(0xD8, 0xF8, i);
-}
-
-
-void Assembler::fdivra(int i) {
- emit_farith(0xDC, 0xF0, i);
-}
-
-
-// Note: The Intel manual (Pentium Processor User's Manual, Vol.3, 1994)
-// is erroneous for some of the floating-point instructions below.
-
-void Assembler::fdivp(int i) {
- emit_farith(0xDE, 0xF8, i); // ST(0) <- ST(0) / ST(1) and pop (Intel manual wrong)
-}
-
-
-void Assembler::fdivrp(int i) {
- emit_farith(0xDE, 0xF0, i); // ST(0) <- ST(1) / ST(0) and pop (Intel manual wrong)
-}
-
-
-void Assembler::fsubp(int i) {
- emit_farith(0xDE, 0xE8, i); // ST(0) <- ST(0) - ST(1) and pop (Intel manual wrong)
-}
-
-
-void Assembler::fsubrp(int i) {
- emit_farith(0xDE, 0xE0, i); // ST(0) <- ST(1) - ST(0) and pop (Intel manual wrong)
-}
-
-
-void Assembler::faddp(int i) {
- emit_farith(0xDE, 0xC0, i);
-}
-
-
-void Assembler::fmulp(int i) {
- emit_farith(0xDE, 0xC8, i);
-}
-
-
-void Assembler::fprem() {
- emit_byte(0xD9);
- emit_byte(0xF8);
-}
-
-
-void Assembler::fprem1() {
- emit_byte(0xD9);
- emit_byte(0xF5);
-}
-
-
-void Assembler::fxch(int i) {
- emit_farith(0xD9, 0xC8, i);
-}
-
-
-void Assembler::fincstp() {
- emit_byte(0xD9);
- emit_byte(0xF7);
-}
-
-
-void Assembler::fdecstp() {
- emit_byte(0xD9);
- emit_byte(0xF6);
-}
-
-
-void Assembler::ffree(int i) {
- emit_farith(0xDD, 0xC0, i);
-}
-
-
-void Assembler::fcomp_s(Address src) {
- InstructionMark im(this);
- emit_byte(0xD8);
- emit_operand(rbx, src);
-}
-
-
-void Assembler::fcomp_d(Address src) {
- InstructionMark im(this);
- emit_byte(0xDC);
- emit_operand(rbx, src);
-}
-
-
-void Assembler::fcom(int i) {
- emit_farith(0xD8, 0xD0, i);
-}
-
-
-void Assembler::fcomp(int i) {
- emit_farith(0xD8, 0xD8, i);
-}
-
-
-void Assembler::fcompp() {
- emit_byte(0xDE);
- emit_byte(0xD9);
-}
-
-
-void Assembler::fucomi(int i) {
- // make sure the instruction is supported (introduced for P6, together with cmov)
- guarantee(VM_Version::supports_cmov(), "illegal instruction");
- emit_farith(0xDB, 0xE8, i);
-}
-
-
-void Assembler::fucomip(int i) {
- // make sure the instruction is supported (introduced for P6, together with cmov)
- guarantee(VM_Version::supports_cmov(), "illegal instruction");
- emit_farith(0xDF, 0xE8, i);
-}
-
-
-void Assembler::ftst() {
- emit_byte(0xD9);
- emit_byte(0xE4);
-}
-
-
-void Assembler::fnstsw_ax() {
- emit_byte(0xdF);
- emit_byte(0xE0);
-}
-
-
-void Assembler::fwait() {
- emit_byte(0x9B);
-}
-
-
-void Assembler::finit() {
- emit_byte(0x9B);
- emit_byte(0xDB);
- emit_byte(0xE3);
-}
-
-
-void Assembler::fldcw(Address src) {
- InstructionMark im(this);
- emit_byte(0xd9);
- emit_operand(rbp, src);
-}
-
-
-void Assembler::fnstcw(Address src) {
- InstructionMark im(this);
- emit_byte(0x9B);
- emit_byte(0xD9);
- emit_operand(rdi, src);
-}
-
-void Assembler::fnsave(Address dst) {
- InstructionMark im(this);
- emit_byte(0xDD);
- emit_operand(rsi, dst);
-}
-
-
-void Assembler::frstor(Address src) {
- InstructionMark im(this);
- emit_byte(0xDD);
- emit_operand(rsp, src);
-}
-
-
-void Assembler::fldenv(Address src) {
- InstructionMark im(this);
- emit_byte(0xD9);
- emit_operand(rsp, src);
-}
-
-
-void Assembler::sahf() {
- emit_byte(0x9E);
-}
-
-// MMX operations
-void Assembler::emit_operand(MMXRegister reg, Address adr) {
- emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
-}
-
-void Assembler::movq( MMXRegister dst, Address src ) {
- assert( VM_Version::supports_mmx(), "" );
- emit_byte(0x0F);
- emit_byte(0x6F);
- emit_operand(dst,src);
-}
-
-void Assembler::movq( Address dst, MMXRegister src ) {
- assert( VM_Version::supports_mmx(), "" );
- emit_byte(0x0F);
- emit_byte(0x7F);
- emit_operand(src,dst);
-}
-
-void Assembler::emms() {
- emit_byte(0x0F);
- emit_byte(0x77);
-}
-
-
-
-
-// SSE and SSE2 instructions
-inline void Assembler::emit_sse_operand(XMMRegister reg, Address adr) {
- assert(((Register)reg)->encoding() == reg->encoding(), "otherwise typecast is invalid");
- emit_operand((Register)reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
-}
-inline void Assembler::emit_sse_operand(Register reg, Address adr) {
- emit_operand(reg, adr._base, adr._index, adr._scale, adr._disp, adr._rspec);
-}
-
-inline void Assembler::emit_sse_operand(XMMRegister dst, XMMRegister src) {
- emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
-}
-inline void Assembler::emit_sse_operand(XMMRegister dst, Register src) {
- emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
-}
-inline void Assembler::emit_sse_operand(Register dst, XMMRegister src) {
- emit_byte(0xC0 | dst->encoding() << 3 | src->encoding());
-}
-
-
- // Macro for creation of SSE2 instructions
- // The SSE2 instruction set is highly regular, so this macro saves
- // a lot of cut&paste.
- // Each macro expansion creates two methods (same name with different
- // parameter lists).
- //
- // Macro parameters:
- // * name: name of the created methods
- // * sse_version: either sse or sse2, for the assertion that the processor supports the instruction
- // * prefix: first opcode byte of the instruction (or 0 if no prefix byte)
- // * opcode: last opcode byte of the instruction
- // * conversion instructions have parameters of type Register instead of XMMRegister,
- // so this can also be configured with macro parameters
-#define emit_sse_instruction(name, sse_version, prefix, opcode, dst_register_type, src_register_type) \
- \
- void Assembler:: name (dst_register_type dst, Address src) { \
- assert(VM_Version::supports_##sse_version(), ""); \
- \
- InstructionMark im(this); \
- if (prefix != 0) emit_byte(prefix); \
- emit_byte(0x0F); \
- emit_byte(opcode); \
- emit_sse_operand(dst, src); \
- } \
- \
- void Assembler:: name (dst_register_type dst, src_register_type src) { \
- assert(VM_Version::supports_##sse_version(), ""); \
- \
- if (prefix != 0) emit_byte(prefix); \
- emit_byte(0x0F); \
- emit_byte(opcode); \
- emit_sse_operand(dst, src); \
- } \
-
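A small standalone sketch of the same macro pattern follows (ByteBuf, DEFINE_SSE_PAIR and addss_rr are names invented for this illustration; the real expansions below additionally check VM_Version and emit the ModRM byte via emit_sse_operand):

#include <cstdio>
#include <vector>

// Standalone sketch of the macro pattern: each expansion defines an emitter
// that writes an optional prefix byte, then 0x0F, then the opcode.
// All names here are invented for the illustration and are not HotSpot APIs.
struct ByteBuf {
  std::vector<unsigned char> b;
  void emit(unsigned char x) { b.push_back(x); }
};

#define DEFINE_SSE_PAIR(name, prefix, opcode)                        \
  void name##_rr(ByteBuf& a) {                                       \
    if ((prefix) != 0) a.emit(prefix);                               \
    a.emit(0x0F);                                                    \
    a.emit(opcode);                                                  \
    /* the real macro also emits the ModRM byte for (dst, src) */    \
  }

DEFINE_SSE_PAIR(addss, 0xF3, 0x58) // mirrors emit_sse_instruction(addss, sse, 0xF3, 0x58, ...)

int main() {
  ByteBuf a;
  addss_rr(a);
  for (unsigned char x : a.b) std::printf("%02X ", x);
  std::printf("\n"); // prints: F3 0F 58
  return 0;
}
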
-emit_sse_instruction(addss, sse, 0xF3, 0x58, XMMRegister, XMMRegister);
-emit_sse_instruction(addsd, sse2, 0xF2, 0x58, XMMRegister, XMMRegister)
-emit_sse_instruction(subss, sse, 0xF3, 0x5C, XMMRegister, XMMRegister)
-emit_sse_instruction(subsd, sse2, 0xF2, 0x5C, XMMRegister, XMMRegister)
-emit_sse_instruction(mulss, sse, 0xF3, 0x59, XMMRegister, XMMRegister)
-emit_sse_instruction(mulsd, sse2, 0xF2, 0x59, XMMRegister, XMMRegister)
-emit_sse_instruction(divss, sse, 0xF3, 0x5E, XMMRegister, XMMRegister)
-emit_sse_instruction(divsd, sse2, 0xF2, 0x5E, XMMRegister, XMMRegister)
-emit_sse_instruction(sqrtss, sse, 0xF3, 0x51, XMMRegister, XMMRegister)
-emit_sse_instruction(sqrtsd, sse2, 0xF2, 0x51, XMMRegister, XMMRegister)
-
-emit_sse_instruction(pxor, sse2, 0x66, 0xEF, XMMRegister, XMMRegister)
-
-emit_sse_instruction(comiss, sse, 0, 0x2F, XMMRegister, XMMRegister)
-emit_sse_instruction(comisd, sse2, 0x66, 0x2F, XMMRegister, XMMRegister)
-emit_sse_instruction(ucomiss, sse, 0, 0x2E, XMMRegister, XMMRegister)
-emit_sse_instruction(ucomisd, sse2, 0x66, 0x2E, XMMRegister, XMMRegister)
-
-emit_sse_instruction(cvtss2sd, sse2, 0xF3, 0x5A, XMMRegister, XMMRegister);
-emit_sse_instruction(cvtsd2ss, sse2, 0xF2, 0x5A, XMMRegister, XMMRegister)
-emit_sse_instruction(cvtsi2ss, sse, 0xF3, 0x2A, XMMRegister, Register);
-emit_sse_instruction(cvtsi2sd, sse2, 0xF2, 0x2A, XMMRegister, Register)
-emit_sse_instruction(cvtss2si, sse, 0xF3, 0x2D, Register, XMMRegister);
-emit_sse_instruction(cvtsd2si, sse2, 0xF2, 0x2D, Register, XMMRegister)
-emit_sse_instruction(cvttss2si, sse, 0xF3, 0x2C, Register, XMMRegister);
-emit_sse_instruction(cvttsd2si, sse2, 0xF2, 0x2C, Register, XMMRegister)
-
-emit_sse_instruction(movss, sse, 0xF3, 0x10, XMMRegister, XMMRegister)
-emit_sse_instruction(movsd, sse2, 0xF2, 0x10, XMMRegister, XMMRegister)
-
-emit_sse_instruction(movq, sse2, 0xF3, 0x7E, XMMRegister, XMMRegister);
-emit_sse_instruction(movd, sse2, 0x66, 0x6E, XMMRegister, Register);
-emit_sse_instruction(movdqa, sse2, 0x66, 0x6F, XMMRegister, XMMRegister);
-
-emit_sse_instruction(punpcklbw, sse2, 0x66, 0x60, XMMRegister, XMMRegister);
-
-
- // Instructions not covered by the macro
-void Assembler::movq(Address dst, XMMRegister src) {
- assert(VM_Version::supports_sse2(), "");
-
- InstructionMark im(this);
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0xD6);
- emit_sse_operand(src, dst);
-}
-
-void Assembler::movd(Address dst, XMMRegister src) {
- assert(VM_Version::supports_sse2(), "");
-
- InstructionMark im(this);
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0x7E);
- emit_sse_operand(src, dst);
-}
-
-void Assembler::movd(Register dst, XMMRegister src) {
- assert(VM_Version::supports_sse2(), "");
-
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0x7E);
- emit_sse_operand(src, dst);
-}
-
-void Assembler::movdqa(Address dst, XMMRegister src) {
- assert(VM_Version::supports_sse2(), "");
-
- InstructionMark im(this);
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0x7F);
- emit_sse_operand(src, dst);
-}
-
-void Assembler::pshufd(XMMRegister dst, XMMRegister src, int mode) {
- assert(isByte(mode), "invalid value");
- assert(VM_Version::supports_sse2(), "");
-
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0x70);
- emit_sse_operand(dst, src);
- emit_byte(mode & 0xFF);
-}
-
-void Assembler::pshufd(XMMRegister dst, Address src, int mode) {
- assert(isByte(mode), "invalid value");
- assert(VM_Version::supports_sse2(), "");
-
- InstructionMark im(this);
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0x70);
- emit_sse_operand(dst, src);
- emit_byte(mode & 0xFF);
-}
-
-void Assembler::pshuflw(XMMRegister dst, XMMRegister src, int mode) {
- assert(isByte(mode), "invalid value");
- assert(VM_Version::supports_sse2(), "");
-
- emit_byte(0xF2);
- emit_byte(0x0F);
- emit_byte(0x70);
- emit_sse_operand(dst, src);
- emit_byte(mode & 0xFF);
-}
-
-void Assembler::pshuflw(XMMRegister dst, Address src, int mode) {
- assert(isByte(mode), "invalid value");
- assert(VM_Version::supports_sse2(), "");
-
- InstructionMark im(this);
- emit_byte(0xF2);
- emit_byte(0x0F);
- emit_byte(0x70);
- emit_sse_operand(dst, src);
- emit_byte(mode & 0xFF);
-}
-
-void Assembler::psrlq(XMMRegister dst, int shift) {
- assert(VM_Version::supports_sse2(), "");
-
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0x73);
- emit_sse_operand(xmm2, dst);
- emit_byte(shift);
-}
-
-void Assembler::movss( Address dst, XMMRegister src ) {
- assert(VM_Version::supports_sse(), "");
-
- InstructionMark im(this);
- emit_byte(0xF3); // single
- emit_byte(0x0F);
- emit_byte(0x11); // store
- emit_sse_operand(src, dst);
-}
-
-void Assembler::movsd( Address dst, XMMRegister src ) {
- assert(VM_Version::supports_sse2(), "");
-
- InstructionMark im(this);
- emit_byte(0xF2); // double
- emit_byte(0x0F);
- emit_byte(0x11); // store
- emit_sse_operand(src,dst);
-}
-
-// Newer CPUs require the use of movaps and movapd to avoid a partial register stall
-// when moving between registers.
-void Assembler::movaps(XMMRegister dst, XMMRegister src) {
- assert(VM_Version::supports_sse(), "");
-
- emit_byte(0x0F);
- emit_byte(0x28);
- emit_sse_operand(dst, src);
-}
-void Assembler::movapd(XMMRegister dst, XMMRegister src) {
- assert(VM_Version::supports_sse2(), "");
-
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0x28);
- emit_sse_operand(dst, src);
-}
-
-// Newer CPUs require the use of movsd and movss to avoid a partial register stall
-// when loading from memory. On old Opterons, however, movlpd is used instead of movsd.
-// The selection is done in MacroAssembler::movdbl() and movflt().
-void Assembler::movlpd(XMMRegister dst, Address src) {
- assert(VM_Version::supports_sse(), "");
-
- InstructionMark im(this);
- emit_byte(0x66);
- emit_byte(0x0F);
- emit_byte(0x12);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtdq2pd(XMMRegister dst, XMMRegister src) {
- assert(VM_Version::supports_sse2(), "");
-
- emit_byte(0xF3);
- emit_byte(0x0F);
- emit_byte(0xE6);
- emit_sse_operand(dst, src);
-}
-
-void Assembler::cvtdq2ps(XMMRegister dst, XMMRegister src) {
- assert(VM_Version::supports_sse2(), "");
-
- emit_byte(0x0F);
- emit_byte(0x5B);
- emit_sse_operand(dst, src);
-}
-
-emit_sse_instruction(andps, sse, 0, 0x54, XMMRegister, XMMRegister);
-emit_sse_instruction(andpd, sse2, 0x66, 0x54, XMMRegister, XMMRegister);
-emit_sse_instruction(andnps, sse, 0, 0x55, XMMRegister, XMMRegister);
-emit_sse_instruction(andnpd, sse2, 0x66, 0x55, XMMRegister, XMMRegister);
-emit_sse_instruction(orps, sse, 0, 0x56, XMMRegister, XMMRegister);
-emit_sse_instruction(orpd, sse2, 0x66, 0x56, XMMRegister, XMMRegister);
-emit_sse_instruction(xorps, sse, 0, 0x57, XMMRegister, XMMRegister);
-emit_sse_instruction(xorpd, sse2, 0x66, 0x57, XMMRegister, XMMRegister);
-
-
-void Assembler::ldmxcsr( Address src) {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_operand(rdx /* 2 */, src);
-}
-
-void Assembler::stmxcsr( Address dst) {
- InstructionMark im(this);
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_operand(rbx /* 3 */, dst);
-}
-
-// Implementation of MacroAssembler
-
-Address MacroAssembler::as_Address(AddressLiteral adr) {
- // amd64 always does this as a pc-rel
- // we can be absolute or disp based on the instruction type
- // jmp/call are displacements others are absolute
- assert(!adr.is_lval(), "must be rval");
-
- return Address(adr.target(), adr.rspec());
-}
-
-Address MacroAssembler::as_Address(ArrayAddress adr) {
- return Address::make_array(adr);
-}
-
-void MacroAssembler::fat_nop() {
- // A 5 byte nop that is safe for patching (see patch_verified_entry)
- emit_byte(0x26); // es:
- emit_byte(0x2e); // cs:
- emit_byte(0x64); // fs:
- emit_byte(0x65); // gs:
- emit_byte(0x90);
-}
-
-// 32-bit can do a case-table jump in one instruction, but we no longer allow the base
-// to be installed in the Address class
-void MacroAssembler::jump(ArrayAddress entry) {
- jmp(as_Address(entry));
-}
-
-void MacroAssembler::jump(AddressLiteral dst) {
- jmp_literal(dst.target(), dst.rspec());
-}
-
-void MacroAssembler::jump_cc(Condition cc, AddressLiteral dst) {
- assert((0 <= cc) && (cc < 16), "illegal cc");
-
- InstructionMark im(this);
-
- relocInfo::relocType rtype = dst.reloc();
- relocate(rtype);
- const int short_size = 2;
- const int long_size = 6;
- int offs = (int)dst.target() - ((int)_code_pos);
- if (rtype == relocInfo::none && is8bit(offs - short_size)) {
- // 0111 tttn #8-bit disp
- emit_byte(0x70 | cc);
- emit_byte((offs - short_size) & 0xFF);
- } else {
- // 0000 1111 1000 tttn #32-bit disp
- emit_byte(0x0F);
- emit_byte(0x80 | cc);
- emit_long(offs - long_size);
- }
-}
-
-// Calls
-void MacroAssembler::call(Label& L, relocInfo::relocType rtype) {
- Assembler::call(L, rtype);
-}
-
-void MacroAssembler::call(Register entry) {
- Assembler::call(entry);
-}
-
-void MacroAssembler::call(AddressLiteral entry) {
- Assembler::call_literal(entry.target(), entry.rspec());
-}
-
-
-void MacroAssembler::cmp8(AddressLiteral src1, int8_t imm) {
- Assembler::cmpb(as_Address(src1), imm);
-}
-
-void MacroAssembler::cmp32(AddressLiteral src1, int32_t imm) {
- Assembler::cmpl(as_Address(src1), imm);
-}
-
-void MacroAssembler::cmp32(Register src1, AddressLiteral src2) {
- if (src2.is_lval()) {
- cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
- } else {
- Assembler::cmpl(src1, as_Address(src2));
- }
-}
-
-void MacroAssembler::cmp32(Register src1, int32_t imm) {
- Assembler::cmpl(src1, imm);
-}
-
-void MacroAssembler::cmp32(Register src1, Address src2) {
- Assembler::cmpl(src1, src2);
-}
-
-void MacroAssembler::cmpoop(Address src1, jobject obj) {
- cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::cmpoop(Register src1, jobject obj) {
- cmp_literal32(src1, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::cmpptr(Register src1, AddressLiteral src2) {
- if (src2.is_lval()) {
-    // compare the effective address of src2 to src1
- cmp_literal32(src1, (int32_t)src2.target(), src2.rspec());
- } else {
- Assembler::cmpl(src1, as_Address(src2));
- }
-}
-
-void MacroAssembler::cmpptr(Address src1, AddressLiteral src2) {
- assert(src2.is_lval(), "not a mem-mem compare");
- cmp_literal32(src1, (int32_t) src2.target(), src2.rspec());
-}
-
-
-void MacroAssembler::cmpxchgptr(Register reg, AddressLiteral adr) {
- cmpxchg(reg, as_Address(adr));
-}
-
-void MacroAssembler::increment(AddressLiteral dst) {
- increment(as_Address(dst));
-}
-
-void MacroAssembler::increment(ArrayAddress dst) {
- increment(as_Address(dst));
-}
-
-void MacroAssembler::lea(Register dst, AddressLiteral adr) {
- // leal(dst, as_Address(adr));
-  // see note in movl as to why we must use a move
- mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
-}
-
-void MacroAssembler::lea(Address dst, AddressLiteral adr) {
- // leal(dst, as_Address(adr));
-  // see note in movl as to why we must use a move
- mov_literal32(dst, (int32_t) adr.target(), adr.rspec());
-}
-
-void MacroAssembler::mov32(AddressLiteral dst, Register src) {
- Assembler::movl(as_Address(dst), src);
-}
-
-void MacroAssembler::mov32(Register dst, AddressLiteral src) {
- Assembler::movl(dst, as_Address(src));
-}
-
-void MacroAssembler::movbyte(ArrayAddress dst, int src) {
- movb(as_Address(dst), src);
-}
-
-void MacroAssembler::movoop(Address dst, jobject obj) {
- mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::movoop(Register dst, jobject obj) {
- mov_literal32(dst, (int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-void MacroAssembler::movptr(Register dst, AddressLiteral src) {
- if (src.is_lval()) {
- // essentially an lea
- mov_literal32(dst, (int32_t) src.target(), src.rspec());
- } else {
- // mov 32bits from an absolute address
- movl(dst, as_Address(src));
- }
-}
-
-void MacroAssembler::movptr(ArrayAddress dst, Register src) {
- movl(as_Address(dst), src);
-}
-
-void MacroAssembler::movptr(Register dst, ArrayAddress src) {
- movl(dst, as_Address(src));
-}
-
-void MacroAssembler::movflt(XMMRegister dst, AddressLiteral src) {
- movss(dst, as_Address(src));
-}
-
-void MacroAssembler::movdbl(XMMRegister dst, AddressLiteral src) {
- if (UseXmmLoadAndClearUpper) { movsd (dst, as_Address(src)); return; }
- else { movlpd(dst, as_Address(src)); return; }
-}
-
-void Assembler::pushoop(jobject obj) {
- push_literal32((int32_t)obj, oop_Relocation::spec_for_immediate());
-}
-
-
-void MacroAssembler::pushptr(AddressLiteral src) {
- if (src.is_lval()) {
- push_literal32((int32_t)src.target(), src.rspec());
- } else {
- pushl(as_Address(src));
- }
-}
-
-void MacroAssembler::test32(Register src1, AddressLiteral src2) {
- // src2 must be rval
- testl(src1, as_Address(src2));
-}
-
-// FPU
-
-void MacroAssembler::fld_x(AddressLiteral src) {
- Assembler::fld_x(as_Address(src));
-}
-
-void MacroAssembler::fld_d(AddressLiteral src) {
- fld_d(as_Address(src));
-}
-
-void MacroAssembler::fld_s(AddressLiteral src) {
- fld_s(as_Address(src));
-}
-
-void MacroAssembler::fldcw(AddressLiteral src) {
- Assembler::fldcw(as_Address(src));
-}
-
-void MacroAssembler::ldmxcsr(AddressLiteral src) {
- Assembler::ldmxcsr(as_Address(src));
-}
-
-// SSE
-
-void MacroAssembler::andpd(XMMRegister dst, AddressLiteral src) {
- andpd(dst, as_Address(src));
-}
-
-void MacroAssembler::comisd(XMMRegister dst, AddressLiteral src) {
- comisd(dst, as_Address(src));
-}
-
-void MacroAssembler::comiss(XMMRegister dst, AddressLiteral src) {
- comiss(dst, as_Address(src));
-}
-
-void MacroAssembler::movsd(XMMRegister dst, AddressLiteral src) {
- movsd(dst, as_Address(src));
-}
-
-void MacroAssembler::movss(XMMRegister dst, AddressLiteral src) {
- movss(dst, as_Address(src));
-}
-
-void MacroAssembler::xorpd(XMMRegister dst, AddressLiteral src) {
- xorpd(dst, as_Address(src));
-}
-
-void MacroAssembler::xorps(XMMRegister dst, AddressLiteral src) {
- xorps(dst, as_Address(src));
-}
-
-void MacroAssembler::ucomisd(XMMRegister dst, AddressLiteral src) {
- ucomisd(dst, as_Address(src));
-}
-
-void MacroAssembler::ucomiss(XMMRegister dst, AddressLiteral src) {
- ucomiss(dst, as_Address(src));
-}
-
-void MacroAssembler::null_check(Register reg, int offset) {
- if (needs_explicit_null_check(offset)) {
- // provoke OS NULL exception if reg = NULL by
- // accessing M[reg] w/o changing any (non-CC) registers
- cmpl(rax, Address(reg, 0));
- // Note: should probably use testl(rax, Address(reg, 0));
- // may be shorter code (however, this version of
- // testl needs to be implemented first)
- } else {
- // nothing to do, (later) access of M[reg + offset]
- // will provoke OS NULL exception if reg = NULL
- }
-}
-
-
-int MacroAssembler::load_unsigned_byte(Register dst, Address src) {
- // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
- // and "3.9 Partial Register Penalties", p. 22).
- int off;
- if (VM_Version::is_P6() || src.uses(dst)) {
- off = offset();
- movzxb(dst, src);
- } else {
- xorl(dst, dst);
- off = offset();
- movb(dst, src);
- }
- return off;
-}
-
-
-int MacroAssembler::load_unsigned_word(Register dst, Address src) {
- // According to Intel Doc. AP-526, "Zero-Extension of Short", p.16,
- // and "3.9 Partial Register Penalties", p. 22).
- int off;
- if (VM_Version::is_P6() || src.uses(dst)) {
- off = offset();
- movzxw(dst, src);
- } else {
- xorl(dst, dst);
- off = offset();
- movw(dst, src);
- }
- return off;
-}
-
-
-int MacroAssembler::load_signed_byte(Register dst, Address src) {
- int off;
- if (VM_Version::is_P6()) {
- off = offset();
- movsxb(dst, src);
- } else {
- off = load_unsigned_byte(dst, src);
- shll(dst, 24);
- sarl(dst, 24);
- }
- return off;
-}
-
-
-int MacroAssembler::load_signed_word(Register dst, Address src) {
- int off;
- if (VM_Version::is_P6()) {
- off = offset();
- movsxw(dst, src);
- } else {
- off = load_unsigned_word(dst, src);
- shll(dst, 16);
- sarl(dst, 16);
- }
- return off;
-}
-
-
-void MacroAssembler::extend_sign(Register hi, Register lo) {
- // According to Intel Doc. AP-526, "Integer Divide", p.18.
- if (VM_Version::is_P6() && hi == rdx && lo == rax) {
- cdql();
- } else {
- movl(hi, lo);
- sarl(hi, 31);
- }
-}
-
-
-void MacroAssembler::increment(Register reg, int value) {
- if (value == min_jint) {addl(reg, value); return; }
- if (value < 0) { decrement(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incl(reg); return; }
- /* else */ { addl(reg, value) ; return; }
-}
-
-void MacroAssembler::increment(Address dst, int value) {
- if (value == min_jint) {addl(dst, value); return; }
- if (value < 0) { decrement(dst, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { incl(dst); return; }
- /* else */ { addl(dst, value) ; return; }
-}
-
-void MacroAssembler::decrement(Register reg, int value) {
- if (value == min_jint) {subl(reg, value); return; }
- if (value < 0) { increment(reg, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decl(reg); return; }
- /* else */ { subl(reg, value) ; return; }
-}
-
-void MacroAssembler::decrement(Address dst, int value) {
- if (value == min_jint) {subl(dst, value); return; }
- if (value < 0) { increment(dst, -value); return; }
- if (value == 0) { ; return; }
- if (value == 1 && UseIncDec) { decl(dst); return; }
- /* else */ { subl(dst, value) ; return; }
-}
-
-void MacroAssembler::align(int modulus) {
- if (offset() % modulus != 0) nop(modulus - (offset() % modulus));
-}
-
-
-void MacroAssembler::enter() {
- pushl(rbp);
- movl(rbp, rsp);
-}
-
-
-void MacroAssembler::leave() {
- movl(rsp, rbp);
- popl(rbp);
-}
-
-void MacroAssembler::set_last_Java_frame(Register java_thread,
- Register last_java_sp,
- Register last_java_fp,
- address last_java_pc) {
- // determine java_thread register
- if (!java_thread->is_valid()) {
- java_thread = rdi;
- get_thread(java_thread);
- }
- // determine last_java_sp register
- if (!last_java_sp->is_valid()) {
- last_java_sp = rsp;
- }
-
- // last_java_fp is optional
-
- if (last_java_fp->is_valid()) {
- movl(Address(java_thread, JavaThread::last_Java_fp_offset()), last_java_fp);
- }
-
- // last_java_pc is optional
-
- if (last_java_pc != NULL) {
- lea(Address(java_thread,
- JavaThread::frame_anchor_offset() + JavaFrameAnchor::last_Java_pc_offset()),
- InternalAddress(last_java_pc));
-
- }
- movl(Address(java_thread, JavaThread::last_Java_sp_offset()), last_java_sp);
-}
-
-void MacroAssembler::reset_last_Java_frame(Register java_thread, bool clear_fp, bool clear_pc) {
- // determine java_thread register
- if (!java_thread->is_valid()) {
- java_thread = rdi;
- get_thread(java_thread);
- }
- // we must set sp to zero to clear frame
- movl(Address(java_thread, JavaThread::last_Java_sp_offset()), 0);
- if (clear_fp) {
- movl(Address(java_thread, JavaThread::last_Java_fp_offset()), 0);
- }
-
- if (clear_pc)
- movl(Address(java_thread, JavaThread::last_Java_pc_offset()), 0);
-
-}
-
-
-
-// Implementation of call_VM versions
-
-void MacroAssembler::call_VM_leaf_base(
- address entry_point,
- int number_of_arguments
-) {
- call(RuntimeAddress(entry_point));
- increment(rsp, number_of_arguments * wordSize);
-}
-
-
-void MacroAssembler::call_VM_base(
- Register oop_result,
- Register java_thread,
- Register last_java_sp,
- address entry_point,
- int number_of_arguments,
- bool check_exceptions
-) {
- // determine java_thread register
- if (!java_thread->is_valid()) {
- java_thread = rdi;
- get_thread(java_thread);
- }
- // determine last_java_sp register
- if (!last_java_sp->is_valid()) {
- last_java_sp = rsp;
- }
- // debugging support
- assert(number_of_arguments >= 0 , "cannot have negative number of arguments");
- assert(java_thread != oop_result , "cannot use the same register for java_thread & oop_result");
- assert(java_thread != last_java_sp, "cannot use the same register for java_thread & last_java_sp");
- // push java thread (becomes first argument of C function)
- pushl(java_thread);
- // set last Java frame before call
- assert(last_java_sp != rbp, "this code doesn't work for last_java_sp == rbp, which currently can't portably work anyway since C2 doesn't save rbp,");
- // Only interpreter should have to set fp
- set_last_Java_frame(java_thread, last_java_sp, rbp, NULL);
- // do the call
- call(RuntimeAddress(entry_point));
- // restore the thread (cannot use the pushed argument since arguments
- // may be overwritten by C code generated by an optimizing compiler);
-  // however, we can use the register value directly if it is callee saved.
- if (java_thread == rdi || java_thread == rsi) {
- // rdi & rsi are callee saved -> nothing to do
-#ifdef ASSERT
- guarantee(java_thread != rax, "change this code");
- pushl(rax);
- { Label L;
- get_thread(rax);
- cmpl(java_thread, rax);
- jcc(Assembler::equal, L);
- stop("MacroAssembler::call_VM_base: rdi not callee saved?");
- bind(L);
- }
- popl(rax);
-#endif
- } else {
- get_thread(java_thread);
- }
- // reset last Java frame
- // Only interpreter should have to clear fp
- reset_last_Java_frame(java_thread, true, false);
- // discard thread and arguments
- addl(rsp, (1 + number_of_arguments)*wordSize);
-
-#ifndef CC_INTERP
- // C++ interp handles this in the interpreter
- check_and_handle_popframe(java_thread);
- check_and_handle_earlyret(java_thread);
-#endif /* CC_INTERP */
-
- if (check_exceptions) {
- // check for pending exceptions (java_thread is set upon return)
- cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
- jump_cc(Assembler::notEqual,
- RuntimeAddress(StubRoutines::forward_exception_entry()));
- }
-
- // get oop result if there is one and reset the value in the thread
- if (oop_result->is_valid()) {
- movl(oop_result, Address(java_thread, JavaThread::vm_result_offset()));
- movl(Address(java_thread, JavaThread::vm_result_offset()), NULL_WORD);
- verify_oop(oop_result);
- }
-}
-
-
-void MacroAssembler::check_and_handle_popframe(Register java_thread) {
-}
-
-void MacroAssembler::check_and_handle_earlyret(Register java_thread) {
-}
-
-void MacroAssembler::call_VM_helper(Register oop_result, address entry_point, int number_of_arguments, bool check_exceptions) {
- leal(rax, Address(rsp, (1 + number_of_arguments) * wordSize));
- call_VM_base(oop_result, noreg, rax, entry_point, number_of_arguments, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, address entry_point, bool check_exceptions) {
- Label C, E;
- call(C, relocInfo::none);
- jmp(E);
-
- bind(C);
- call_VM_helper(oop_result, entry_point, 0, check_exceptions);
- ret(0);
-
- bind(E);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, bool check_exceptions) {
- Label C, E;
- call(C, relocInfo::none);
- jmp(E);
-
- bind(C);
- pushl(arg_1);
- call_VM_helper(oop_result, entry_point, 1, check_exceptions);
- ret(0);
-
- bind(E);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
- Label C, E;
- call(C, relocInfo::none);
- jmp(E);
-
- bind(C);
- pushl(arg_2);
- pushl(arg_1);
- call_VM_helper(oop_result, entry_point, 2, check_exceptions);
- ret(0);
-
- bind(E);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
- Label C, E;
- call(C, relocInfo::none);
- jmp(E);
-
- bind(C);
- pushl(arg_3);
- pushl(arg_2);
- pushl(arg_1);
- call_VM_helper(oop_result, entry_point, 3, check_exceptions);
- ret(0);
-
- bind(E);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, int number_of_arguments, bool check_exceptions) {
- call_VM_base(oop_result, noreg, last_java_sp, entry_point, number_of_arguments, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, bool check_exceptions) {
- pushl(arg_1);
- call_VM(oop_result, last_java_sp, entry_point, 1, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, bool check_exceptions) {
- pushl(arg_2);
- pushl(arg_1);
- call_VM(oop_result, last_java_sp, entry_point, 2, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM(Register oop_result, Register last_java_sp, address entry_point, Register arg_1, Register arg_2, Register arg_3, bool check_exceptions) {
- pushl(arg_3);
- pushl(arg_2);
- pushl(arg_1);
- call_VM(oop_result, last_java_sp, entry_point, 3, check_exceptions);
-}
-
-
-void MacroAssembler::call_VM_leaf(address entry_point, int number_of_arguments) {
- call_VM_leaf_base(entry_point, number_of_arguments);
-}
-
-
-void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1) {
- pushl(arg_1);
- call_VM_leaf(entry_point, 1);
-}
-
-
-void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
- pushl(arg_2);
- pushl(arg_1);
- call_VM_leaf(entry_point, 2);
-}
-
-
-void MacroAssembler::call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
- pushl(arg_3);
- pushl(arg_2);
- pushl(arg_1);
- call_VM_leaf(entry_point, 3);
-}
-
-
-// Calls to C land
-//
-// When entering C land, the rbp, & rsp of the last Java frame have to be recorded
-// in the (thread-local) JavaThread object. When leaving C land, the last Java fp
-// has to be reset to 0. This is required to allow proper stack traversal.
-
-void MacroAssembler::store_check(Register obj) {
- // Does a store check for the oop in register obj. The content of
- // register obj is destroyed afterwards.
- store_check_part_1(obj);
- store_check_part_2(obj);
-}
-
-
-void MacroAssembler::store_check(Register obj, Address dst) {
- store_check(obj);
-}
-
-
-// split the store check operation so that other instructions can be scheduled in between
-void MacroAssembler::store_check_part_1(Register obj) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- shrl(obj, CardTableModRefBS::card_shift);
-}
-
-
-void MacroAssembler::store_check_part_2(Register obj) {
- BarrierSet* bs = Universe::heap()->barrier_set();
- assert(bs->kind() == BarrierSet::CardTableModRef, "Wrong barrier set kind");
- CardTableModRefBS* ct = (CardTableModRefBS*)bs;
- assert(sizeof(*ct->byte_map_base) == sizeof(jbyte), "adjust this code");
-
- // The calculation for byte_map_base is as follows:
- // byte_map_base = _byte_map - (uintptr_t(low_bound) >> card_shift);
- // So this essentially converts an address to a displacement and
-  // it will never need to be relocated. On 64-bit, however, the value may be too
-  // large for a 32-bit displacement.
-
- intptr_t disp = (intptr_t) ct->byte_map_base;
- Address cardtable(noreg, obj, Address::times_1, disp);
- movb(cardtable, 0);
-}
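// Editor's sketch (not HotSpot code): the address arithmetic performed by
// store_check_part_1/_2 above, assuming jbyte cards covering 2^card_shift
// bytes each. Dirtying a card is then the single one-byte store emitted by
// movb(cardtable, 0).
#include <cstdint>
static inline void dirty_card_sketch(uint8_t* byte_map_base,  // == _byte_map - (low_bound >> card_shift)
                                     uintptr_t addr,          // address of the stored-into object
                                     int card_shift) {        // e.g. 9 for 512-byte cards
  byte_map_base[addr >> card_shift] = 0;                      // 0 marks the card dirty
}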
-
-
-void MacroAssembler::c2bool(Register x) {
- // implements x == 0 ? 0 : 1
- // note: must only look at least-significant byte of x
- // since C-style booleans are stored in one byte
- // only! (was bug)
- andl(x, 0xFF);
- setb(Assembler::notZero, x);
-}
-
-
-int MacroAssembler::corrected_idivl(Register reg) {
- // Full implementation of Java idiv and irem; checks for
- // special case as described in JVM spec., p.243 & p.271.
- // The function returns the (pc) offset of the idivl
- // instruction - may be needed for implicit exceptions.
- //
- // normal case special case
- //
- // input : rax,: dividend min_int
- // reg: divisor (may not be rax,/rdx) -1
- //
- // output: rax,: quotient (= rax, idiv reg) min_int
- // rdx: remainder (= rax, irem reg) 0
- assert(reg != rax && reg != rdx, "reg cannot be rax, or rdx register");
- const int min_int = 0x80000000;
- Label normal_case, special_case;
-
- // check for special case
- cmpl(rax, min_int);
- jcc(Assembler::notEqual, normal_case);
- xorl(rdx, rdx); // prepare rdx for possible special case (where remainder = 0)
- cmpl(reg, -1);
- jcc(Assembler::equal, special_case);
-
- // handle normal case
- bind(normal_case);
- cdql();
- int idivl_offset = offset();
- idivl(reg);
-
- // normal and special case exit
- bind(special_case);
-
- return idivl_offset;
-}
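// Editor's sketch (not part of the original file) of the semantics the stub
// above implements. Java requires min_int / -1 == min_int with remainder 0,
// while a plain idivl would raise a divide error (#DE) on that overflow, so
// corrected_idivl branches around the instruction for exactly that input.
#include <climits>
static inline int corrected_idiv_sketch(int dividend, int divisor) {
  if (dividend == INT_MIN && divisor == -1) return INT_MIN;  // special case; remainder is 0
  return dividend / divisor;                                  // normal case: cdq; idivl
}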
-
-
-void MacroAssembler::lneg(Register hi, Register lo) {
- negl(lo);
- adcl(hi, 0);
- negl(hi);
-}
-
-
-void MacroAssembler::lmul(int x_rsp_offset, int y_rsp_offset) {
- // Multiplication of two Java long values stored on the stack
- // as illustrated below. Result is in rdx:rax.
- //
- // rsp ---> [ ?? ] \ \
- // .... | y_rsp_offset |
- // [ y_lo ] / (in bytes) | x_rsp_offset
- // [ y_hi ] | (in bytes)
- // .... |
- // [ x_lo ] /
- // [ x_hi ]
- // ....
- //
- // Basic idea: lo(result) = lo(x_lo * y_lo)
- // hi(result) = hi(x_lo * y_lo) + lo(x_hi * y_lo) + lo(x_lo * y_hi)
- Address x_hi(rsp, x_rsp_offset + wordSize); Address x_lo(rsp, x_rsp_offset);
- Address y_hi(rsp, y_rsp_offset + wordSize); Address y_lo(rsp, y_rsp_offset);
- Label quick;
- // load x_hi, y_hi and check if quick
- // multiplication is possible
- movl(rbx, x_hi);
- movl(rcx, y_hi);
- movl(rax, rbx);
- orl(rbx, rcx); // rbx, = 0 <=> x_hi = 0 and y_hi = 0
- jcc(Assembler::zero, quick); // if rbx, = 0 do quick multiply
- // do full multiplication
- // 1st step
- mull(y_lo); // x_hi * y_lo
- movl(rbx, rax); // save lo(x_hi * y_lo) in rbx,
- // 2nd step
- movl(rax, x_lo);
- mull(rcx); // x_lo * y_hi
- addl(rbx, rax); // add lo(x_lo * y_hi) to rbx,
- // 3rd step
- bind(quick); // note: rbx, = 0 if quick multiply!
- movl(rax, x_lo);
- mull(y_lo); // x_lo * y_lo
- addl(rdx, rbx); // correct hi(x_lo * y_lo)
-}
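// Editor's sketch in plain C++ (not the generated code) of the decomposition
// described in the comment above: a 64x64->64 multiply built from 32-bit
// halves. x_hi*y_hi and the upper halves of the cross products only affect
// bits >= 64 and are dropped, which is why the "quick" path can skip them
// when both high words are zero.
#include <cstdint>
static inline uint64_t lmul_sketch(uint32_t x_hi, uint32_t x_lo,
                                   uint32_t y_hi, uint32_t y_lo) {
  uint64_t lo_prod = (uint64_t)x_lo * y_lo;      // full 64-bit x_lo * y_lo
  uint32_t hi = (uint32_t)(lo_prod >> 32)        // hi(x_lo * y_lo)
              + x_hi * y_lo                      // lo(x_hi * y_lo)
              + x_lo * y_hi;                     // lo(x_lo * y_hi)
  return ((uint64_t)hi << 32) | (uint32_t)lo_prod;
}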
-
-
-void MacroAssembler::lshl(Register hi, Register lo) {
- // Java shift left long support (semantics as described in JVM spec., p.305)
- // (basic idea for shift counts s >= n: x << s == (x << n) << (s - n))
- // shift value is in rcx !
- assert(hi != rcx, "must not use rcx");
- assert(lo != rcx, "must not use rcx");
- const Register s = rcx; // shift count
- const int n = BitsPerWord;
- Label L;
- andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
- cmpl(s, n); // if (s < n)
- jcc(Assembler::less, L); // else (s >= n)
- movl(hi, lo); // x := x << n
- xorl(lo, lo);
-  // Note: subl(s, n) is not needed since the Intel shift instructions only use rcx mod n!
- bind(L); // s (mod n) < n
- shldl(hi, lo); // x := x << s
- shll(lo);
-}
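// Editor's sketch (not the generated code) of the 64-bit left shift built
// from 32-bit halves, as described above: for s >= 32, x << s == (x << 32)
// << (s - 32), so the low word is moved into the high word first. The
// hardware shld/shl pair takes the count mod 32; the C sketch makes that
// explicit by reducing s.
#include <cstdint>
static inline void lshl_sketch(uint32_t& hi, uint32_t& lo, unsigned s) {
  s &= 0x3f;                                        // andl(s, 0x3f)
  if (s >= 32) { hi = lo; lo = 0; s -= 32; }        // movl(hi, lo); xorl(lo, lo)
  hi = (hi << s) | (s ? (lo >> (32 - s)) : 0);      // shldl(hi, lo)
  lo <<= s;                                         // shll(lo)
}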
-
-
-void MacroAssembler::lshr(Register hi, Register lo, bool sign_extension) {
- // Java shift right long support (semantics as described in JVM spec., p.306 & p.310)
- // (basic idea for shift counts s >= n: x >> s == (x >> n) >> (s - n))
- assert(hi != rcx, "must not use rcx");
- assert(lo != rcx, "must not use rcx");
- const Register s = rcx; // shift count
- const int n = BitsPerWord;
- Label L;
- andl(s, 0x3f); // s := s & 0x3f (s < 0x40)
- cmpl(s, n); // if (s < n)
- jcc(Assembler::less, L); // else (s >= n)
- movl(lo, hi); // x := x >> n
- if (sign_extension) sarl(hi, 31);
- else xorl(hi, hi);
-  // Note: subl(s, n) is not needed since the Intel shift instructions only use rcx mod n!
- bind(L); // s (mod n) < n
- shrdl(lo, hi); // x := x >> s
- if (sign_extension) sarl(hi);
- else shrl(hi);
-}
-
-
-// Note: y_lo will be destroyed
-void MacroAssembler::lcmp2int(Register x_hi, Register x_lo, Register y_hi, Register y_lo) {
- // Long compare for Java (semantics as described in JVM spec.)
- Label high, low, done;
-
- cmpl(x_hi, y_hi);
- jcc(Assembler::less, low);
- jcc(Assembler::greater, high);
- // x_hi is the return register
- xorl(x_hi, x_hi);
- cmpl(x_lo, y_lo);
- jcc(Assembler::below, low);
- jcc(Assembler::equal, done);
-
- bind(high);
- xorl(x_hi, x_hi);
- increment(x_hi);
- jmp(done);
-
- bind(low);
- xorl(x_hi, x_hi);
- decrement(x_hi);
-
- bind(done);
-}
-
-
-void MacroAssembler::save_rax(Register tmp) {
- if (tmp == noreg) pushl(rax);
- else if (tmp != rax) movl(tmp, rax);
-}
-
-
-void MacroAssembler::restore_rax(Register tmp) {
- if (tmp == noreg) popl(rax);
- else if (tmp != rax) movl(rax, tmp);
-}
-
-
-void MacroAssembler::fremr(Register tmp) {
- save_rax(tmp);
- { Label L;
- bind(L);
- fprem();
- fwait(); fnstsw_ax();
- sahf();
- jcc(Assembler::parity, L);
- }
- restore_rax(tmp);
- // Result is in ST0.
- // Note: fxch & fpop to get rid of ST1
- // (otherwise FPU stack could overflow eventually)
- fxch(1);
- fpop();
-}
-
-
-static const double pi_4 = 0.7853981633974483;
-
-void MacroAssembler::trigfunc(char trig, int num_fpu_regs_in_use) {
-  // A hand-coded argument reduction for values with fabs(x) in (pi/4, pi/2)
- // was attempted in this code; unfortunately it appears that the
- // switch to 80-bit precision and back causes this to be
- // unprofitable compared with simply performing a runtime call if
- // the argument is out of the (-pi/4, pi/4) range.
-
- Register tmp = noreg;
- if (!VM_Version::supports_cmov()) {
- // fcmp needs a temporary so preserve rbx,
- tmp = rbx;
- pushl(tmp);
- }
-
- Label slow_case, done;
-
- // x ?<= pi/4
- fld_d(ExternalAddress((address)&pi_4));
- fld_s(1); // Stack: X PI/4 X
- fabs(); // Stack: |X| PI/4 X
- fcmp(tmp);
- jcc(Assembler::above, slow_case);
-
- // fastest case: -pi/4 <= x <= pi/4
- switch(trig) {
- case 's':
- fsin();
- break;
- case 'c':
- fcos();
- break;
- case 't':
- ftan();
- break;
- default:
- assert(false, "bad intrinsic");
- break;
- }
- jmp(done);
-
- // slow case: runtime call
- bind(slow_case);
- // Preserve registers across runtime call
- pushad();
- int incoming_argument_and_return_value_offset = -1;
- if (num_fpu_regs_in_use > 1) {
- // Must preserve all other FPU regs (could alternatively convert
- // SharedRuntime::dsin and dcos into assembly routines known not to trash
-    // FPU state, but we cannot trust the C compiler)
- NEEDS_CLEANUP;
- // NOTE that in this case we also push the incoming argument to
- // the stack and restore it later; we also use this stack slot to
- // hold the return value from dsin or dcos.
- for (int i = 0; i < num_fpu_regs_in_use; i++) {
- subl(rsp, wordSize*2);
- fstp_d(Address(rsp, 0));
- }
- incoming_argument_and_return_value_offset = 2*wordSize*(num_fpu_regs_in_use-1);
- fld_d(Address(rsp, incoming_argument_and_return_value_offset));
- }
- subl(rsp, wordSize*2);
- fstp_d(Address(rsp, 0));
- // NOTE: we must not use call_VM_leaf here because that requires a
- // complete interpreter frame in debug mode -- same bug as 4387334
- NEEDS_CLEANUP;
- // Need to add stack banging before this runtime call if it needs to
- // be taken; however, there is no generic stack banging routine at
- // the MacroAssembler level
- switch(trig) {
- case 's':
- {
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dsin)));
- }
- break;
- case 'c':
- {
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dcos)));
- }
- break;
- case 't':
- {
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtan)));
- }
- break;
- default:
- assert(false, "bad intrinsic");
- break;
- }
- addl(rsp, wordSize * 2);
- if (num_fpu_regs_in_use > 1) {
- // Must save return value to stack and then restore entire FPU stack
- fstp_d(Address(rsp, incoming_argument_and_return_value_offset));
- for (int i = 0; i < num_fpu_regs_in_use; i++) {
- fld_d(Address(rsp, 0));
- addl(rsp, wordSize*2);
- }
- }
- popad();
-
- // Come here with result in F-TOS
- bind(done);
-
- if (tmp != noreg) {
- popl(tmp);
- }
-}
-
-void MacroAssembler::jC2(Register tmp, Label& L) {
- // set parity bit if FPU flag C2 is set (via rax)
- save_rax(tmp);
- fwait(); fnstsw_ax();
- sahf();
- restore_rax(tmp);
- // branch
- jcc(Assembler::parity, L);
-}
-
-
-void MacroAssembler::jnC2(Register tmp, Label& L) {
- // set parity bit if FPU flag C2 is set (via rax)
- save_rax(tmp);
- fwait(); fnstsw_ax();
- sahf();
- restore_rax(tmp);
- // branch
- jcc(Assembler::noParity, L);
-}
-
-
-void MacroAssembler::fcmp(Register tmp) {
- fcmp(tmp, 1, true, true);
-}
-
-
-void MacroAssembler::fcmp(Register tmp, int index, bool pop_left, bool pop_right) {
- assert(!pop_right || pop_left, "usage error");
- if (VM_Version::supports_cmov()) {
- assert(tmp == noreg, "unneeded temp");
- if (pop_left) {
- fucomip(index);
- } else {
- fucomi(index);
- }
- if (pop_right) {
- fpop();
- }
- } else {
- assert(tmp != noreg, "need temp");
- if (pop_left) {
- if (pop_right) {
- fcompp();
- } else {
- fcomp(index);
- }
- } else {
- fcom(index);
- }
- // convert FPU condition into eflags condition via rax,
- save_rax(tmp);
- fwait(); fnstsw_ax();
- sahf();
- restore_rax(tmp);
- }
- // condition codes set as follows:
- //
- // CF (corresponds to C0) if x < y
- // PF (corresponds to C2) if unordered
- // ZF (corresponds to C3) if x = y
-}
-
-
-void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less) {
- fcmp2int(dst, unordered_is_less, 1, true, true);
-}
-
-
-void MacroAssembler::fcmp2int(Register dst, bool unordered_is_less, int index, bool pop_left, bool pop_right) {
- fcmp(VM_Version::supports_cmov() ? noreg : dst, index, pop_left, pop_right);
- Label L;
- if (unordered_is_less) {
- movl(dst, -1);
- jcc(Assembler::parity, L);
- jcc(Assembler::below , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- increment(dst);
- } else { // unordered is greater
- movl(dst, 1);
- jcc(Assembler::parity, L);
- jcc(Assembler::above , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- decrement(dst);
- }
- bind(L);
-}
-
-void MacroAssembler::cmpss2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
- ucomiss(opr1, opr2);
-
- Label L;
- if (unordered_is_less) {
- movl(dst, -1);
- jcc(Assembler::parity, L);
- jcc(Assembler::below , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- increment(dst);
- } else { // unordered is greater
- movl(dst, 1);
- jcc(Assembler::parity, L);
- jcc(Assembler::above , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- decrement(dst);
- }
- bind(L);
-}
-
-void MacroAssembler::cmpsd2int(XMMRegister opr1, XMMRegister opr2, Register dst, bool unordered_is_less) {
- ucomisd(opr1, opr2);
-
- Label L;
- if (unordered_is_less) {
- movl(dst, -1);
- jcc(Assembler::parity, L);
- jcc(Assembler::below , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- increment(dst);
- } else { // unordered is greater
- movl(dst, 1);
- jcc(Assembler::parity, L);
- jcc(Assembler::above , L);
- movl(dst, 0);
- jcc(Assembler::equal , L);
- decrement(dst);
- }
- bind(L);
-}
-
-
-
-void MacroAssembler::fpop() {
- ffree();
- fincstp();
-}
-
-
-void MacroAssembler::sign_extend_short(Register reg) {
- if (VM_Version::is_P6()) {
- movsxw(reg, reg);
- } else {
- shll(reg, 16);
- sarl(reg, 16);
- }
-}
-
-
-void MacroAssembler::sign_extend_byte(Register reg) {
- if (VM_Version::is_P6() && reg->has_byte_register()) {
- movsxb(reg, reg);
- } else {
- shll(reg, 24);
- sarl(reg, 24);
- }
-}
-
-
-void MacroAssembler::division_with_shift (Register reg, int shift_value) {
- assert (shift_value > 0, "illegal shift value");
- Label _is_positive;
- testl (reg, reg);
- jcc (Assembler::positive, _is_positive);
- int offset = (1 << shift_value) - 1 ;
-
- increment(reg, offset);
-
- bind (_is_positive);
- sarl(reg, shift_value);
-}
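// Editor's sketch (not the generated code) of what division_with_shift
// computes: Java-style truncating signed division by 2^shift. An arithmetic
// shift alone rounds toward negative infinity, e.g. -7 / 4 == -1 in Java but
// -7 >> 2 == -2; biasing negative inputs by (1 << shift) - 1 first restores
// truncation: (-7 + 3) >> 2 == -1.
static inline int div_by_pow2_sketch(int x, int shift) {
  if (x < 0) x += (1 << shift) - 1;   // matches increment(reg, offset) above
  return x >> shift;                  // matches sarl(reg, shift_value)
}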
-
-
-void MacroAssembler::round_to(Register reg, int modulus) {
- addl(reg, modulus - 1);
- andl(reg, -modulus);
-}
-
-// C++ bool manipulation
-
-void MacroAssembler::movbool(Register dst, Address src) {
- if(sizeof(bool) == 1)
- movb(dst, src);
- else if(sizeof(bool) == 2)
- movw(dst, src);
- else if(sizeof(bool) == 4)
- movl(dst, src);
- else
- // unsupported
- ShouldNotReachHere();
-}
-
-void MacroAssembler::movbool(Address dst, bool boolconst) {
- if(sizeof(bool) == 1)
- movb(dst, (int) boolconst);
- else if(sizeof(bool) == 2)
- movw(dst, (int) boolconst);
- else if(sizeof(bool) == 4)
- movl(dst, (int) boolconst);
- else
- // unsupported
- ShouldNotReachHere();
-}
-
-void MacroAssembler::movbool(Address dst, Register src) {
- if(sizeof(bool) == 1)
- movb(dst, src);
- else if(sizeof(bool) == 2)
- movw(dst, src);
- else if(sizeof(bool) == 4)
- movl(dst, src);
- else
- // unsupported
- ShouldNotReachHere();
-}
-
-void MacroAssembler::testbool(Register dst) {
- if(sizeof(bool) == 1)
- testb(dst, (int) 0xff);
- else if(sizeof(bool) == 2) {
- // testw implementation needed for two byte bools
- ShouldNotReachHere();
- } else if(sizeof(bool) == 4)
- testl(dst, dst);
- else
- // unsupported
- ShouldNotReachHere();
-}
-
-void MacroAssembler::verify_oop(Register reg, const char* s) {
- if (!VerifyOops) return;
- // Pass register number to verify_oop_subroutine
- char* b = new char[strlen(s) + 50];
- sprintf(b, "verify_oop: %s: %s", reg->name(), s);
- pushl(rax); // save rax,
- pushl(reg); // pass register argument
- ExternalAddress buffer((address) b);
- pushptr(buffer.addr());
- // call indirectly to solve generation ordering problem
- movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
- call(rax);
-}
-
-
-void MacroAssembler::verify_oop_addr(Address addr, const char* s) {
- if (!VerifyOops) return;
- // QQQ fix this
- // Address adjust(addr.base(), addr.index(), addr.scale(), addr.disp() + BytesPerWord);
- // Pass register number to verify_oop_subroutine
- char* b = new char[strlen(s) + 50];
- sprintf(b, "verify_oop_addr: %s", s);
- pushl(rax); // save rax,
- // addr may contain rsp so we will have to adjust it based on the push
- // we just did
- if (addr.uses(rsp)) {
- leal(rax, addr);
- pushl(Address(rax, BytesPerWord));
- } else {
- pushl(addr);
- }
- ExternalAddress buffer((address) b);
- // pass msg argument
- pushptr(buffer.addr());
- // call indirectly to solve generation ordering problem
- movptr(rax, ExternalAddress(StubRoutines::verify_oop_subroutine_entry_address()));
- call(rax);
- // Caller pops the arguments and restores rax, from the stack
-}
-
-
-void MacroAssembler::stop(const char* msg) {
- ExternalAddress message((address)msg);
- // push address of message
- pushptr(message.addr());
- { Label L; call(L, relocInfo::none); bind(L); } // push eip
- pushad(); // push registers
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
- hlt();
-}
-
-
-void MacroAssembler::warn(const char* msg) {
- push_CPU_state();
-
- ExternalAddress message((address) msg);
- // push address of message
- pushptr(message.addr());
-
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, warning)));
- addl(rsp, wordSize); // discard argument
- pop_CPU_state();
-}
-
-
-void MacroAssembler::debug(int rdi, int rsi, int rbp, int rsp, int rbx, int rdx, int rcx, int rax, int eip, char* msg) {
-  // In order to get locks to work, we need to fake an in_VM state
- JavaThread* thread = JavaThread::current();
- JavaThreadState saved_state = thread->thread_state();
- thread->set_thread_state(_thread_in_vm);
- if (ShowMessageBoxOnError) {
- ttyLocker ttyl;
- if (CountBytecodes || TraceBytecodes || StopInterpreterAt) {
- BytecodeCounter::print();
- }
- // To see where a verify_oop failed, get $ebx+40/X for this frame.
- // This is the value of eip which points to where verify_oop will return.
- if (os::message_box(msg, "Execution stopped, print registers?")) {
- tty->print_cr("eip = 0x%08x", eip);
- tty->print_cr("rax, = 0x%08x", rax);
- tty->print_cr("rbx, = 0x%08x", rbx);
- tty->print_cr("rcx = 0x%08x", rcx);
- tty->print_cr("rdx = 0x%08x", rdx);
- tty->print_cr("rdi = 0x%08x", rdi);
- tty->print_cr("rsi = 0x%08x", rsi);
- tty->print_cr("rbp, = 0x%08x", rbp);
- tty->print_cr("rsp = 0x%08x", rsp);
- BREAKPOINT;
- }
- } else {
- ::tty->print_cr("=============== DEBUG MESSAGE: %s ================\n", msg);
- assert(false, "DEBUG MESSAGE");
- }
- ThreadStateTransition::transition(thread, _thread_in_vm, saved_state);
-}
-
-
-
-void MacroAssembler::os_breakpoint() {
- // instead of directly emitting a breakpoint, call os:breakpoint for better debugability
- // (e.g., MSVC can't call ps() otherwise)
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-
-void MacroAssembler::push_fTOS() {
- subl(rsp, 2 * wordSize);
- fstp_d(Address(rsp, 0));
-}
-
-
-void MacroAssembler::pop_fTOS() {
- fld_d(Address(rsp, 0));
- addl(rsp, 2 * wordSize);
-}
-
-
-void MacroAssembler::empty_FPU_stack() {
- if (VM_Version::supports_mmx()) {
- emms();
- } else {
- for (int i = 8; i-- > 0; ) ffree(i);
- }
-}
-
-
-class ControlWord {
- public:
- int32_t _value;
-
- int rounding_control() const { return (_value >> 10) & 3 ; }
- int precision_control() const { return (_value >> 8) & 3 ; }
- bool precision() const { return ((_value >> 5) & 1) != 0; }
- bool underflow() const { return ((_value >> 4) & 1) != 0; }
- bool overflow() const { return ((_value >> 3) & 1) != 0; }
- bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
- bool denormalized() const { return ((_value >> 1) & 1) != 0; }
- bool invalid() const { return ((_value >> 0) & 1) != 0; }
-
- void print() const {
- // rounding control
- const char* rc;
- switch (rounding_control()) {
- case 0: rc = "round near"; break;
- case 1: rc = "round down"; break;
- case 2: rc = "round up "; break;
- case 3: rc = "chop "; break;
- };
- // precision control
- const char* pc;
- switch (precision_control()) {
- case 0: pc = "24 bits "; break;
- case 1: pc = "reserved"; break;
- case 2: pc = "53 bits "; break;
- case 3: pc = "64 bits "; break;
- };
- // flags
- char f[9];
- f[0] = ' ';
- f[1] = ' ';
- f[2] = (precision ()) ? 'P' : 'p';
- f[3] = (underflow ()) ? 'U' : 'u';
- f[4] = (overflow ()) ? 'O' : 'o';
- f[5] = (zero_divide ()) ? 'Z' : 'z';
- f[6] = (denormalized()) ? 'D' : 'd';
- f[7] = (invalid ()) ? 'I' : 'i';
- f[8] = '\x0';
- // output
- printf("%04x masks = %s, %s, %s", _value & 0xFFFF, f, rc, pc);
- }
-
-};
-
-
-class StatusWord {
- public:
- int32_t _value;
-
- bool busy() const { return ((_value >> 15) & 1) != 0; }
- bool C3() const { return ((_value >> 14) & 1) != 0; }
- bool C2() const { return ((_value >> 10) & 1) != 0; }
- bool C1() const { return ((_value >> 9) & 1) != 0; }
- bool C0() const { return ((_value >> 8) & 1) != 0; }
- int top() const { return (_value >> 11) & 7 ; }
- bool error_status() const { return ((_value >> 7) & 1) != 0; }
- bool stack_fault() const { return ((_value >> 6) & 1) != 0; }
- bool precision() const { return ((_value >> 5) & 1) != 0; }
- bool underflow() const { return ((_value >> 4) & 1) != 0; }
- bool overflow() const { return ((_value >> 3) & 1) != 0; }
- bool zero_divide() const { return ((_value >> 2) & 1) != 0; }
- bool denormalized() const { return ((_value >> 1) & 1) != 0; }
- bool invalid() const { return ((_value >> 0) & 1) != 0; }
-
- void print() const {
- // condition codes
- char c[5];
- c[0] = (C3()) ? '3' : '-';
- c[1] = (C2()) ? '2' : '-';
- c[2] = (C1()) ? '1' : '-';
- c[3] = (C0()) ? '0' : '-';
- c[4] = '\x0';
- // flags
- char f[9];
- f[0] = (error_status()) ? 'E' : '-';
- f[1] = (stack_fault ()) ? 'S' : '-';
- f[2] = (precision ()) ? 'P' : '-';
- f[3] = (underflow ()) ? 'U' : '-';
- f[4] = (overflow ()) ? 'O' : '-';
- f[5] = (zero_divide ()) ? 'Z' : '-';
- f[6] = (denormalized()) ? 'D' : '-';
- f[7] = (invalid ()) ? 'I' : '-';
- f[8] = '\x0';
- // output
- printf("%04x flags = %s, cc = %s, top = %d", _value & 0xFFFF, f, c, top());
- }
-
-};
-
-
-class TagWord {
- public:
- int32_t _value;
-
- int tag_at(int i) const { return (_value >> (i*2)) & 3; }
-
- void print() const {
- printf("%04x", _value & 0xFFFF);
- }
-
-};
-
-
-class FPU_Register {
- public:
- int32_t _m0;
- int32_t _m1;
- int16_t _ex;
-
- bool is_indefinite() const {
- return _ex == -1 && _m1 == (int32_t)0xC0000000 && _m0 == 0;
- }
-
- void print() const {
- char sign = (_ex < 0) ? '-' : '+';
- const char* kind = (_ex == 0x7FFF || _ex == (int16_t)-1) ? "NaN" : " ";
- printf("%c%04hx.%08x%08x %s", sign, _ex, _m1, _m0, kind);
- };
-
-};
-
-
-class FPU_State {
- public:
- enum {
- register_size = 10,
- number_of_registers = 8,
- register_mask = 7
- };
-
- ControlWord _control_word;
- StatusWord _status_word;
- TagWord _tag_word;
- int32_t _error_offset;
- int32_t _error_selector;
- int32_t _data_offset;
- int32_t _data_selector;
- int8_t _register[register_size * number_of_registers];
-
- int tag_for_st(int i) const { return _tag_word.tag_at((_status_word.top() + i) & register_mask); }
- FPU_Register* st(int i) const { return (FPU_Register*)&_register[register_size * i]; }
-
- const char* tag_as_string(int tag) const {
- switch (tag) {
- case 0: return "valid";
- case 1: return "zero";
- case 2: return "special";
- case 3: return "empty";
- }
-    ShouldNotReachHere();
- return NULL;
- }
-
- void print() const {
- // print computation registers
- { int t = _status_word.top();
- for (int i = 0; i < number_of_registers; i++) {
- int j = (i - t) & register_mask;
- printf("%c r%d = ST%d = ", (j == 0 ? '*' : ' '), i, j);
- st(j)->print();
- printf(" %s\n", tag_as_string(_tag_word.tag_at(i)));
- }
- }
- printf("\n");
- // print control registers
- printf("ctrl = "); _control_word.print(); printf("\n");
- printf("stat = "); _status_word .print(); printf("\n");
- printf("tags = "); _tag_word .print(); printf("\n");
- }
-
-};
-
-
-class Flag_Register {
- public:
- int32_t _value;
-
- bool overflow() const { return ((_value >> 11) & 1) != 0; }
- bool direction() const { return ((_value >> 10) & 1) != 0; }
- bool sign() const { return ((_value >> 7) & 1) != 0; }
- bool zero() const { return ((_value >> 6) & 1) != 0; }
- bool auxiliary_carry() const { return ((_value >> 4) & 1) != 0; }
- bool parity() const { return ((_value >> 2) & 1) != 0; }
- bool carry() const { return ((_value >> 0) & 1) != 0; }
-
- void print() const {
- // flags
- char f[8];
- f[0] = (overflow ()) ? 'O' : '-';
- f[1] = (direction ()) ? 'D' : '-';
- f[2] = (sign ()) ? 'S' : '-';
- f[3] = (zero ()) ? 'Z' : '-';
- f[4] = (auxiliary_carry()) ? 'A' : '-';
- f[5] = (parity ()) ? 'P' : '-';
- f[6] = (carry ()) ? 'C' : '-';
- f[7] = '\x0';
- // output
- printf("%08x flags = %s", _value, f);
- }
-
-};
-
-
-class IU_Register {
- public:
- int32_t _value;
-
- void print() const {
- printf("%08x %11d", _value, _value);
- }
-
-};
-
-
-class IU_State {
- public:
- Flag_Register _eflags;
- IU_Register _rdi;
- IU_Register _rsi;
- IU_Register _rbp;
- IU_Register _rsp;
- IU_Register _rbx;
- IU_Register _rdx;
- IU_Register _rcx;
- IU_Register _rax;
-
- void print() const {
- // computation registers
- printf("rax, = "); _rax.print(); printf("\n");
- printf("rbx, = "); _rbx.print(); printf("\n");
- printf("rcx = "); _rcx.print(); printf("\n");
- printf("rdx = "); _rdx.print(); printf("\n");
- printf("rdi = "); _rdi.print(); printf("\n");
- printf("rsi = "); _rsi.print(); printf("\n");
- printf("rbp, = "); _rbp.print(); printf("\n");
- printf("rsp = "); _rsp.print(); printf("\n");
- printf("\n");
- // control registers
- printf("flgs = "); _eflags.print(); printf("\n");
- }
-};
-
-
-class CPU_State {
- public:
- FPU_State _fpu_state;
- IU_State _iu_state;
-
- void print() const {
- printf("--------------------------------------------------\n");
- _iu_state .print();
- printf("\n");
- _fpu_state.print();
- printf("--------------------------------------------------\n");
- }
-
-};
-
-
-static void _print_CPU_state(CPU_State* state) {
- state->print();
-};
-
-
-void MacroAssembler::print_CPU_state() {
- push_CPU_state();
- pushl(rsp); // pass CPU state
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, _print_CPU_state)));
- addl(rsp, wordSize); // discard argument
- pop_CPU_state();
-}
-
-
-static bool _verify_FPU(int stack_depth, char* s, CPU_State* state) {
- static int counter = 0;
- FPU_State* fs = &state->_fpu_state;
- counter++;
- // For leaf calls, only verify that the top few elements remain empty.
- // We only need 1 empty at the top for C2 code.
- if( stack_depth < 0 ) {
- if( fs->tag_for_st(7) != 3 ) {
- printf("FPR7 not empty\n");
- state->print();
- assert(false, "error");
- return false;
- }
- return true; // All other stack states do not matter
- }
-
- assert((fs->_control_word._value & 0xffff) == StubRoutines::_fpu_cntrl_wrd_std,
- "bad FPU control word");
-
- // compute stack depth
- int i = 0;
- while (i < FPU_State::number_of_registers && fs->tag_for_st(i) < 3) i++;
- int d = i;
- while (i < FPU_State::number_of_registers && fs->tag_for_st(i) == 3) i++;
- // verify findings
- if (i != FPU_State::number_of_registers) {
- // stack not contiguous
- printf("%s: stack not contiguous at ST%d\n", s, i);
- state->print();
- assert(false, "error");
- return false;
- }
- // check if computed stack depth corresponds to expected stack depth
- if (stack_depth < 0) {
- // expected stack depth is -stack_depth or less
- if (d > -stack_depth) {
- // too many elements on the stack
- printf("%s: <= %d stack elements expected but found %d\n", s, -stack_depth, d);
- state->print();
- assert(false, "error");
- return false;
- }
- } else {
- // expected stack depth is stack_depth
- if (d != stack_depth) {
- // wrong stack depth
- printf("%s: %d stack elements expected but found %d\n", s, stack_depth, d);
- state->print();
- assert(false, "error");
- return false;
- }
- }
- // everything is cool
- return true;
-}
-
-
-void MacroAssembler::verify_FPU(int stack_depth, const char* s) {
- if (!VerifyFPU) return;
- push_CPU_state();
- pushl(rsp); // pass CPU state
- ExternalAddress msg((address) s);
- // pass message string s
- pushptr(msg.addr());
- pushl(stack_depth); // pass stack depth
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, _verify_FPU)));
- addl(rsp, 3 * wordSize); // discard arguments
- // check for error
- { Label L;
- testl(rax, rax);
- jcc(Assembler::notZero, L);
- int3(); // break if error condition
- bind(L);
- }
- pop_CPU_state();
-}
-
-
-void MacroAssembler::push_IU_state() {
- pushad();
- pushfd();
-}
-
-
-void MacroAssembler::pop_IU_state() {
- popfd();
- popad();
-}
-
-
-void MacroAssembler::push_FPU_state() {
- subl(rsp, FPUStateSizeInWords * wordSize);
- fnsave(Address(rsp, 0));
- fwait();
-}
-
-
-void MacroAssembler::pop_FPU_state() {
- frstor(Address(rsp, 0));
- addl(rsp, FPUStateSizeInWords * wordSize);
-}
-
-
-void MacroAssembler::push_CPU_state() {
- push_IU_state();
- push_FPU_state();
-}
-
-
-void MacroAssembler::pop_CPU_state() {
- pop_FPU_state();
- pop_IU_state();
-}
-
-
-void MacroAssembler::push_callee_saved_registers() {
- pushl(rsi);
- pushl(rdi);
- pushl(rdx);
- pushl(rcx);
-}
-
-
-void MacroAssembler::pop_callee_saved_registers() {
- popl(rcx);
- popl(rdx);
- popl(rdi);
- popl(rsi);
-}
-
-
-void MacroAssembler::set_word_if_not_zero(Register dst) {
- xorl(dst, dst);
- set_byte_if_not_zero(dst);
-}
-
-// Write the serialization page so that the VM thread can do a pseudo remote membar.
-// We use the current thread pointer to calculate a thread-specific
-// offset to write to within the page. This minimizes bus traffic
-// due to cache line collisions.
-void MacroAssembler::serialize_memory(Register thread, Register tmp) {
- movl(tmp, thread);
- shrl(tmp, os::get_serialize_page_shift_count());
- andl(tmp, (os::vm_page_size() - sizeof(int)));
-
- Address index(noreg, tmp, Address::times_1);
- ExternalAddress page(os::get_memory_serialize_page());
-
- movptr(ArrayAddress(page, index), tmp);
-}
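// Editor's sketch (not HotSpot code) of the per-thread offset computed above:
// the thread pointer is hashed down to an int-aligned offset inside the
// serialization page, so concurrently serializing threads tend to hit
// different cache lines. page_shift and page_size stand in for
// os::get_serialize_page_shift_count() and os::vm_page_size().
#include <cstddef>
#include <cstdint>
static inline size_t serialize_offset_sketch(uintptr_t thread_ptr,
                                             int page_shift, size_t page_size) {
  return (size_t)(thread_ptr >> page_shift) & (page_size - sizeof(int));
}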
-
-
-void MacroAssembler::verify_tlab() {
-#ifdef ASSERT
- if (UseTLAB && VerifyOops) {
- Label next, ok;
- Register t1 = rsi;
- Register thread_reg = rbx;
-
- pushl(t1);
- pushl(thread_reg);
- get_thread(thread_reg);
-
- movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
- cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())));
- jcc(Assembler::aboveEqual, next);
- stop("assert(top >= start)");
- should_not_reach_here();
-
- bind(next);
- movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
- cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
- jcc(Assembler::aboveEqual, ok);
- stop("assert(top <= end)");
- should_not_reach_here();
-
- bind(ok);
- popl(thread_reg);
- popl(t1);
- }
-#endif
-}
-
-
-// Defines obj, preserves var_size_in_bytes
-void MacroAssembler::eden_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
- Register t1, Label& slow_case) {
- assert(obj == rax, "obj must be in rax, for cmpxchg");
- assert_different_registers(obj, var_size_in_bytes, t1);
- Register end = t1;
- Label retry;
- bind(retry);
- ExternalAddress heap_top((address) Universe::heap()->top_addr());
- movptr(obj, heap_top);
- if (var_size_in_bytes == noreg) {
- leal(end, Address(obj, con_size_in_bytes));
- } else {
- leal(end, Address(obj, var_size_in_bytes, Address::times_1));
- }
- // if end < obj then we wrapped around => object too long => slow case
- cmpl(end, obj);
- jcc(Assembler::below, slow_case);
- cmpptr(end, ExternalAddress((address) Universe::heap()->end_addr()));
- jcc(Assembler::above, slow_case);
-  // Compare obj with the top addr, and if still equal, store the new top addr (end)
-  // at the address of the top addr pointer. Sets ZF if they were equal, and clears
-  // it otherwise. Use the lock prefix for atomicity on MPs.
- if (os::is_MP()) {
- lock();
- }
- cmpxchgptr(end, heap_top);
- jcc(Assembler::notEqual, retry);
-}
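// Editor's sketch in portable C++ (not the generated code) of the bump-pointer
// CAS loop that eden_allocate emits: load the shared top pointer, compute the
// new end, bail out to the slow path on wrap-around or heap overflow, and retry
// whenever the compare-and-swap loses a race. Returning nullptr here stands in
// for jumping to slow_case.
#include <atomic>
#include <cstddef>
static inline void* eden_allocate_sketch(std::atomic<char*>* heap_top,
                                         char* heap_end, size_t size_in_bytes) {
  for (;;) {
    char* obj = heap_top->load();
    char* end = obj + size_in_bytes;
    if (end < obj || end > heap_end) return nullptr;           // slow case
    if (heap_top->compare_exchange_weak(obj, end)) return obj; // lock cmpxchgptr
    // CAS failed: another thread advanced the top pointer; retry
  }
}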
-
-
-// Defines obj, preserves var_size_in_bytes, okay for t2 == var_size_in_bytes.
-void MacroAssembler::tlab_allocate(Register obj, Register var_size_in_bytes, int con_size_in_bytes,
- Register t1, Register t2, Label& slow_case) {
- assert_different_registers(obj, t1, t2);
- assert_different_registers(obj, var_size_in_bytes, t1);
- Register end = t2;
- Register thread = t1;
-
- verify_tlab();
-
- get_thread(thread);
-
- movl(obj, Address(thread, JavaThread::tlab_top_offset()));
- if (var_size_in_bytes == noreg) {
- leal(end, Address(obj, con_size_in_bytes));
- } else {
- leal(end, Address(obj, var_size_in_bytes, Address::times_1));
- }
- cmpl(end, Address(thread, JavaThread::tlab_end_offset()));
- jcc(Assembler::above, slow_case);
-
- // update the tlab top pointer
- movl(Address(thread, JavaThread::tlab_top_offset()), end);
-
- // recover var_size_in_bytes if necessary
- if (var_size_in_bytes == end) {
- subl(var_size_in_bytes, obj);
- }
- verify_tlab();
-}
-
-// Preserves rbx, and rdx.
-void MacroAssembler::tlab_refill(Label& retry, Label& try_eden, Label& slow_case) {
- Register top = rax;
- Register t1 = rcx;
- Register t2 = rsi;
- Register thread_reg = rdi;
- assert_different_registers(top, thread_reg, t1, t2, /* preserve: */ rbx, rdx);
- Label do_refill, discard_tlab;
-
- if (CMSIncrementalMode || !Universe::heap()->supports_inline_contig_alloc()) {
- // No allocation in the shared eden.
- jmp(slow_case);
- }
-
- get_thread(thread_reg);
-
- movl(top, Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())));
- movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())));
-
- // calculate amount of free space
- subl(t1, top);
- shrl(t1, LogHeapWordSize);
-
- // Retain tlab and allocate object in shared space if
- // the amount free in the tlab is too large to discard.
- cmpl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())));
- jcc(Assembler::lessEqual, discard_tlab);
-
- // Retain
- movl(t2, ThreadLocalAllocBuffer::refill_waste_limit_increment());
- addl(Address(thread_reg, in_bytes(JavaThread::tlab_refill_waste_limit_offset())), t2);
- if (TLABStats) {
- // increment number of slow_allocations
- addl(Address(thread_reg, in_bytes(JavaThread::tlab_slow_allocations_offset())), 1);
- }
- jmp(try_eden);
-
- bind(discard_tlab);
- if (TLABStats) {
- // increment number of refills
- addl(Address(thread_reg, in_bytes(JavaThread::tlab_number_of_refills_offset())), 1);
- // accumulate wastage -- t1 is amount free in tlab
- addl(Address(thread_reg, in_bytes(JavaThread::tlab_fast_refill_waste_offset())), t1);
- }
-
- // if tlab is currently allocated (top or end != null) then
- // fill [top, end + alignment_reserve) with array object
- testl (top, top);
- jcc(Assembler::zero, do_refill);
-
- // set up the mark word
- movl(Address(top, oopDesc::mark_offset_in_bytes()), (int)markOopDesc::prototype()->copy_set_hash(0x2));
- // set the length to the remaining space
- subl(t1, typeArrayOopDesc::header_size(T_INT));
- addl(t1, ThreadLocalAllocBuffer::alignment_reserve());
- shll(t1, log2_intptr(HeapWordSize/sizeof(jint)));
- movl(Address(top, arrayOopDesc::length_offset_in_bytes()), t1);
- // set klass to intArrayKlass
- // dubious reloc why not an oop reloc?
- movptr(t1, ExternalAddress((address) Universe::intArrayKlassObj_addr()));
- movl(Address(top, oopDesc::klass_offset_in_bytes()), t1);
-
- // refill the tlab with an eden allocation
- bind(do_refill);
- movl(t1, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
- shll(t1, LogHeapWordSize);
- // add object_size ??
- eden_allocate(top, t1, 0, t2, slow_case);
-
- // Check that t1 was preserved in eden_allocate.
-#ifdef ASSERT
- if (UseTLAB) {
- Label ok;
- Register tsize = rsi;
- assert_different_registers(tsize, thread_reg, t1);
- pushl(tsize);
- movl(tsize, Address(thread_reg, in_bytes(JavaThread::tlab_size_offset())));
- shll(tsize, LogHeapWordSize);
- cmpl(t1, tsize);
- jcc(Assembler::equal, ok);
- stop("assert(t1 != tlab size)");
- should_not_reach_here();
-
- bind(ok);
- popl(tsize);
- }
-#endif
- movl(Address(thread_reg, in_bytes(JavaThread::tlab_start_offset())), top);
- movl(Address(thread_reg, in_bytes(JavaThread::tlab_top_offset())), top);
- addl(top, t1);
- subl(top, ThreadLocalAllocBuffer::alignment_reserve_in_bytes());
- movl(Address(thread_reg, in_bytes(JavaThread::tlab_end_offset())), top);
- verify_tlab();
- jmp(retry);
-}
-
-
-int MacroAssembler::biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
- bool swap_reg_contains_mark,
- Label& done, Label* slow_case,
- BiasedLockingCounters* counters) {
- assert(UseBiasedLocking, "why call this otherwise?");
- assert(swap_reg == rax, "swap_reg must be rax, for cmpxchg");
- assert_different_registers(lock_reg, obj_reg, swap_reg);
-
- if (PrintBiasedLockingStatistics && counters == NULL)
- counters = BiasedLocking::counters();
-
- bool need_tmp_reg = false;
- if (tmp_reg == noreg) {
- need_tmp_reg = true;
- tmp_reg = lock_reg;
- } else {
- assert_different_registers(lock_reg, obj_reg, swap_reg, tmp_reg);
- }
- assert(markOopDesc::age_shift == markOopDesc::lock_bits + markOopDesc::biased_lock_bits, "biased locking makes assumptions about bit layout");
- Address mark_addr (obj_reg, oopDesc::mark_offset_in_bytes());
- Address klass_addr (obj_reg, oopDesc::klass_offset_in_bytes());
- Address saved_mark_addr(lock_reg, 0);
-
- // Biased locking
- // See whether the lock is currently biased toward our thread and
- // whether the epoch is still valid
- // Note that the runtime guarantees sufficient alignment of JavaThread
- // pointers to allow age to be placed into low bits
- // First check to see whether biasing is even enabled for this object
- Label cas_label;
- int null_check_offset = -1;
- if (!swap_reg_contains_mark) {
- null_check_offset = offset();
- movl(swap_reg, mark_addr);
- }
- if (need_tmp_reg) {
- pushl(tmp_reg);
- }
- movl(tmp_reg, swap_reg);
- andl(tmp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpl(tmp_reg, markOopDesc::biased_lock_pattern);
- if (need_tmp_reg) {
- popl(tmp_reg);
- }
- jcc(Assembler::notEqual, cas_label);
- // The bias pattern is present in the object's header. Need to check
- // whether the bias owner and the epoch are both still current.
- // Note that because there is no current thread register on x86 we
- // need to store off the mark word we read out of the object to
- // avoid reloading it and needing to recheck invariants below. This
- // store is unfortunate but it makes the overall code shorter and
- // simpler.
- movl(saved_mark_addr, swap_reg);
- if (need_tmp_reg) {
- pushl(tmp_reg);
- }
- get_thread(tmp_reg);
- xorl(swap_reg, tmp_reg);
- if (swap_reg_contains_mark) {
- null_check_offset = offset();
- }
- movl(tmp_reg, klass_addr);
- xorl(swap_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
- andl(swap_reg, ~((int) markOopDesc::age_mask_in_place));
- if (need_tmp_reg) {
- popl(tmp_reg);
- }
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address)counters->biased_lock_entry_count_addr()));
- }
- jcc(Assembler::equal, done);
-
- Label try_revoke_bias;
- Label try_rebias;
-
- // At this point we know that the header has the bias pattern and
- // that we are not the bias owner in the current epoch. We need to
- // figure out more details about the state of the header in order to
- // know what operations can be legally performed on the object's
- // header.
-
- // If the low three bits in the xor result aren't clear, that means
- // the prototype header is no longer biased and we have to revoke
- // the bias on this object.
- testl(swap_reg, markOopDesc::biased_lock_mask_in_place);
- jcc(Assembler::notZero, try_revoke_bias);
-
- // Biasing is still enabled for this data type. See whether the
- // epoch of the current bias is still valid, meaning that the epoch
- // bits of the mark word are equal to the epoch bits of the
- // prototype header. (Note that the prototype header's epoch bits
- // only change at a safepoint.) If not, attempt to rebias the object
- // toward the current thread. Note that we must be absolutely sure
- // that the current epoch is invalid in order to do this because
- // otherwise the manipulations it performs on the mark word are
- // illegal.
- testl(swap_reg, markOopDesc::epoch_mask_in_place);
- jcc(Assembler::notZero, try_rebias);
-
- // The epoch of the current bias is still valid but we know nothing
- // about the owner; it might be set or it might be clear. Try to
- // acquire the bias of the object using an atomic operation. If this
- // fails we will go in to the runtime to revoke the object's bias.
- // Note that we first construct the presumed unbiased header so we
- // don't accidentally blow away another thread's valid bias.
- movl(swap_reg, saved_mark_addr);
- andl(swap_reg,
- markOopDesc::biased_lock_mask_in_place | markOopDesc::age_mask_in_place | markOopDesc::epoch_mask_in_place);
- if (need_tmp_reg) {
- pushl(tmp_reg);
- }
- get_thread(tmp_reg);
- orl(tmp_reg, swap_reg);
- if (os::is_MP()) {
- lock();
- }
- cmpxchg(tmp_reg, Address(obj_reg, 0));
- if (need_tmp_reg) {
- popl(tmp_reg);
- }
- // If the biasing toward our thread failed, this means that
- // another thread succeeded in biasing it toward itself and we
- // need to revoke that bias. The revocation will occur in the
- // interpreter runtime in the slow case.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address)counters->anonymously_biased_lock_entry_count_addr()));
- }
- if (slow_case != NULL) {
- jcc(Assembler::notZero, *slow_case);
- }
- jmp(done);
-
- bind(try_rebias);
- // At this point we know the epoch has expired, meaning that the
- // current "bias owner", if any, is actually invalid. Under these
- // circumstances _only_, we are allowed to use the current header's
- // value as the comparison value when doing the cas to acquire the
- // bias in the current epoch. In other words, we allow transfer of
- // the bias from one thread to another directly in this situation.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- if (need_tmp_reg) {
- pushl(tmp_reg);
- }
- get_thread(tmp_reg);
- movl(swap_reg, klass_addr);
- orl(tmp_reg, Address(swap_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
- movl(swap_reg, saved_mark_addr);
- if (os::is_MP()) {
- lock();
- }
- cmpxchg(tmp_reg, Address(obj_reg, 0));
- if (need_tmp_reg) {
- popl(tmp_reg);
- }
- // If the biasing toward our thread failed, then another thread
- // succeeded in biasing it toward itself and we need to revoke that
- // bias. The revocation will occur in the runtime in the slow case.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address)counters->rebiased_lock_entry_count_addr()));
- }
- if (slow_case != NULL) {
- jcc(Assembler::notZero, *slow_case);
- }
- jmp(done);
-
- bind(try_revoke_bias);
- // The prototype mark in the klass doesn't have the bias bit set any
- // more, indicating that objects of this data type are not supposed
- // to be biased any more. We are going to try to reset the mark of
- // this object to the prototype value and fall through to the
- // CAS-based locking scheme. Note that if our CAS fails, it means
- // that another thread raced us for the privilege of revoking the
- // bias of this particular object, so it's okay to continue in the
- // normal locking code.
- //
- // FIXME: due to a lack of registers we currently blow away the age
- // bits in this situation. Should attempt to preserve them.
- movl(swap_reg, saved_mark_addr);
- if (need_tmp_reg) {
- pushl(tmp_reg);
- }
- movl(tmp_reg, klass_addr);
- movl(tmp_reg, Address(tmp_reg, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
- if (os::is_MP()) {
- lock();
- }
- cmpxchg(tmp_reg, Address(obj_reg, 0));
- if (need_tmp_reg) {
- popl(tmp_reg);
- }
- // Fall through to the normal CAS-based lock, because no matter what
- // the result of the above CAS, some thread must have succeeded in
- // removing the bias bit from the object's header.
- if (counters != NULL) {
- cond_inc32(Assembler::zero,
- ExternalAddress((address)counters->revoked_lock_entry_count_addr()));
- }
-
- bind(cas_label);
-
- return null_check_offset;
-}
-
-
-void MacroAssembler::biased_locking_exit(Register obj_reg, Register temp_reg, Label& done) {
- assert(UseBiasedLocking, "why call this otherwise?");
-
- // Check for biased locking unlock case, which is a no-op
- // Note: we do not have to check the thread ID for two reasons.
- // First, the interpreter checks for IllegalMonitorStateException at
- // a higher level. Second, if the bias was revoked while we held the
- // lock, the object could not be rebiased toward another thread, so
- // the bias bit would be clear.
- movl(temp_reg, Address(obj_reg, oopDesc::mark_offset_in_bytes()));
- andl(temp_reg, markOopDesc::biased_lock_mask_in_place);
- cmpl(temp_reg, markOopDesc::biased_lock_pattern);
- jcc(Assembler::equal, done);
-}
-
-
-Assembler::Condition MacroAssembler::negate_condition(Assembler::Condition cond) {
- switch (cond) {
- // Note some conditions are synonyms for others
- case Assembler::zero: return Assembler::notZero;
- case Assembler::notZero: return Assembler::zero;
- case Assembler::less: return Assembler::greaterEqual;
- case Assembler::lessEqual: return Assembler::greater;
- case Assembler::greater: return Assembler::lessEqual;
- case Assembler::greaterEqual: return Assembler::less;
- case Assembler::below: return Assembler::aboveEqual;
- case Assembler::belowEqual: return Assembler::above;
- case Assembler::above: return Assembler::belowEqual;
- case Assembler::aboveEqual: return Assembler::below;
- case Assembler::overflow: return Assembler::noOverflow;
- case Assembler::noOverflow: return Assembler::overflow;
- case Assembler::negative: return Assembler::positive;
- case Assembler::positive: return Assembler::negative;
- case Assembler::parity: return Assembler::noParity;
- case Assembler::noParity: return Assembler::parity;
- }
- ShouldNotReachHere(); return Assembler::overflow;
-}
-
-
-void MacroAssembler::cond_inc32(Condition cond, AddressLiteral counter_addr) {
- Condition negated_cond = negate_condition(cond);
- Label L;
- jcc(negated_cond, L);
- atomic_incl(counter_addr);
- bind(L);
-}
-
-void MacroAssembler::atomic_incl(AddressLiteral counter_addr) {
- pushfd();
- if (os::is_MP())
- lock();
- increment(counter_addr);
- popfd();
-}
-
-SkipIfEqual::SkipIfEqual(
- MacroAssembler* masm, const bool* flag_addr, bool value) {
- _masm = masm;
- _masm->cmp8(ExternalAddress((address)flag_addr), value);
- _masm->jcc(Assembler::equal, _label);
-}
-
-SkipIfEqual::~SkipIfEqual() {
- _masm->bind(_label);
-}
-
-
-// Writes to stack successive pages until offset reached to check for
-// stack overflow + shadow pages. This clobbers tmp.
-void MacroAssembler::bang_stack_size(Register size, Register tmp) {
- movl(tmp, rsp);
- // Bang stack for total size given plus shadow page size.
- // Bang one page at a time because large size can bang beyond yellow and
- // red zones.
- Label loop;
- bind(loop);
- movl(Address(tmp, (-os::vm_page_size())), size );
- subl(tmp, os::vm_page_size());
- subl(size, os::vm_page_size());
- jcc(Assembler::greater, loop);
-
- // Bang down shadow pages too.
- // The -1 because we already subtracted 1 page.
- for (int i = 0; i< StackShadowPages-1; i++) {
- movl(Address(tmp, (-i*os::vm_page_size())), size );
- }
-}
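The MacroAssembler::negate_condition switch deleted above maps every x86 condition to its opposite explicitly. Given the Condition encodings declared further down in this patch (zero = 0x4, notZero = 0x5, less = 0xc, greaterEqual = 0xd, and so on), each condition and its negation differ only in the low bit, so the same mapping can be expressed as a one-line XOR. Below is a minimal standalone C++ sketch, not part of the patch; the enum values are copied from the deleted assembler_x86_64.hpp later in this changeset, and the self-test in main() is illustrative only:

#include <cassert>

// Condition encodings as declared in the deleted Assembler::Condition enum.
enum Condition {
  overflow   = 0x0, noOverflow   = 0x1,
  below      = 0x2, aboveEqual   = 0x3,
  zero       = 0x4, notZero      = 0x5,
  belowEqual = 0x6, above        = 0x7,
  negative   = 0x8, positive     = 0x9,
  parity     = 0xa, noParity     = 0xb,
  less       = 0xc, greaterEqual = 0xd,
  lessEqual  = 0xe, greater      = 0xf
};

// Equivalent of the deleted MacroAssembler::negate_condition switch:
// a condition and its negation differ only in the low bit of the encoding.
inline Condition negate_condition(Condition cond) {
  return static_cast<Condition>(cond ^ 1);
}

int main() {
  assert(negate_condition(zero)     == notZero);
  assert(negate_condition(less)     == greaterEqual);
  assert(negate_condition(above)    == belowEqual);
  assert(negate_condition(noParity) == parity);
  return 0;
}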
diff --git a/src/cpu/x86/vm/assembler_x86_32.inline.hpp b/src/cpu/x86/vm/assembler_x86_32.inline.hpp
deleted file mode 100644
index 8e20a2e06..000000000
--- a/src/cpu/x86/vm/assembler_x86_32.inline.hpp
+++ /dev/null
@@ -1,64 +0,0 @@
-/*
- * Copyright 1997-2005 Sun Microsystems, Inc. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-inline void MacroAssembler::pd_patch_instruction(address branch, address target) {
- unsigned char op = branch[0];
- assert(op == 0xE8 /* call */ ||
- op == 0xE9 /* jmp */ ||
- op == 0xEB /* short jmp */ ||
- (op & 0xF0) == 0x70 /* short jcc */ ||
- op == 0x0F && (branch[1] & 0xF0) == 0x80 /* jcc */,
- "Invalid opcode at patch point");
-
- if (op == 0xEB || (op & 0xF0) == 0x70) {
- // short offset operators (jmp and jcc)
- char* disp = (char*) &branch[1];
- int imm8 = target - (address) &disp[1];
- guarantee(this->is8bit(imm8), "Short forward jump exceeds 8-bit offset");
- *disp = imm8;
- } else {
- int* disp = (int*) &branch[(op == 0x0F)? 2: 1];
- int imm32 = target - (address) &disp[1];
- *disp = imm32;
- }
-}
-
-#ifndef PRODUCT
-inline void MacroAssembler::pd_print_patched_instruction(address branch) {
- const char* s;
- unsigned char op = branch[0];
- if (op == 0xE8) {
- s = "call";
- } else if (op == 0xE9 || op == 0xEB) {
- s = "jmp";
- } else if ((op & 0xF0) == 0x70) {
- s = "jcc";
- } else if (op == 0x0F) {
- s = "jcc";
- } else {
- s = "????";
- }
- tty->print("%s (unresolved)", s);
-}
-#endif // ndef PRODUCT
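The pd_patch_instruction code deleted above back-patches the displacement of an already-emitted call/jmp/jcc once the branch target is known: for the short forms the 8-bit displacement is measured from the byte following the one-byte displacement field, and it must fit in 8 bits or the guarantee fires. Below is a minimal standalone sketch of the same displacement arithmetic for a short jmp (opcode 0xEB), not part of the patch; patch_short_jmp is a hypothetical helper used only for illustration:

#include <cassert>
#include <cstdint>

// Patch the 8-bit displacement of an already-emitted short jmp (0xEB disp8)
// so that it branches to `target`. Mirrors the short-offset branch of the
// deleted MacroAssembler::pd_patch_instruction.
static void patch_short_jmp(uint8_t* branch, uint8_t* target) {
  assert(branch[0] == 0xEB && "expected a short jmp at the patch point");
  // The displacement is relative to the first byte after the 2-byte
  // instruction, i.e. &branch[2].
  int disp = static_cast<int>(target - (branch + 2));
  assert(disp >= -128 && disp <= 127 && "short jump exceeds 8-bit offset");
  branch[1] = static_cast<uint8_t>(disp);
}

int main() {
  uint8_t code[16] = { 0xEB, 0x00 };   // jmp <not yet bound>
  patch_short_jmp(code, code + 6);     // bind the target 4 bytes past the jmp
  assert(code[1] == 0x04);
  return 0;
}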
diff --git a/src/cpu/x86/vm/assembler_x86_64.hpp b/src/cpu/x86/vm/assembler_x86_64.hpp
deleted file mode 100644
index bf509e02b..000000000
--- a/src/cpu/x86/vm/assembler_x86_64.hpp
+++ /dev/null
@@ -1,1477 +0,0 @@
-/*
- * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-class BiasedLockingCounters;
-
-// Contains all the definitions needed for amd64 assembly code generation.
-
-#ifdef _LP64
-// Calling convention
-class Argument VALUE_OBJ_CLASS_SPEC {
- public:
- enum {
-#ifdef _WIN64
- n_int_register_parameters_c = 4, // rcx, rdx, r8, r9 (c_rarg0, c_rarg1, ...)
- n_float_register_parameters_c = 4, // xmm0 - xmm3 (c_farg0, c_farg1, ... )
-#else
- n_int_register_parameters_c = 6, // rdi, rsi, rdx, rcx, r8, r9 (c_rarg0, c_rarg1, ...)
- n_float_register_parameters_c = 8, // xmm0 - xmm7 (c_farg0, c_farg1, ... )
-#endif // _WIN64
- n_int_register_parameters_j = 6, // j_rarg0, j_rarg1, ...
- n_float_register_parameters_j = 8 // j_farg0, j_farg1, ...
- };
-};
-
-
-// Symbolically name the register arguments used by the c calling convention.
-// Windows is different from linux/solaris. So much for standards...
-
-#ifdef _WIN64
-
-REGISTER_DECLARATION(Register, c_rarg0, rcx);
-REGISTER_DECLARATION(Register, c_rarg1, rdx);
-REGISTER_DECLARATION(Register, c_rarg2, r8);
-REGISTER_DECLARATION(Register, c_rarg3, r9);
-
-REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
-REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
-REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
-REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
-
-#else
-
-REGISTER_DECLARATION(Register, c_rarg0, rdi);
-REGISTER_DECLARATION(Register, c_rarg1, rsi);
-REGISTER_DECLARATION(Register, c_rarg2, rdx);
-REGISTER_DECLARATION(Register, c_rarg3, rcx);
-REGISTER_DECLARATION(Register, c_rarg4, r8);
-REGISTER_DECLARATION(Register, c_rarg5, r9);
-
-REGISTER_DECLARATION(XMMRegister, c_farg0, xmm0);
-REGISTER_DECLARATION(XMMRegister, c_farg1, xmm1);
-REGISTER_DECLARATION(XMMRegister, c_farg2, xmm2);
-REGISTER_DECLARATION(XMMRegister, c_farg3, xmm3);
-REGISTER_DECLARATION(XMMRegister, c_farg4, xmm4);
-REGISTER_DECLARATION(XMMRegister, c_farg5, xmm5);
-REGISTER_DECLARATION(XMMRegister, c_farg6, xmm6);
-REGISTER_DECLARATION(XMMRegister, c_farg7, xmm7);
-
-#endif // _WIN64
-
-// Symbolically name the register arguments used by the Java calling convention.
-// We have control over the convention for java so we can do what we please.
-// What pleases us is to offset the java calling convention so that when
-// we call a suitable jni method the arguments are lined up and we don't
- // have to do much shuffling. A suitable jni method is non-static and has a
-// small number of arguments (two fewer args on windows)
-//
-// |-------------------------------------------------------|
-// | c_rarg0 c_rarg1 c_rarg2 c_rarg3 c_rarg4 c_rarg5 |
-// |-------------------------------------------------------|
-// | rcx rdx r8 r9 rdi* rsi* | windows (* not a c_rarg)
-// | rdi rsi rdx rcx r8 r9 | solaris/linux
-// |-------------------------------------------------------|
-// | j_rarg5 j_rarg0 j_rarg1 j_rarg2 j_rarg3 j_rarg4 |
-// |-------------------------------------------------------|
-
-REGISTER_DECLARATION(Register, j_rarg0, c_rarg1);
-REGISTER_DECLARATION(Register, j_rarg1, c_rarg2);
-REGISTER_DECLARATION(Register, j_rarg2, c_rarg3);
-// Windows runs out of register args here
-#ifdef _WIN64
-REGISTER_DECLARATION(Register, j_rarg3, rdi);
-REGISTER_DECLARATION(Register, j_rarg4, rsi);
-#else
-REGISTER_DECLARATION(Register, j_rarg3, c_rarg4);
-REGISTER_DECLARATION(Register, j_rarg4, c_rarg5);
-#endif // _WIN64
-REGISTER_DECLARATION(Register, j_rarg5, c_rarg0);
-
-REGISTER_DECLARATION(XMMRegister, j_farg0, xmm0);
-REGISTER_DECLARATION(XMMRegister, j_farg1, xmm1);
-REGISTER_DECLARATION(XMMRegister, j_farg2, xmm2);
-REGISTER_DECLARATION(XMMRegister, j_farg3, xmm3);
-REGISTER_DECLARATION(XMMRegister, j_farg4, xmm4);
-REGISTER_DECLARATION(XMMRegister, j_farg5, xmm5);
-REGISTER_DECLARATION(XMMRegister, j_farg6, xmm6);
-REGISTER_DECLARATION(XMMRegister, j_farg7, xmm7);
-
-REGISTER_DECLARATION(Register, rscratch1, r10); // volatile
-REGISTER_DECLARATION(Register, rscratch2, r11); // volatile
-
-REGISTER_DECLARATION(Register, r12_heapbase, r12); // callee-saved
-REGISTER_DECLARATION(Register, r15_thread, r15); // callee-saved
-
-#endif // _LP64
-
-// Address is an abstraction used to represent a memory location
-// using any of the amd64 addressing modes with one object.
-//
-// Note: A register location is represented via a Register, not
-// via an address for efficiency & simplicity reasons.
-
-class ArrayAddress;
-
-class Address VALUE_OBJ_CLASS_SPEC {
- public:
- enum ScaleFactor {
- no_scale = -1,
- times_1 = 0,
- times_2 = 1,
- times_4 = 2,
- times_8 = 3
- };
-
- private:
- Register _base;
- Register _index;
- ScaleFactor _scale;
- int _disp;
- RelocationHolder _rspec;
-
- // Easily misused constructors make them private
- Address(int disp, address loc, relocInfo::relocType rtype);
- Address(int disp, address loc, RelocationHolder spec);
-
- public:
- // creation
- Address()
- : _base(noreg),
- _index(noreg),
- _scale(no_scale),
- _disp(0) {
- }
-
- // No default displacement otherwise Register can be implicitly
- // converted to 0(Register) which is quite a different animal.
-
- Address(Register base, int disp)
- : _base(base),
- _index(noreg),
- _scale(no_scale),
- _disp(disp) {
- }
-
- Address(Register base, Register index, ScaleFactor scale, int disp = 0)
- : _base (base),
- _index(index),
- _scale(scale),
- _disp (disp) {
- assert(!index->is_valid() == (scale == Address::no_scale),
- "inconsistent address");
- }
-
- // The following two overloads are used in connection with the
- // ByteSize type (see sizes.hpp). They simplify the use of
- // ByteSize'd arguments in assembly code. Note that their equivalent
- // for the optimized build are the member functions with int disp
- // argument since ByteSize is mapped to an int type in that case.
- //
- // Note: DO NOT introduce similar overloaded functions for WordSize
- // arguments as in the optimized mode, both ByteSize and WordSize
- // are mapped to the same type and thus the compiler cannot make a
- // distinction anymore (=> compiler errors).
-
-#ifdef ASSERT
- Address(Register base, ByteSize disp)
- : _base(base),
- _index(noreg),
- _scale(no_scale),
- _disp(in_bytes(disp)) {
- }
-
- Address(Register base, Register index, ScaleFactor scale, ByteSize disp)
- : _base(base),
- _index(index),
- _scale(scale),
- _disp(in_bytes(disp)) {
- assert(!index->is_valid() == (scale == Address::no_scale),
- "inconsistent address");
- }
-#endif // ASSERT
-
- // accessors
- bool uses(Register reg) const {
- return _base == reg || _index == reg;
- }
-
- // Convert the raw encoding form into the form expected by the constructor for
- // Address. An index of 4 (rsp) corresponds to having no index, so convert
- // that to noreg for the Address constructor.
- static Address make_raw(int base, int index, int scale, int disp);
-
- static Address make_array(ArrayAddress);
-
- private:
- bool base_needs_rex() const {
- return _base != noreg && _base->encoding() >= 8;
- }
-
- bool index_needs_rex() const {
- return _index != noreg &&_index->encoding() >= 8;
- }
-
- relocInfo::relocType reloc() const { return _rspec.type(); }
-
- friend class Assembler;
- friend class MacroAssembler;
- friend class LIR_Assembler; // base/index/scale/disp
-};
-
-//
-// AddressLiteral has been split out from Address because operands of this type
-// need to be treated specially on 32bit vs. 64bit platforms. By splitting it out
-// the few instructions that need to deal with address literals are unique and the
-// MacroAssembler does not have to implement every instruction in the Assembler
-// in order to search for address literals that may need special handling depending
- // on the instruction and the platform. As a small step on the way to merging the i486/amd64
-// directories.
-//
-class AddressLiteral VALUE_OBJ_CLASS_SPEC {
- friend class ArrayAddress;
- RelocationHolder _rspec;
- // Typically when we use AddressLiterals we want to use their rval
- // However in some situations we want the lval (effective address) of the item.
- // We provide a special factory for making those lvals.
- bool _is_lval;
-
- // If the target is far we'll need to load the ea of this to
- // a register to reach it. Otherwise if near we can do rip
- // relative addressing.
-
- address _target;
-
- protected:
- // creation
- AddressLiteral()
- : _is_lval(false),
- _target(NULL)
- {}
-
- public:
-
-
- AddressLiteral(address target, relocInfo::relocType rtype);
-
- AddressLiteral(address target, RelocationHolder const& rspec)
- : _rspec(rspec),
- _is_lval(false),
- _target(target)
- {}
-
- AddressLiteral addr() {
- AddressLiteral ret = *this;
- ret._is_lval = true;
- return ret;
- }
-
-
- private:
-
- address target() { return _target; }
- bool is_lval() { return _is_lval; }
-
- relocInfo::relocType reloc() const { return _rspec.type(); }
- const RelocationHolder& rspec() const { return _rspec; }
-
- friend class Assembler;
- friend class MacroAssembler;
- friend class Address;
- friend class LIR_Assembler;
-};
-
-// Convenience classes
-class RuntimeAddress: public AddressLiteral {
-
- public:
-
- RuntimeAddress(address target) : AddressLiteral(target, relocInfo::runtime_call_type) {}
-
-};
-
-class OopAddress: public AddressLiteral {
-
- public:
-
- OopAddress(address target) : AddressLiteral(target, relocInfo::oop_type){}
-
-};
-
-class ExternalAddress: public AddressLiteral {
-
- public:
-
- ExternalAddress(address target) : AddressLiteral(target, relocInfo::external_word_type){}
-
-};
-
-class InternalAddress: public AddressLiteral {
-
- public:
-
- InternalAddress(address target) : AddressLiteral(target, relocInfo::internal_word_type) {}
-
-};
-
-// x86 can do array addressing as a single operation since disp can be an absolute
-// address but amd64 can't [e.g. array_base(rx, ry:width) ]. We create a class
-// that expresses the concept but does extra magic on amd64 to get the final result
-
-class ArrayAddress VALUE_OBJ_CLASS_SPEC {
- private:
-
- AddressLiteral _base;
- Address _index;
-
- public:
-
- ArrayAddress() {};
- ArrayAddress(AddressLiteral base, Address index): _base(base), _index(index) {};
- AddressLiteral base() { return _base; }
- Address index() { return _index; }
-
-};
-
-// The amd64 Assembler: Pure assembler doing NO optimizations on
-// the instruction level (e.g. mov rax, 0 is not translated into xor
-// rax, rax!); i.e., what you write is what you get. The Assembler is
-// generating code into a CodeBuffer.
-
-const int FPUStateSizeInWords = 512 / wordSize;
-
-class Assembler : public AbstractAssembler {
- friend class AbstractAssembler; // for the non-virtual hack
- friend class StubGenerator;
-
-
- protected:
-#ifdef ASSERT
- void check_relocation(RelocationHolder const& rspec, int format);
-#endif
-
- inline void emit_long64(jlong x);
-
- void emit_data(jint data, relocInfo::relocType rtype, int format /* = 1 */);
- void emit_data(jint data, RelocationHolder const& rspec, int format /* = 1 */);
- void emit_data64(jlong data, relocInfo::relocType rtype, int format = 0);
- void emit_data64(jlong data, RelocationHolder const& rspec, int format = 0);
-
- // Helper functions for groups of instructions
- void emit_arith_b(int op1, int op2, Register dst, int imm8);
-
- void emit_arith(int op1, int op2, Register dst, int imm32);
- // only x86??
- void emit_arith(int op1, int op2, Register dst, jobject obj);
- void emit_arith(int op1, int op2, Register dst, Register src);
-
- void emit_operand(Register reg,
- Register base, Register index, Address::ScaleFactor scale,
- int disp,
- RelocationHolder const& rspec,
- int rip_relative_correction = 0);
- void emit_operand(Register reg, Address adr,
- int rip_relative_correction = 0);
- void emit_operand(XMMRegister reg,
- Register base, Register index, Address::ScaleFactor scale,
- int disp,
- RelocationHolder const& rspec,
- int rip_relative_correction = 0);
- void emit_operand(XMMRegister reg, Address adr,
- int rip_relative_correction = 0);
-
- // Immediate-to-memory forms
- void emit_arith_operand(int op1, Register rm, Address adr, int imm32);
-
- void emit_farith(int b1, int b2, int i);
-
- bool reachable(AddressLiteral adr);
-
- // These are all easily abused and hence protected
-
- // Make these disappear in 64bit mode since they would never be correct
-#ifndef _LP64
- void cmp_literal32(Register src1, int32_t imm32, RelocationHolder const& rspec);
- void cmp_literal32(Address src1, int32_t imm32, RelocationHolder const& rspec);
-
- void mov_literal32(Register dst, int32_t imm32, RelocationHolder const& rspec);
- void mov_literal32(Address dst, int32_t imm32, RelocationHolder const& rspec);
-
- void push_literal32(int32_t imm32, RelocationHolder const& rspec);
-#endif // _LP64
-
-
- void mov_literal64(Register dst, intptr_t imm64, RelocationHolder const& rspec);
-
- // These are unique in that we are ensured by the caller that the 32bit
- // relative in these instructions will always be able to reach the potentially
- // 64bit address described by entry. Since they can take a 64bit address they
- // don't have the 32 suffix like the other instructions in this class.
- void jmp_literal(address entry, RelocationHolder const& rspec);
- void call_literal(address entry, RelocationHolder const& rspec);
-
- public:
- enum Condition { // The amd64 condition codes used for conditional jumps/moves.
- zero = 0x4,
- notZero = 0x5,
- equal = 0x4,
- notEqual = 0x5,
- less = 0xc,
- lessEqual = 0xe,
- greater = 0xf,
- greaterEqual = 0xd,
- below = 0x2,
- belowEqual = 0x6,
- above = 0x7,
- aboveEqual = 0x3,
- overflow = 0x0,
- noOverflow = 0x1,
- carrySet = 0x2,
- carryClear = 0x3,
- negative = 0x8,
- positive = 0x9,
- parity = 0xa,
- noParity = 0xb
- };
-
- enum Prefix {
- // segment overrides
- // XXX remove segment prefixes
- CS_segment = 0x2e,
- SS_segment = 0x36,
- DS_segment = 0x3e,
- ES_segment = 0x26,
- FS_segment = 0x64,
- GS_segment = 0x65,
-
- REX = 0x40,
-
- REX_B = 0x41,
- REX_X = 0x42,
- REX_XB = 0x43,
- REX_R = 0x44,
- REX_RB = 0x45,
- REX_RX = 0x46,
- REX_RXB = 0x47,
-
- REX_W = 0x48,
-
- REX_WB = 0x49,
- REX_WX = 0x4A,
- REX_WXB = 0x4B,
- REX_WR = 0x4C,
- REX_WRB = 0x4D,
- REX_WRX = 0x4E,
- REX_WRXB = 0x4F
- };
-
- enum WhichOperand {
- // input to locate_operand, and format code for relocations
- imm64_operand = 0, // embedded 64-bit immediate operand
- disp32_operand = 1, // embedded 32-bit displacement
- call32_operand = 2, // embedded 32-bit self-relative displacement
-#ifndef AMD64
- _WhichOperand_limit = 3
-#else
- narrow_oop_operand = 3, // embedded 32-bit immediate narrow oop
- _WhichOperand_limit = 4
-#endif
- };
-
- public:
-
- // Creation
- Assembler(CodeBuffer* code)
- : AbstractAssembler(code) {
- }
-
- // Decoding
- static address locate_operand(address inst, WhichOperand which);
- static address locate_next_instruction(address inst);
-
- // Utilities
-
- static bool is_simm(int64_t x, int nbits) { return -( CONST64(1) << (nbits-1) ) <= x && x < ( CONST64(1) << (nbits-1) ); }
- static bool is_simm32 (int64_t x) { return x == (int64_t)(int32_t)x; }
-
-
- // Stack
- void pushaq();
- void popaq();
-
- void pushfq();
- void popfq();
-
- void pushq(int imm32);
-
- void pushq(Register src);
- void pushq(Address src);
-
- void popq(Register dst);
- void popq(Address dst);
-
- // Instruction prefixes
- void prefix(Prefix p);
-
- int prefix_and_encode(int reg_enc, bool byteinst = false);
- int prefixq_and_encode(int reg_enc);
-
- int prefix_and_encode(int dst_enc, int src_enc, bool byteinst = false);
- int prefixq_and_encode(int dst_enc, int src_enc);
-
- void prefix(Register reg);
- void prefix(Address adr);
- void prefixq(Address adr);
-
- void prefix(Address adr, Register reg, bool byteinst = false);
- void prefixq(Address adr, Register reg);
-
- void prefix(Address adr, XMMRegister reg);
-
- // Moves
- void movb(Register dst, Address src);
- void movb(Address dst, int imm8);
- void movb(Address dst, Register src);
-
- void movw(Address dst, int imm16);
- void movw(Register dst, Address src);
- void movw(Address dst, Register src);
-
- void movl(Register dst, int imm32);
- void movl(Register dst, Register src);
- void movl(Register dst, Address src);
- void movl(Address dst, int imm32);
- void movl(Address dst, Register src);
-
- void movq(Register dst, Register src);
- void movq(Register dst, Address src);
- void movq(Address dst, Register src);
- // These prevent using movq from converting a zero (like NULL) into Register
- // by giving the compiler two choices it can't resolve
- void movq(Address dst, void* dummy);
- void movq(Register dst, void* dummy);
-
- void mov64(Register dst, intptr_t imm64);
- void mov64(Address dst, intptr_t imm64);
-
- void movsbl(Register dst, Address src);
- void movsbl(Register dst, Register src);
- void movswl(Register dst, Address src);
- void movswl(Register dst, Register src);
- void movslq(Register dst, Address src);
- void movslq(Register dst, Register src);
-
- void movzbl(Register dst, Address src);
- void movzbl(Register dst, Register src);
- void movzwl(Register dst, Address src);
- void movzwl(Register dst, Register src);
-
- protected: // Avoid using the next instructions directly.
- // New cpus require use of movsd and movss to avoid partial register stall
- // when loading from memory. But for old Opteron use movlpd instead of movsd.
- // The selection is done in MacroAssembler::movdbl() and movflt().
- void movss(XMMRegister dst, XMMRegister src);
- void movss(XMMRegister dst, Address src);
- void movss(Address dst, XMMRegister src);
- void movsd(XMMRegister dst, XMMRegister src);
- void movsd(Address dst, XMMRegister src);
- void movsd(XMMRegister dst, Address src);
- void movlpd(XMMRegister dst, Address src);
- // New cpus require use of movaps and movapd to avoid partial register stall
- // when moving between registers.
- void movapd(XMMRegister dst, XMMRegister src);
- void movaps(XMMRegister dst, XMMRegister src);
- public:
-
- void movdl(XMMRegister dst, Register src);
- void movdl(Register dst, XMMRegister src);
- void movdq(XMMRegister dst, Register src);
- void movdq(Register dst, XMMRegister src);
-
- void cmovl(Condition cc, Register dst, Register src);
- void cmovl(Condition cc, Register dst, Address src);
- void cmovq(Condition cc, Register dst, Register src);
- void cmovq(Condition cc, Register dst, Address src);
-
- // Prefetches
- private:
- void prefetch_prefix(Address src);
- public:
- void prefetcht0(Address src);
- void prefetcht1(Address src);
- void prefetcht2(Address src);
- void prefetchnta(Address src);
- void prefetchw(Address src);
-
- // Arithmetics
- void adcl(Register dst, int imm32);
- void adcl(Register dst, Address src);
- void adcl(Register dst, Register src);
- void adcq(Register dst, int imm32);
- void adcq(Register dst, Address src);
- void adcq(Register dst, Register src);
-
- void addl(Address dst, int imm32);
- void addl(Address dst, Register src);
- void addl(Register dst, int imm32);
- void addl(Register dst, Address src);
- void addl(Register dst, Register src);
- void addq(Address dst, int imm32);
- void addq(Address dst, Register src);
- void addq(Register dst, int imm32);
- void addq(Register dst, Address src);
- void addq(Register dst, Register src);
-
- void andl(Register dst, int imm32);
- void andl(Register dst, Address src);
- void andl(Register dst, Register src);
- void andq(Register dst, int imm32);
- void andq(Register dst, Address src);
- void andq(Register dst, Register src);
-
- void cmpb(Address dst, int imm8);
- void cmpl(Address dst, int imm32);
- void cmpl(Register dst, int imm32);
- void cmpl(Register dst, Register src);
- void cmpl(Register dst, Address src);
- void cmpq(Address dst, int imm32);
- void cmpq(Address dst, Register src);
- void cmpq(Register dst, int imm32);
- void cmpq(Register dst, Register src);
- void cmpq(Register dst, Address src);
-
- void ucomiss(XMMRegister dst, XMMRegister src);
- void ucomisd(XMMRegister dst, XMMRegister src);
-
- protected:
- // Don't use next inc() and dec() methods directly. INC & DEC instructions
- // could cause a partial flag stall since they don't set CF flag.
- // Use MacroAssembler::decrement() & MacroAssembler::increment() methods
- // which call inc() & dec() or add() & sub() in accordance with
- // the product flag UseIncDec value.
-
- void decl(Register dst);
- void decl(Address dst);
- void decq(Register dst);
- void decq(Address dst);
-
- void incl(Register dst);
- void incl(Address dst);
- void incq(Register dst);
- void incq(Address dst);
-
- public:
- void idivl(Register src);
- void idivq(Register src);
- void cdql();
- void cdqq();
-
- void imull(Register dst, Register src);
- void imull(Register dst, Register src, int value);
- void imulq(Register dst, Register src);
- void imulq(Register dst, Register src, int value);
-
- void leal(Register dst, Address src);
- void leaq(Register dst, Address src);
-
- void mull(Address src);
- void mull(Register src);
-
- void negl(Register dst);
- void negq(Register dst);
-
- void notl(Register dst);
- void notq(Register dst);
-
- void orl(Address dst, int imm32);
- void orl(Register dst, int imm32);
- void orl(Register dst, Address src);
- void orl(Register dst, Register src);
- void orq(Address dst, int imm32);
- void orq(Register dst, int imm32);
- void orq(Register dst, Address src);
- void orq(Register dst, Register src);
-
- void rcll(Register dst, int imm8);
- void rclq(Register dst, int imm8);
-
- void sarl(Register dst, int imm8);
- void sarl(Register dst);
- void sarq(Register dst, int imm8);
- void sarq(Register dst);
-
- void sbbl(Address dst, int imm32);
- void sbbl(Register dst, int imm32);
- void sbbl(Register dst, Address src);
- void sbbl(Register dst, Register src);
- void sbbq(Address dst, int imm32);
- void sbbq(Register dst, int imm32);
- void sbbq(Register dst, Address src);
- void sbbq(Register dst, Register src);
-
- void shll(Register dst, int imm8);
- void shll(Register dst);
- void shlq(Register dst, int imm8);
- void shlq(Register dst);
-
- void shrl(Register dst, int imm8);
- void shrl(Register dst);
- void shrq(Register dst, int imm8);
- void shrq(Register dst);
-
- void subl(Address dst, int imm32);
- void subl(Address dst, Register src);
- void subl(Register dst, int imm32);
- void subl(Register dst, Address src);
- void subl(Register dst, Register src);
- void subq(Address dst, int imm32);
- void subq(Address dst, Register src);
- void subq(Register dst, int imm32);
- void subq(Register dst, Address src);
- void subq(Register dst, Register src);
-
- void testb(Register dst, int imm8);
- void testl(Register dst, int imm32);
- void testl(Register dst, Register src);
- void testq(Register dst, int imm32);
- void testq(Register dst, Register src);
-
- void xaddl(Address dst, Register src);
- void xaddq(Address dst, Register src);
-
- void xorl(Register dst, int imm32);
- void xorl(Register dst, Address src);
- void xorl(Register dst, Register src);
- void xorq(Register dst, int imm32);
- void xorq(Register dst, Address src);
- void xorq(Register dst, Register src);
-
- // Miscellaneous
- void bswapl(Register reg);
- void bswapq(Register reg);
- void lock();
-
- void xchgl(Register reg, Address adr);
- void xchgl(Register dst, Register src);
- void xchgq(Register reg, Address adr);
- void xchgq(Register dst, Register src);
-
- void cmpxchgl(Register reg, Address adr);
- void cmpxchgq(Register reg, Address adr);
-
- void nop(int i = 1);
- void addr_nop_4();
- void addr_nop_5();
- void addr_nop_7();
- void addr_nop_8();
-
- void hlt();
- void ret(int imm16);
- void smovl();
- void rep_movl();
- void rep_movq();
- void rep_set();
- void repne_scanl();
- void repne_scanq();
- void setb(Condition cc, Register dst);
-
- void clflush(Address adr);
-
- enum Membar_mask_bits {
- StoreStore = 1 << 3,
- LoadStore = 1 << 2,
- StoreLoad = 1 << 1,
- LoadLoad = 1 << 0
- };
-
- // Serializes memory.
- void membar(Membar_mask_bits order_constraint) {
- // We only have to handle StoreLoad and LoadLoad
- if (order_constraint & StoreLoad) {
- // MFENCE subsumes LFENCE
- mfence();
- } /* [jk] not needed currently: else if (order_constraint & LoadLoad) {
- lfence();
- } */
- }
-
- void lfence() {
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_byte(0xE8);
- }
-
- void mfence() {
- emit_byte(0x0F);
- emit_byte(0xAE);
- emit_byte(0xF0);
- }
-
- // Identify processor type and features
- void cpuid() {
- emit_byte(0x0F);
- emit_byte(0xA2);
- }
-
- void cld() { emit_byte(0xfc);
- }
-
- void std() { emit_byte(0xfd);
- }
-
-
- // Calls
-
- void call(Label& L, relocInfo::relocType rtype);
- void call(Register reg);
- void call(Address adr);
-
- // Jumps
-
- void jmp(Register reg);
- void jmp(Address adr);
-
- // Label operations & relative jumps (PPUM Appendix D)
- // unconditional jump to L
- void jmp(Label& L, relocInfo::relocType rtype = relocInfo::none);
-
-
- // Unconditional 8-bit offset jump to L.
- // WARNING: be very careful using this for forward jumps. If the label is
- // not bound within an 8-bit offset of this instruction, a run-time error
- // will occur.
- void jmpb(Label& L);
-
- // jcc is the generic conditional branch generator to run-time
- // routines; jcc is used for branches to labels. jcc takes a branch
- // opcode (cc) and a label (L) and generates either a backward
- // branch or a forward branch and links it to the label fixup
- // chain. Usage:
- //
- // Label L; // unbound label
- // jcc(cc, L); // forward branch to unbound label
- // bind(L); // bind label to the current pc
- // jcc(cc, L); // backward branch to bound label
- // bind(L); // illegal: a label may be bound only once
- //
- // Note: The same Label can be used for forward and backward branches
- // but it may be bound only once.
-
- void jcc(Condition cc, Label& L,
- relocInfo::relocType rtype = relocInfo::none);
-
- // Conditional jump to a 8-bit offset to L.
- // WARNING: be very careful using this for forward jumps. If the label is
- // not bound within an 8-bit offset of this instruction, a run-time error
- // will occur.
- void jccb(Condition cc, Label& L);
-
- // Floating-point operations
-
- void fxsave(Address dst);
- void fxrstor(Address src);
- void ldmxcsr(Address src);
- void stmxcsr(Address dst);
-
- void addss(XMMRegister dst, XMMRegister src);
- void addss(XMMRegister dst, Address src);
- void subss(XMMRegister dst, XMMRegister src);
- void subss(XMMRegister dst, Address src);
- void mulss(XMMRegister dst, XMMRegister src);
- void mulss(XMMRegister dst, Address src);
- void divss(XMMRegister dst, XMMRegister src);
- void divss(XMMRegister dst, Address src);
- void addsd(XMMRegister dst, XMMRegister src);
- void addsd(XMMRegister dst, Address src);
- void subsd(XMMRegister dst, XMMRegister src);
- void subsd(XMMRegister dst, Address src);
- void mulsd(XMMRegister dst, XMMRegister src);
- void mulsd(XMMRegister dst, Address src);
- void divsd(XMMRegister dst, XMMRegister src);
- void divsd(XMMRegister dst, Address src);
-
- // We only need the double form
- void sqrtsd(XMMRegister dst, XMMRegister src);
- void sqrtsd(XMMRegister dst, Address src);
-
- void xorps(XMMRegister dst, XMMRegister src);
- void xorps(XMMRegister dst, Address src);
- void xorpd(XMMRegister dst, XMMRegister src);
- void xorpd(XMMRegister dst, Address src);
-
- void cvtsi2ssl(XMMRegister dst, Register src);
- void cvtsi2ssq(XMMRegister dst, Register src);
- void cvtsi2sdl(XMMRegister dst, Register src);
- void cvtsi2sdq(XMMRegister dst, Register src);
- void cvttss2sil(Register dst, XMMRegister src); // truncates
- void cvttss2siq(Register dst, XMMRegister src); // truncates
- void cvttsd2sil(Register dst, XMMRegister src); // truncates
- void cvttsd2siq(Register dst, XMMRegister src); // truncates
- void cvtss2sd(XMMRegister dst, XMMRegister src);
- void cvtsd2ss(XMMRegister dst, XMMRegister src);
- void cvtdq2pd(XMMRegister dst, XMMRegister src);
- void cvtdq2ps(XMMRegister dst, XMMRegister src);
-
- void pxor(XMMRegister dst, Address src); // Xor Packed Byte Integer Values
- void pxor(XMMRegister dst, XMMRegister src); // Xor Packed Byte Integer Values
-
- void movdqa(XMMRegister dst, Address src); // Move Aligned Double Quadword
- void movdqa(XMMRegister dst, XMMRegister src);
- void movdqa(Address dst, XMMRegister src);
-
- void movq(XMMRegister dst, Address src);
- void movq(Address dst, XMMRegister src);
-
- void pshufd(XMMRegister dst, XMMRegister src, int mode); // Shuffle Packed Doublewords
- void pshufd(XMMRegister dst, Address src, int mode);
- void pshuflw(XMMRegister dst, XMMRegister src, int mode); // Shuffle Packed Low Words
- void pshuflw(XMMRegister dst, Address src, int mode);
-
- void psrlq(XMMRegister dst, int shift); // Shift Right Logical Quadword Immediate
-
- void punpcklbw(XMMRegister dst, XMMRegister src); // Interleave Low Bytes
- void punpcklbw(XMMRegister dst, Address src);
-};
-
-
-// MacroAssembler extends Assembler by frequently used macros.
-//
-// Instructions for which a 'better' code sequence exists depending
-// on arguments should also go in here.
-
-class MacroAssembler : public Assembler {
- friend class LIR_Assembler;
- protected:
-
- Address as_Address(AddressLiteral adr);
- Address as_Address(ArrayAddress adr);
-
- // Support for VM calls
- //
- // This is the base routine called by the different versions of
- // call_VM_leaf. The interpreter may customize this version by
- // overriding it for its purposes (e.g., to save/restore additional
- // registers when doing a VM call).
-
- virtual void call_VM_leaf_base(
- address entry_point, // the entry point
- int number_of_arguments // the number of arguments to
- // pop after the call
- );
-
- // This is the base routine called by the different versions of
- // call_VM. The interpreter may customize this version by overriding
- // it for its purposes (e.g., to save/restore additional registers
- // when doing a VM call).
- //
- // If no java_thread register is specified (noreg) then rdi will be
- // used instead. call_VM_base returns the register which contains
- // the thread upon return. If a thread register has been specified,
- // the return value will correspond to that register. If no
- // last_java_sp is specified (noreg) then rsp will be used instead.
- virtual void call_VM_base( // returns the register
- // containing the thread upon
- // return
- Register oop_result, // where an oop-result ends up
- // if any; use noreg otherwise
- Register java_thread, // the thread if computed
- // before ; use noreg otherwise
- Register last_java_sp, // to set up last_Java_frame in
- // stubs; use noreg otherwise
- address entry_point, // the entry point
- int number_of_arguments, // the number of arguments (w/o
- // thread) to pop after the
- // call
- bool check_exceptions // whether to check for pending
- // exceptions after return
- );
-
- // These routines should emit JVMTI PopFrame handling and ForceEarlyReturn code.
- // The implementation is only non-empty for the InterpreterMacroAssembler,
- // as only the interpreter handles PopFrame and ForceEarlyReturn requests.
- virtual void check_and_handle_popframe(Register java_thread);
- virtual void check_and_handle_earlyret(Register java_thread);
-
- void call_VM_helper(Register oop_result,
- address entry_point,
- int number_of_arguments,
- bool check_exceptions = true);
-
- public:
- MacroAssembler(CodeBuffer* code) : Assembler(code) {}
-
- // Support for NULL-checks
- //
- // Generates code that causes a NULL OS exception if the content of
- // reg is NULL. If the accessed location is M[reg + offset] and the
- // offset is known, provide the offset. No explicit code generation
- // is needed if the offset is within a certain range (0 <= offset <=
- // page_size).
- void null_check(Register reg, int offset = -1);
- static bool needs_explicit_null_check(intptr_t offset);
-
- // Required platform-specific helpers for Label::patch_instructions.
- // They _shadow_ the declarations in AbstractAssembler, which are undefined.
- void pd_patch_instruction(address branch, address target);
-#ifndef PRODUCT
- static void pd_print_patched_instruction(address branch);
-#endif
-
-
- // The following 4 methods return the offset of the appropriate move
- // instruction. Note: these are 32 bit instructions
-
- // Support for fast byte/word loading with zero extension (depending
- // on particular CPU)
- int load_unsigned_byte(Register dst, Address src);
- int load_unsigned_word(Register dst, Address src);
-
- // Support for fast byte/word loading with sign extension (depending
- // on particular CPU)
- int load_signed_byte(Register dst, Address src);
- int load_signed_word(Register dst, Address src);
-
- // Support for inc/dec with optimal instruction selection depending
- // on value
- void incrementl(Register reg, int value = 1);
- void decrementl(Register reg, int value = 1);
- void incrementq(Register reg, int value = 1);
- void decrementq(Register reg, int value = 1);
-
- void incrementl(Address dst, int value = 1);
- void decrementl(Address dst, int value = 1);
- void incrementq(Address dst, int value = 1);
- void decrementq(Address dst, int value = 1);
-
- // Support optimal SSE move instructions.
- void movflt(XMMRegister dst, XMMRegister src) {
- if (UseXmmRegToRegMoveAll) { movaps(dst, src); return; }
- else { movss (dst, src); return; }
- }
-
- void movflt(XMMRegister dst, Address src) { movss(dst, src); }
-
- void movflt(XMMRegister dst, AddressLiteral src);
-
- void movflt(Address dst, XMMRegister src) { movss(dst, src); }
-
- void movdbl(XMMRegister dst, XMMRegister src) {
- if (UseXmmRegToRegMoveAll) { movapd(dst, src); return; }
- else { movsd (dst, src); return; }
- }
-
- void movdbl(XMMRegister dst, AddressLiteral src);
-
- void movdbl(XMMRegister dst, Address src) {
- if (UseXmmLoadAndClearUpper) { movsd (dst, src); return; }
- else { movlpd(dst, src); return; }
- }
-
- void movdbl(Address dst, XMMRegister src) { movsd(dst, src); }
-
- void incrementl(AddressLiteral dst);
- void incrementl(ArrayAddress dst);
-
- // Alignment
- void align(int modulus);
-
- // Misc
- void fat_nop(); // 5 byte nop
-
-
- // C++ bool manipulation
-
- void movbool(Register dst, Address src);
- void movbool(Address dst, bool boolconst);
- void movbool(Address dst, Register src);
- void testbool(Register dst);
-
- // oop manipulations
- void load_klass(Register dst, Register src);
- void store_klass(Register dst, Register src);
- void store_klass_gap(Register dst, Register src);
-
- void load_prototype_header(Register dst, Register src);
-
- void load_heap_oop(Register dst, Address src);
- void store_heap_oop(Address dst, Register src);
- void encode_heap_oop(Register r);
- void decode_heap_oop(Register r);
- void encode_heap_oop_not_null(Register r);
- void decode_heap_oop_not_null(Register r);
- void encode_heap_oop_not_null(Register dst, Register src);
- void decode_heap_oop_not_null(Register dst, Register src);
-
- void set_narrow_oop(Register dst, jobject obj);
-
- // Stack frame creation/removal
- void enter();
- void leave();
-
- // Support for getting the JavaThread pointer (i.e., a reference to
- // thread-local information). The pointer will be loaded into the
- // thread register.
- void get_thread(Register thread);
-
- void int3();
-
- // Support for VM calls
- //
- // It is imperative that all calls into the VM are handled via the
- // call_VM macros. They make sure that the stack linkage is setup
- // correctly. call_VM's correspond to ENTRY/ENTRY_X entry points
- // while call_VM_leaf's correspond to LEAF entry points.
- void call_VM(Register oop_result,
- address entry_point,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- address entry_point,
- Register arg_1,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- address entry_point,
- Register arg_1, Register arg_2,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- address entry_point,
- Register arg_1, Register arg_2, Register arg_3,
- bool check_exceptions = true);
-
- // Overloadings with last_Java_sp
- void call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- int number_of_arguments = 0,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1, bool
- check_exceptions = true);
- void call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1, Register arg_2,
- bool check_exceptions = true);
- void call_VM(Register oop_result,
- Register last_java_sp,
- address entry_point,
- Register arg_1, Register arg_2, Register arg_3,
- bool check_exceptions = true);
-
- void call_VM_leaf(address entry_point,
- int number_of_arguments = 0);
- void call_VM_leaf(address entry_point,
- Register arg_1);
- void call_VM_leaf(address entry_point,
- Register arg_1, Register arg_2);
- void call_VM_leaf(address entry_point,
- Register arg_1, Register arg_2, Register arg_3);
-
- // last Java Frame (fills frame anchor)
- void set_last_Java_frame(Register last_java_sp,
- Register last_java_fp,
- address last_java_pc);
- void reset_last_Java_frame(bool clear_fp, bool clear_pc);
-
- // Stores
- void store_check(Register obj); // store check for
- // obj - register is
- // destroyed
- // afterwards
- void store_check(Register obj, Address dst); // same as above, dst
- // is exact store
- // location (reg. is
- // destroyed)
-
- // split store_check(Register obj) to enhance instruction interleaving
- void store_check_part_1(Register obj);
- void store_check_part_2(Register obj);
-
- // C 'boolean' to Java boolean: x == 0 ? 0 : 1
- void c2bool(Register x);
-
- // Int division/remainder for Java
- // (as idivl, but checks for special case as described in JVM spec.)
- // returns idivl instruction offset for implicit exception handling
- int corrected_idivl(Register reg);
- // Long division/remainder for Java
- // (as idivq, but checks for special case as described in JVM spec.)
- // returns idivq instruction offset for implicit exception handling
- int corrected_idivq(Register reg);
-
- // Push and pop integer/fpu/cpu state
- void push_IU_state();
- void pop_IU_state();
-
- void push_FPU_state();
- void pop_FPU_state();
-
- void push_CPU_state();
- void pop_CPU_state();
-
- // Sign extension
- void sign_extend_short(Register reg);
- void sign_extend_byte(Register reg);
-
- // Division by power of 2, rounding towards 0
- void division_with_shift(Register reg, int shift_value);
-
- // Round up to a power of two
- void round_to_l(Register reg, int modulus);
- void round_to_q(Register reg, int modulus);
-
- // allocation
- void eden_allocate(
- Register obj, // result: pointer to object after
- // successful allocation
- Register var_size_in_bytes, // object size in bytes if unknown at
- // compile time; invalid otherwise
- int con_size_in_bytes, // object size in bytes if known at
- // compile time
- Register t1, // temp register
- Label& slow_case // continuation point if fast
- // allocation fails
- );
- void tlab_allocate(
- Register obj, // result: pointer to object after
- // successful allocation
- Register var_size_in_bytes, // object size in bytes if unknown at
- // compile time; invalid otherwise
- int con_size_in_bytes, // object size in bytes if known at
- // compile time
- Register t1, // temp register
- Register t2, // temp register
- Label& slow_case // continuation point if fast
- // allocation fails
- );
- void tlab_refill(Label& retry_tlab, Label& try_eden, Label& slow_case);
-
- //----
-
- // Debugging
-
- // only if +VerifyOops
- void verify_oop(Register reg, const char* s = "broken oop");
- void verify_oop_addr(Address addr, const char * s = "broken oop addr");
-
- // if heap base register is used - reinit it with the correct value
- void reinit_heapbase();
-
- // only if +VerifyFPU
- void verify_FPU(int stack_depth, const char* s = "illegal FPU state") {}
-
- // prints msg, dumps registers and stops execution
- void stop(const char* msg);
-
- // prints message and continues
- void warn(const char* msg);
-
- static void debug(char* msg, int64_t pc, int64_t regs[]);
-
- void os_breakpoint();
-
- void untested()
- {
- stop("untested");
- }
-
- void unimplemented(const char* what = "")
- {
- char* b = new char[1024];
- sprintf(b, "unimplemented: %s", what);
- stop(b);
- }
-
- void should_not_reach_here()
- {
- stop("should not reach here");
- }
-
- // Stack overflow checking
- void bang_stack_with_offset(int offset)
- {
- // stack grows down, caller passes positive offset
- assert(offset > 0, "must bang with positive offset");
- movl(Address(rsp, (-offset)), rax);
- }
-
- // Writes to successive stack pages until the given offset is reached, to
- // check for stack overflow + shadow pages. Also clobbers tmp.
- void bang_stack_size(Register offset, Register tmp);
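A hedged sketch of the banging loop this implies (page stepping and shadow-page handling are simplified; the real routine works on registers and clobbers tmp):

    // Touch one word per page from sp down to sp - offset so a stack overflow
    // faults eagerly instead of silently running into the shadow zone.
    static void bang_stack_sketch(volatile char* sp, long offset, long page_size) {
      for (long banged = page_size; banged <= offset; banged += page_size) {
        sp[-banged] = 0;
      }
    }
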
-
- // Support for serializing memory accesses between threads.
- void serialize_memory(Register thread, Register tmp);
-
- void verify_tlab();
-
- // Biased locking support
- // lock_reg and obj_reg must be loaded up with the appropriate values.
- // swap_reg must be rax and is killed.
- // tmp_reg must be supplied and is killed.
- // If swap_reg_contains_mark is true then the code assumes that the
- // mark word of the object has already been loaded into swap_reg.
- // Optional slow case is for implementations (interpreter and C1) which branch to
- // slow case directly. Leaves condition codes set for C2's Fast_Lock node.
- // Returns offset of first potentially-faulting instruction for null
- // check info (currently consumed only by C1). If
- // swap_reg_contains_mark is true then returns -1 as it is assumed
- // the calling code has already passed any potential faults.
- int biased_locking_enter(Register lock_reg, Register obj_reg, Register swap_reg, Register tmp_reg,
- bool swap_reg_contains_mark,
- Label& done, Label* slow_case = NULL,
- BiasedLockingCounters* counters = NULL);
- void biased_locking_exit (Register obj_reg, Register temp_reg, Label& done);
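A usage sketch based only on the declaration above (register choices, labels and the surrounding MacroAssembler context are illustrative):

    // Label done, slow_case;
    // int null_check_offset =
    //     masm->biased_locking_enter(lock_reg, obj_reg, rax /* swap_reg, killed */,
    //                                tmp_reg /* killed */,
    //                                false /* swap_reg_contains_mark */,
    //                                done, &slow_case, NULL /* counters */);
    // // null_check_offset is -1 when swap_reg already held the mark word
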
-
- Condition negate_condition(Condition cond);
-
- // Instructions that use AddressLiteral operands. These instructions can handle 32bit/64bit
- // operands. In general the names are modified to avoid hiding the instruction in Assembler
- // so that we don't need to implement all the varieties in the Assembler with trivial wrappers
- // here in MacroAssembler. The major exception to this rule is call.
-
- // Arithmetics
-
- void cmp8(AddressLiteral src1, int8_t imm32);
-
- void cmp32(AddressLiteral src1, int32_t src2);
- // compare reg - mem, or reg - &mem
- void cmp32(Register src1, AddressLiteral src2);
-
- void cmp32(Register src1, Address src2);
-
-#ifndef _LP64
- void cmpoop(Address dst, jobject obj);
- void cmpoop(Register dst, jobject obj);
-#endif // _LP64
-
- // NOTE src2 must be the lval. This is NOT a mem-mem compare
- void cmpptr(Address src1, AddressLiteral src2);
-
- void cmpptr(Register src1, AddressLiteral src);
-
- // will be cmpreg(?)
- void cmp64(Register src1, AddressLiteral src);
-
- void cmpxchgptr(Register reg, Address adr);
- void cmpxchgptr(Register reg, AddressLiteral adr);
-
- // Helper functions for statistics gathering.
- // Conditionally (atomically, on MPs) increments passed counter address, preserving condition codes.
- void cond_inc32(Condition cond, AddressLiteral counter_addr);
- // Unconditional atomic increment.
- void atomic_incl(AddressLiteral counter_addr);
-
-
- void lea(Register dst, AddressLiteral src);
- void lea(Register dst, Address src);
-
-
- // Calls
- void call(Label& L, relocInfo::relocType rtype);
- void call(Register entry);
- void call(AddressLiteral entry);
-
- // Jumps
-
- // 32bit can do a case table jump in one instruction but we no longer allow the base
- // to be installed in the Address class
- void jump(ArrayAddress entry);
-
- void jump(AddressLiteral entry);
- void jump_cc(Condition cc, AddressLiteral dst);
-
- // Floating
-
- void ldmxcsr(Address src) { Assembler::ldmxcsr(src); }
- void ldmxcsr(AddressLiteral src);
-
-private:
- // these are private because users should be doing movflt/movdbl
-
- void movss(XMMRegister dst, XMMRegister src) { Assembler::movss(dst, src); }
- void movss(Address dst, XMMRegister src) { Assembler::movss(dst, src); }
- void movss(XMMRegister dst, Address src) { Assembler::movss(dst, src); }
- void movss(XMMRegister dst, AddressLiteral src);
-
- void movlpd(XMMRegister dst, Address src) {Assembler::movlpd(dst, src); }
- void movlpd(XMMRegister dst, AddressLiteral src);
-
-public:
-
-
- void xorpd(XMMRegister dst, XMMRegister src) {Assembler::xorpd(dst, src); }
- void xorpd(XMMRegister dst, Address src) {Assembler::xorpd(dst, src); }
- void xorpd(XMMRegister dst, AddressLiteral src);
-
- void xorps(XMMRegister dst, XMMRegister src) {Assembler::xorps(dst, src); }
- void xorps(XMMRegister dst, Address src) {Assembler::xorps(dst, src); }
- void xorps(XMMRegister dst, AddressLiteral src);
-
-
- // Data
-
- void movoop(Register dst, jobject obj);
- void movoop(Address dst, jobject obj);
-
- void movptr(ArrayAddress dst, Register src);
- void movptr(Register dst, AddressLiteral src);
-
- void movptr(Register dst, intptr_t src);
- void movptr(Address dst, intptr_t src);
-
- void movptr(Register dst, ArrayAddress src);
-
- // to avoid hiding movl
- void mov32(AddressLiteral dst, Register src);
- void mov32(Register dst, AddressLiteral src);
-
- void pushoop(jobject obj);
-
- // Can push value or effective address
- void pushptr(AddressLiteral src);
-
-};
-
-/**
- * class SkipIfEqual:
- *
- * Instantiating this class will result in assembly code being output that will
- * jump around any code emitted between the creation of the instance and its
- * automatic destruction at the end of a scope block, depending on the value of
- * the flag passed to the constructor, which will be checked at run-time.
- */
-class SkipIfEqual {
- private:
- MacroAssembler* _masm;
- Label _label;
-
- public:
- SkipIfEqual(MacroAssembler*, const bool* flag_addr, bool value);
- ~SkipIfEqual();
-};
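A usage sketch, assuming a boolean VM flag whose name here is made up:

    // {
    //   SkipIfEqual skip(masm, &SomeBoolFlag, false);
    //   // ... code emitted here is jumped over at run time whenever
    //   // SomeBoolFlag == false (the value passed to the constructor) ...
    // }  // destructor binds the skip-to label at the end of the scope
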
-
-
-#ifdef ASSERT
-inline bool AbstractAssembler::pd_check_instruction_mark() { return true; }
-#endif
diff --git a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
index 09419c956..283a63d85 100644
--- a/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
+++ b/src/cpu/x86/vm/c1_CodeStubs_x86.cpp
@@ -43,11 +43,12 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
__ comisd(input()->as_xmm_double_reg(),
ExternalAddress((address)&double_zero));
} else {
- __ pushl(rax);
+ LP64_ONLY(ShouldNotReachHere());
+ __ push(rax);
__ ftst();
__ fnstsw_ax();
__ sahf();
- __ popl(rax);
+ __ pop(rax);
}
Label NaN, do_return;
@@ -61,7 +62,7 @@ void ConversionStub::emit_code(LIR_Assembler* ce) {
// input is NaN -> return 0
__ bind(NaN);
- __ xorl(result()->as_register(), result()->as_register());
+ __ xorptr(result()->as_register(), result()->as_register());
__ bind(do_return);
__ jmp(_continuation);
@@ -139,7 +140,7 @@ NewInstanceStub::NewInstanceStub(LIR_Opr klass_reg, LIR_Opr result, ciInstanceKl
void NewInstanceStub::emit_code(LIR_Assembler* ce) {
assert(__ rsp_offset() == 0, "frame size should be fixed");
__ bind(_entry);
- __ movl(rdx, _klass_reg->as_register());
+ __ movptr(rdx, _klass_reg->as_register());
__ call(RuntimeAddress(Runtime1::entry_for(_stub_id)));
ce->add_call_info_here(_info);
ce->verify_oop_map(_info);
@@ -306,10 +307,10 @@ void PatchingStub::emit_code(LIR_Assembler* ce) {
assert(_obj != noreg, "must be a valid register");
Register tmp = rax;
if (_obj == tmp) tmp = rbx;
- __ pushl(tmp);
+ __ push(tmp);
__ get_thread(tmp);
- __ cmpl(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
- __ popl(tmp);
+ __ cmpptr(tmp, Address(_obj, instanceKlass::init_thread_offset_in_bytes() + sizeof(klassOopDesc)));
+ __ pop(tmp);
__ jcc(Assembler::notEqual, call_patch);
// access_field patches may execute the patched code before it's
@@ -434,7 +435,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
VMReg r_1 = args[i].first();
if (r_1->is_stack()) {
int st_off = r_1->reg2stack() * wordSize;
- __ movl (Address(rsp, st_off), r[i]);
+ __ movptr (Address(rsp, st_off), r[i]);
} else {
assert(r[i] == args[i].first()->as_Register(), "Wrong register for arg ");
}
@@ -449,7 +450,7 @@ void ArrayCopyStub::emit_code(LIR_Assembler* ce) {
ce->add_call_info_here(info());
#ifndef PRODUCT
- __ increment(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
+ __ incrementl(ExternalAddress((address)&Runtime1::_arraycopy_slowcase_cnt));
#endif
__ jmp(_continuation);
diff --git a/src/cpu/x86/vm/c1_Defs_x86.hpp b/src/cpu/x86/vm/c1_Defs_x86.hpp
index 8594a8349..e8d3e8b67 100644
--- a/src/cpu/x86/vm/c1_Defs_x86.hpp
+++ b/src/cpu/x86/vm/c1_Defs_x86.hpp
@@ -36,27 +36,34 @@ enum {
// registers
enum {
- pd_nof_cpu_regs_frame_map = 8, // number of registers used during code emission
- pd_nof_fpu_regs_frame_map = 8, // number of registers used during code emission
- pd_nof_xmm_regs_frame_map = 8, // number of registers used during code emission
- pd_nof_caller_save_cpu_regs_frame_map = 6, // number of registers killed by calls
- pd_nof_caller_save_fpu_regs_frame_map = 8, // number of registers killed by calls
- pd_nof_caller_save_xmm_regs_frame_map = 8, // number of registers killed by calls
+ pd_nof_cpu_regs_frame_map = RegisterImpl::number_of_registers, // number of registers used during code emission
+ pd_nof_fpu_regs_frame_map = FloatRegisterImpl::number_of_registers, // number of registers used during code emission
+ pd_nof_xmm_regs_frame_map = XMMRegisterImpl::number_of_registers, // number of registers used during code emission
- pd_nof_cpu_regs_reg_alloc = 6, // number of registers that are visible to register allocator
+#ifdef _LP64
+ #define UNALLOCATED 4 // rsp, rbp, r15, r10
+#else
+ #define UNALLOCATED 2 // rsp, rbp
+#endif // LP64
+
+ pd_nof_caller_save_cpu_regs_frame_map = pd_nof_cpu_regs_frame_map - UNALLOCATED, // number of registers killed by calls
+ pd_nof_caller_save_fpu_regs_frame_map = pd_nof_fpu_regs_frame_map, // number of registers killed by calls
+ pd_nof_caller_save_xmm_regs_frame_map = pd_nof_xmm_regs_frame_map, // number of registers killed by calls
+
+ pd_nof_cpu_regs_reg_alloc = pd_nof_caller_save_cpu_regs_frame_map, // number of registers that are visible to register allocator
pd_nof_fpu_regs_reg_alloc = 6, // number of registers that are visible to register allocator
- pd_nof_cpu_regs_linearscan = 8, // number of registers visible to linear scan
- pd_nof_fpu_regs_linearscan = 8, // number of registers visible to linear scan
- pd_nof_xmm_regs_linearscan = 8, // number of registers visible to linear scan
+ pd_nof_cpu_regs_linearscan = pd_nof_cpu_regs_frame_map, // number of registers visible to linear scan
+ pd_nof_fpu_regs_linearscan = pd_nof_fpu_regs_frame_map, // number of registers visible to linear scan
+ pd_nof_xmm_regs_linearscan = pd_nof_xmm_regs_frame_map, // number of registers visible to linear scan
pd_first_cpu_reg = 0,
- pd_last_cpu_reg = 5,
+ pd_last_cpu_reg = NOT_LP64(5) LP64_ONLY(11),
pd_first_byte_reg = 2,
pd_last_byte_reg = 5,
pd_first_fpu_reg = pd_nof_cpu_regs_frame_map,
pd_last_fpu_reg = pd_first_fpu_reg + 7,
pd_first_xmm_reg = pd_nof_cpu_regs_frame_map + pd_nof_fpu_regs_frame_map,
- pd_last_xmm_reg = pd_first_xmm_reg + 7
+ pd_last_xmm_reg = pd_first_xmm_reg + pd_nof_xmm_regs_frame_map - 1
};
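Worked out with the counts asserted elsewhere in this change (16 CPU registers on LP64, 8 on 32-bit):

    //   LP64:   caller-save/allocatable = 16 - 4 (rsp, rbp, r15, r10) = 12, so pd_last_cpu_reg = 11
    //   32-bit: caller-save/allocatable =  8 - 2 (rsp, rbp)           =  6, so pd_last_cpu_reg = 5
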
diff --git a/src/cpu/x86/vm/c1_FrameMap_x86.cpp b/src/cpu/x86/vm/c1_FrameMap_x86.cpp
index 2118ec448..a48a8576f 100644
--- a/src/cpu/x86/vm/c1_FrameMap_x86.cpp
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.cpp
@@ -39,10 +39,15 @@ LIR_Opr FrameMap::map_to_opr(BasicType type, VMRegPair* reg, bool) {
opr = LIR_OprFact::address(new LIR_Address(rsp_opr, st_off, type));
} else if (r_1->is_Register()) {
Register reg = r_1->as_Register();
- if (r_2->is_Register()) {
+ if (r_2->is_Register() && (type == T_LONG || type == T_DOUBLE)) {
Register reg2 = r_2->as_Register();
+#ifdef _LP64
+ assert(reg2 == reg, "must be same register");
+ opr = as_long_opr(reg);
+#else
opr = as_long_opr(reg2, reg);
- } else if (type == T_OBJECT) {
+#endif // _LP64
+ } else if (type == T_OBJECT || type == T_ARRAY) {
opr = as_oop_opr(reg);
} else {
opr = as_opr(reg);
@@ -88,18 +93,39 @@ LIR_Opr FrameMap::rax_oop_opr;
LIR_Opr FrameMap::rdx_oop_opr;
LIR_Opr FrameMap::rcx_oop_opr;
-LIR_Opr FrameMap::rax_rdx_long_opr;
-LIR_Opr FrameMap::rbx_rcx_long_opr;
+LIR_Opr FrameMap::long0_opr;
+LIR_Opr FrameMap::long1_opr;
LIR_Opr FrameMap::fpu0_float_opr;
LIR_Opr FrameMap::fpu0_double_opr;
LIR_Opr FrameMap::xmm0_float_opr;
LIR_Opr FrameMap::xmm0_double_opr;
+#ifdef _LP64
+
+LIR_Opr FrameMap::r8_opr;
+LIR_Opr FrameMap::r9_opr;
+LIR_Opr FrameMap::r10_opr;
+LIR_Opr FrameMap::r11_opr;
+LIR_Opr FrameMap::r12_opr;
+LIR_Opr FrameMap::r13_opr;
+LIR_Opr FrameMap::r14_opr;
+LIR_Opr FrameMap::r15_opr;
+
+// r10 and r15 can never contain oops since they aren't available to
+// the allocator
+LIR_Opr FrameMap::r8_oop_opr;
+LIR_Opr FrameMap::r9_oop_opr;
+LIR_Opr FrameMap::r11_oop_opr;
+LIR_Opr FrameMap::r12_oop_opr;
+LIR_Opr FrameMap::r13_oop_opr;
+LIR_Opr FrameMap::r14_oop_opr;
+#endif // _LP64
+
LIR_Opr FrameMap::_caller_save_cpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_fpu_regs[] = { 0, };
LIR_Opr FrameMap::_caller_save_xmm_regs[] = { 0, };
-XMMRegister FrameMap::_xmm_regs [8] = { 0, };
+XMMRegister FrameMap::_xmm_regs [] = { 0, };
XMMRegister FrameMap::nr2xmmreg(int rnr) {
assert(_init_done, "tables not initialized");
@@ -113,18 +139,39 @@ XMMRegister FrameMap::nr2xmmreg(int rnr) {
void FrameMap::init() {
if (_init_done) return;
- assert(nof_cpu_regs == 8, "wrong number of CPU registers");
- map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0); rsi_oop_opr = LIR_OprFact::single_cpu_oop(0);
- map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1); rdi_oop_opr = LIR_OprFact::single_cpu_oop(1);
- map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2); rbx_oop_opr = LIR_OprFact::single_cpu_oop(2);
- map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3); rax_oop_opr = LIR_OprFact::single_cpu_oop(3);
- map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4); rdx_oop_opr = LIR_OprFact::single_cpu_oop(4);
- map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5); rcx_oop_opr = LIR_OprFact::single_cpu_oop(5);
- map_register(6, rsp); rsp_opr = LIR_OprFact::single_cpu(6);
- map_register(7, rbp); rbp_opr = LIR_OprFact::single_cpu(7);
-
- rax_rdx_long_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
- rbx_rcx_long_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
+ assert(nof_cpu_regs == LP64_ONLY(16) NOT_LP64(8), "wrong number of CPU registers");
+ map_register(0, rsi); rsi_opr = LIR_OprFact::single_cpu(0);
+ map_register(1, rdi); rdi_opr = LIR_OprFact::single_cpu(1);
+ map_register(2, rbx); rbx_opr = LIR_OprFact::single_cpu(2);
+ map_register(3, rax); rax_opr = LIR_OprFact::single_cpu(3);
+ map_register(4, rdx); rdx_opr = LIR_OprFact::single_cpu(4);
+ map_register(5, rcx); rcx_opr = LIR_OprFact::single_cpu(5);
+
+#ifndef _LP64
+ // The unallocatable registers are at the end
+ map_register(6, rsp);
+ map_register(7, rbp);
+#else
+ map_register( 6, r8); r8_opr = LIR_OprFact::single_cpu(6);
+ map_register( 7, r9); r9_opr = LIR_OprFact::single_cpu(7);
+ map_register( 8, r11); r11_opr = LIR_OprFact::single_cpu(8);
+ map_register( 9, r12); r12_opr = LIR_OprFact::single_cpu(9);
+ map_register(10, r13); r13_opr = LIR_OprFact::single_cpu(10);
+ map_register(11, r14); r14_opr = LIR_OprFact::single_cpu(11);
+ // The unallocatable registers are at the end
+ map_register(12, r10); r10_opr = LIR_OprFact::single_cpu(12);
+ map_register(13, r15); r15_opr = LIR_OprFact::single_cpu(13);
+ map_register(14, rsp);
+ map_register(15, rbp);
+#endif // _LP64
+
+#ifdef _LP64
+ long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 3 /*eax*/);
+ long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 2 /*ebx*/);
+#else
+ long0_opr = LIR_OprFact::double_cpu(3 /*eax*/, 4 /*edx*/);
+ long1_opr = LIR_OprFact::double_cpu(2 /*ebx*/, 5 /*ecx*/);
+#endif // _LP64
fpu0_float_opr = LIR_OprFact::single_fpu(0);
fpu0_double_opr = LIR_OprFact::double_fpu(0);
xmm0_float_opr = LIR_OprFact::single_xmm(0);
@@ -137,6 +184,15 @@ void FrameMap::init() {
_caller_save_cpu_regs[4] = rdx_opr;
_caller_save_cpu_regs[5] = rcx_opr;
+#ifdef _LP64
+ _caller_save_cpu_regs[6] = r8_opr;
+ _caller_save_cpu_regs[7] = r9_opr;
+ _caller_save_cpu_regs[8] = r11_opr;
+ _caller_save_cpu_regs[9] = r12_opr;
+ _caller_save_cpu_regs[10] = r13_opr;
+ _caller_save_cpu_regs[11] = r14_opr;
+#endif // _LP64
+
_xmm_regs[0] = xmm0;
_xmm_regs[1] = xmm1;
@@ -147,18 +203,51 @@ void FrameMap::init() {
_xmm_regs[6] = xmm6;
_xmm_regs[7] = xmm7;
+#ifdef _LP64
+ _xmm_regs[8] = xmm8;
+ _xmm_regs[9] = xmm9;
+ _xmm_regs[10] = xmm10;
+ _xmm_regs[11] = xmm11;
+ _xmm_regs[12] = xmm12;
+ _xmm_regs[13] = xmm13;
+ _xmm_regs[14] = xmm14;
+ _xmm_regs[15] = xmm15;
+#endif // _LP64
+
for (int i = 0; i < 8; i++) {
_caller_save_fpu_regs[i] = LIR_OprFact::single_fpu(i);
+ }
+
+ for (int i = 0; i < nof_caller_save_xmm_regs ; i++) {
_caller_save_xmm_regs[i] = LIR_OprFact::single_xmm(i);
}
_init_done = true;
+ rsi_oop_opr = as_oop_opr(rsi);
+ rdi_oop_opr = as_oop_opr(rdi);
+ rbx_oop_opr = as_oop_opr(rbx);
+ rax_oop_opr = as_oop_opr(rax);
+ rdx_oop_opr = as_oop_opr(rdx);
+ rcx_oop_opr = as_oop_opr(rcx);
+
+ rsp_opr = as_pointer_opr(rsp);
+ rbp_opr = as_pointer_opr(rbp);
+
+#ifdef _LP64
+ r8_oop_opr = as_oop_opr(r8);
+ r9_oop_opr = as_oop_opr(r9);
+ r11_oop_opr = as_oop_opr(r11);
+ r12_oop_opr = as_oop_opr(r12);
+ r13_oop_opr = as_oop_opr(r13);
+ r14_oop_opr = as_oop_opr(r14);
+#endif // _LP64
+
VMRegPair regs;
BasicType sig_bt = T_OBJECT;
SharedRuntime::java_calling_convention(&sig_bt, &regs, 1, true);
receiver_opr = as_oop_opr(regs.first()->as_Register());
- assert(receiver_opr == rcx_oop_opr, "rcvr ought to be rcx");
+
}
diff --git a/src/cpu/x86/vm/c1_FrameMap_x86.hpp b/src/cpu/x86/vm/c1_FrameMap_x86.hpp
index 419b8600a..d5d9816b7 100644
--- a/src/cpu/x86/vm/c1_FrameMap_x86.hpp
+++ b/src/cpu/x86/vm/c1_FrameMap_x86.hpp
@@ -38,8 +38,13 @@
nof_xmm_regs = pd_nof_xmm_regs_frame_map,
nof_caller_save_xmm_regs = pd_nof_caller_save_xmm_regs_frame_map,
first_available_sp_in_frame = 0,
+#ifndef _LP64
frame_pad_in_bytes = 8,
nof_reg_args = 2
+#else
+ frame_pad_in_bytes = 16,
+ nof_reg_args = 6
+#endif // _LP64
};
private:
@@ -65,17 +70,49 @@
static LIR_Opr rax_oop_opr;
static LIR_Opr rdx_oop_opr;
static LIR_Opr rcx_oop_opr;
+#ifdef _LP64
- static LIR_Opr rax_rdx_long_opr;
- static LIR_Opr rbx_rcx_long_opr;
+ static LIR_Opr r8_opr;
+ static LIR_Opr r9_opr;
+ static LIR_Opr r10_opr;
+ static LIR_Opr r11_opr;
+ static LIR_Opr r12_opr;
+ static LIR_Opr r13_opr;
+ static LIR_Opr r14_opr;
+ static LIR_Opr r15_opr;
+
+ static LIR_Opr r8_oop_opr;
+ static LIR_Opr r9_oop_opr;
+
+ static LIR_Opr r11_oop_opr;
+ static LIR_Opr r12_oop_opr;
+ static LIR_Opr r13_oop_opr;
+ static LIR_Opr r14_oop_opr;
+
+#endif // _LP64
+
+ static LIR_Opr long0_opr;
+ static LIR_Opr long1_opr;
static LIR_Opr fpu0_float_opr;
static LIR_Opr fpu0_double_opr;
static LIR_Opr xmm0_float_opr;
static LIR_Opr xmm0_double_opr;
+#ifdef _LP64
+ static LIR_Opr as_long_opr(Register r) {
+ return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+ }
+ static LIR_Opr as_pointer_opr(Register r) {
+ return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r));
+ }
+#else
static LIR_Opr as_long_opr(Register r, Register r2) {
return LIR_OprFact::double_cpu(cpu_reg2rnr(r), cpu_reg2rnr(r2));
}
+ static LIR_Opr as_pointer_opr(Register r) {
+ return LIR_OprFact::single_cpu(cpu_reg2rnr(r));
+ }
+#endif // _LP64
// VMReg name for spilled physical FPU stack slot n
static VMReg fpu_regname (int n);
diff --git a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
index 081f38c3b..c267d0e6a 100644
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.cpp
@@ -113,7 +113,7 @@ bool LIR_Assembler::is_small_constant(LIR_Opr opr) {
LIR_Opr LIR_Assembler::receiverOpr() {
- return FrameMap::rcx_oop_opr;
+ return FrameMap::receiver_opr;
}
LIR_Opr LIR_Assembler::incomingReceiverOpr() {
@@ -121,7 +121,7 @@ LIR_Opr LIR_Assembler::incomingReceiverOpr() {
}
LIR_Opr LIR_Assembler::osrBufferPointer() {
- return FrameMap::rcx_opr;
+ return FrameMap::as_pointer_opr(receiverOpr()->as_register());
}
//--------------fpu register translations-----------------------
@@ -181,7 +181,7 @@ void LIR_Assembler::push(LIR_Opr opr) {
if (opr->is_single_cpu()) {
__ push_reg(opr->as_register());
} else if (opr->is_double_cpu()) {
- __ push_reg(opr->as_register_hi());
+ NOT_LP64(__ push_reg(opr->as_register_hi()));
__ push_reg(opr->as_register_lo());
} else if (opr->is_stack()) {
__ push_addr(frame_map()->address_for_slot(opr->single_stack_ix()));
@@ -202,31 +202,45 @@ void LIR_Assembler::push(LIR_Opr opr) {
void LIR_Assembler::pop(LIR_Opr opr) {
if (opr->is_single_cpu()) {
- __ pop(opr->as_register());
+ __ pop_reg(opr->as_register());
} else {
ShouldNotReachHere();
}
}
+bool LIR_Assembler::is_literal_address(LIR_Address* addr) {
+ return addr->base()->is_illegal() && addr->index()->is_illegal();
+}
+
//-------------------------------------------
+
Address LIR_Assembler::as_Address(LIR_Address* addr) {
+ return as_Address(addr, rscratch1);
+}
+
+Address LIR_Assembler::as_Address(LIR_Address* addr, Register tmp) {
if (addr->base()->is_illegal()) {
assert(addr->index()->is_illegal(), "must be illegal too");
- //return Address(addr->disp(), relocInfo::none);
- // hack for now since this should really return an AddressLiteral
- // which will have to await 64bit c1 changes.
- return Address(noreg, addr->disp());
+ AddressLiteral laddr((address)addr->disp(), relocInfo::none);
+ if (! __ reachable(laddr)) {
+ __ movptr(tmp, laddr.addr());
+ Address res(tmp, 0);
+ return res;
+ } else {
+ return __ as_Address(laddr);
+ }
}
- Register base = addr->base()->as_register();
+ Register base = addr->base()->as_pointer_register();
if (addr->index()->is_illegal()) {
return Address( base, addr->disp());
- } else if (addr->index()->is_single_cpu()) {
- Register index = addr->index()->as_register();
+ } else if (addr->index()->is_cpu_register()) {
+ Register index = addr->index()->as_pointer_register();
return Address(base, index, (Address::ScaleFactor) addr->scale(), addr->disp());
} else if (addr->index()->is_constant()) {
- int addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
+ intptr_t addr_offset = (addr->index()->as_constant_ptr()->as_jint() << addr->scale()) + addr->disp();
+ assert(Assembler::is_simm32(addr_offset), "must be");
return Address(base, addr_offset);
} else {
@@ -284,7 +298,7 @@ void LIR_Assembler::osr_entry() {
// All other registers are dead at this point and the locals will be
// copied into place by code emitted in the IR.
- Register OSR_buf = osrBufferPointer()->as_register();
+ Register OSR_buf = osrBufferPointer()->as_pointer_register();
{ assert(frame::interpreter_frame_monitor_size() == BasicObjectLock::size(), "adjust code below");
int monitor_offset = BytesPerWord * method()->max_locals() +
(BasicObjectLock::size() * BytesPerWord) * (number_of_locks - 1);
@@ -294,16 +308,16 @@ void LIR_Assembler::osr_entry() {
// verify the interpreter's monitor has a non-null object
{
Label L;
- __ cmpl(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
+ __ cmpptr(Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("locked object is NULL");
__ bind(L);
}
#endif
- __ movl(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
- __ movl(frame_map()->address_for_monitor_lock(i), rbx);
- __ movl(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
- __ movl(frame_map()->address_for_monitor_object(i), rbx);
+ __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::lock_offset_in_bytes()));
+ __ movptr(frame_map()->address_for_monitor_lock(i), rbx);
+ __ movptr(rbx, Address(OSR_buf, slot_offset + BasicObjectLock::obj_offset_in_bytes()));
+ __ movptr(frame_map()->address_for_monitor_object(i), rbx);
}
}
}
@@ -313,10 +327,11 @@ void LIR_Assembler::osr_entry() {
int LIR_Assembler::check_icache() {
Register receiver = FrameMap::receiver_opr->as_register();
Register ic_klass = IC_Klass;
+ const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
if (!VerifyOops) {
// insert some nops so that the verified entry point is aligned on CodeEntryAlignment
- while ((__ offset() + 9) % CodeEntryAlignment != 0) {
+ while ((__ offset() + ic_cmp_size) % CodeEntryAlignment != 0) {
__ nop();
}
}
@@ -347,7 +362,7 @@ void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_
// and cannot block => no GC can happen
// The slow case (MonitorAccessStub) uses the first two stack slots
// ([esp+0] and [esp+4]), therefore we store the exception at [esp+8]
- __ movl (Address(rsp, 2*wordSize), exception);
+ __ movptr (Address(rsp, 2*wordSize), exception);
}
Register obj_reg = obj_opr->as_register();
@@ -360,7 +375,7 @@ void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_
lock_reg = new_hdr;
// compute pointer to BasicLock
Address lock_addr = frame_map()->address_for_monitor_lock(monitor_no);
- __ leal(lock_reg, lock_addr);
+ __ lea(lock_reg, lock_addr);
// unlock object
MonitorAccessStub* slow_case = new MonitorExitStub(lock_opr, true, monitor_no);
// _slow_case_stubs->append(slow_case);
@@ -385,14 +400,18 @@ void LIR_Assembler::monitorexit(LIR_Opr obj_opr, LIR_Opr lock_opr, Register new_
if (exception->is_valid()) {
// restore exception
- __ movl (exception, Address(rsp, 2 * wordSize));
+ __ movptr (exception, Address(rsp, 2 * wordSize));
}
}
// This specifies the rsp decrement needed to build the frame
int LIR_Assembler::initial_frame_size_in_bytes() {
// if rounding, must let FrameMap know!
- return (frame_map()->framesize() - 2) * BytesPerWord; // subtract two words to account for return address and link
+
+ // The frame_map records size in slots (32bit word)
+
+ // subtract two words to account for return address and link
+ return (frame_map()->framesize() - (2*VMRegImpl::slots_per_word)) * VMRegImpl::stack_slot_size;
}
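A worked example, assuming the usual slot constants (stack_slot_size == 4; slots_per_word is 2 on LP64 and 1 on 32-bit):

    //   LP64:   (framesize_in_slots - 4) * 4  ->  frame bytes minus 16 (return address + saved rbp)
    //   32-bit: (framesize_in_slots - 2) * 4  ->  frame bytes minus 8
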
@@ -495,43 +514,43 @@ void LIR_Assembler::emit_deopt_handler() {
// This is the fast version of java.lang.String.compare; it has no
// OSR entry and therefore we generate a slow version for OSRs
void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst, CodeEmitInfo* info) {
- __ movl (rbx, rcx); // receiver is in rcx
- __ movl (rax, arg1->as_register());
+ __ movptr (rbx, rcx); // receiver is in rcx
+ __ movptr (rax, arg1->as_register());
// Get addresses of first characters from both Strings
- __ movl (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
- __ movl (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
- __ leal (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+ __ movptr (rsi, Address(rax, java_lang_String::value_offset_in_bytes()));
+ __ movptr (rcx, Address(rax, java_lang_String::offset_offset_in_bytes()));
+ __ lea (rsi, Address(rsi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
// rbx may be NULL
add_debug_info_for_null_check_here(info);
- __ movl (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
- __ movl (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
- __ leal (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
+ __ movptr (rdi, Address(rbx, java_lang_String::value_offset_in_bytes()));
+ __ movptr (rcx, Address(rbx, java_lang_String::offset_offset_in_bytes()));
+ __ lea (rdi, Address(rdi, rcx, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
// compute minimum length (in rax) and difference of lengths (on top of stack)
if (VM_Version::supports_cmov()) {
- __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
- __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
- __ movl (rcx, rbx);
- __ subl (rbx, rax); // subtract lengths
- __ pushl(rbx); // result
- __ cmovl(Assembler::lessEqual, rax, rcx);
+ __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
+ __ movl (rax, Address(rax, java_lang_String::count_offset_in_bytes()));
+ __ mov (rcx, rbx);
+ __ subptr (rbx, rax); // subtract lengths
+ __ push (rbx); // result
+ __ cmov (Assembler::lessEqual, rax, rcx);
} else {
Label L;
- __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
- __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes()));
- __ movl (rax, rbx);
- __ subl (rbx, rcx);
- __ pushl(rbx);
- __ jcc (Assembler::lessEqual, L);
- __ movl (rax, rcx);
+ __ movl (rbx, Address(rbx, java_lang_String::count_offset_in_bytes()));
+ __ movl (rcx, Address(rax, java_lang_String::count_offset_in_bytes()));
+ __ mov (rax, rbx);
+ __ subptr (rbx, rcx);
+ __ push (rbx);
+ __ jcc (Assembler::lessEqual, L);
+ __ mov (rax, rcx);
__ bind (L);
}
// is minimum length 0?
Label noLoop, haveResult;
- __ testl (rax, rax);
+ __ testptr (rax, rax);
__ jcc (Assembler::zero, noLoop);
// compare first characters
@@ -546,9 +565,9 @@ void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst,
// set rsi.edi to the end of the arrays (arrays have same length)
// negate the index
- __ leal(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
- __ leal(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
- __ negl(rax);
+ __ lea(rsi, Address(rsi, rax, Address::times_2, type2aelembytes(T_CHAR)));
+ __ lea(rdi, Address(rdi, rax, Address::times_2, type2aelembytes(T_CHAR)));
+ __ negptr(rax);
// compare the strings in a loop
@@ -565,12 +584,12 @@ void LIR_Assembler::emit_string_compare(LIR_Opr arg0, LIR_Opr arg1, LIR_Opr dst,
// strings are equal up to min length
__ bind(noLoop);
- __ popl(rax);
+ __ pop(rax);
return_op(LIR_OprFact::illegalOpr);
__ bind(haveResult);
// leave instruction is going to discard the TOS value
- __ movl (rax, rcx); // result of call is in rax,
+ __ mov (rax, rcx); // result of call is in rax,
}
@@ -589,6 +608,11 @@ void LIR_Assembler::return_op(LIR_Opr result) {
// the poll sets the condition code, but no data registers
AddressLiteral polling_page(os::get_polling_page() + (SafepointPollOffset % os::vm_page_size()),
relocInfo::poll_return_type);
+
+ // NOTE: this requires that the polling page be reachable else the reloc
+ // goes to the movq that loads the address and not the faulting instruction
+ // which breaks the signal handler code
+
__ test32(rax, polling_page);
__ ret(0);
@@ -606,17 +630,22 @@ int LIR_Assembler::safepoint_poll(LIR_Opr tmp, CodeEmitInfo* info) {
}
int offset = __ offset();
+
+ // NOTE: this requires that the polling page be reachable else the reloc
+ // goes to the movq that loads the address and not the faulting instruction
+ // which breaks the signal handler code
+
__ test32(rax, polling_page);
return offset;
}
void LIR_Assembler::move_regs(Register from_reg, Register to_reg) {
- if (from_reg != to_reg) __ movl(to_reg, from_reg);
+ if (from_reg != to_reg) __ mov(to_reg, from_reg);
}
void LIR_Assembler::swap_reg(Register a, Register b) {
- __ xchgl(a, b);
+ __ xchgptr(a, b);
}
@@ -634,8 +663,12 @@ void LIR_Assembler::const2reg(LIR_Opr src, LIR_Opr dest, LIR_PatchCode patch_cod
case T_LONG: {
assert(patch_code == lir_patch_none, "no patching handled here");
- __ movl(dest->as_register_lo(), c->as_jint_lo());
- __ movl(dest->as_register_hi(), c->as_jint_hi());
+#ifdef _LP64
+ __ movptr(dest->as_register_lo(), (intptr_t)c->as_jlong());
+#else
+ __ movptr(dest->as_register_lo(), c->as_jint_lo());
+ __ movptr(dest->as_register_hi(), c->as_jint_hi());
+#endif // _LP64
break;
}
@@ -714,10 +747,15 @@ void LIR_Assembler::const2stack(LIR_Opr src, LIR_Opr dest) {
case T_LONG: // fall through
case T_DOUBLE:
- __ movl(frame_map()->address_for_slot(dest->double_stack_ix(),
- lo_word_offset_in_bytes), c->as_jint_lo_bits());
- __ movl(frame_map()->address_for_slot(dest->double_stack_ix(),
- hi_word_offset_in_bytes), c->as_jint_hi_bits());
+#ifdef _LP64
+ __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
+ lo_word_offset_in_bytes), (intptr_t)c->as_jlong_bits());
+#else
+ __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
+ lo_word_offset_in_bytes), c->as_jint_lo_bits());
+ __ movptr(frame_map()->address_for_slot(dest->double_stack_ix(),
+ hi_word_offset_in_bytes), c->as_jint_hi_bits());
+#endif // _LP64
break;
default:
@@ -731,7 +769,7 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
LIR_Const* c = src->as_constant_ptr();
LIR_Address* addr = dest->as_address_ptr();
- if (info != NULL) add_debug_info_for_null_check_here(info);
+ int null_check_here = code_offset();
switch (type) {
case T_INT: // fall through
case T_FLOAT:
@@ -741,16 +779,33 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
case T_OBJECT: // fall through
case T_ARRAY:
if (c->as_jobject() == NULL) {
- __ movl(as_Address(addr), NULL_WORD);
+ __ movptr(as_Address(addr), (int32_t)NULL_WORD);
} else {
- __ movoop(as_Address(addr), c->as_jobject());
+ if (is_literal_address(addr)) {
+ ShouldNotReachHere();
+ __ movoop(as_Address(addr, noreg), c->as_jobject());
+ } else {
+ __ movoop(as_Address(addr), c->as_jobject());
+ }
}
break;
case T_LONG: // fall through
case T_DOUBLE:
- __ movl(as_Address_hi(addr), c->as_jint_hi_bits());
- __ movl(as_Address_lo(addr), c->as_jint_lo_bits());
+#ifdef _LP64
+ if (is_literal_address(addr)) {
+ ShouldNotReachHere();
+ __ movptr(as_Address(addr, r15_thread), (intptr_t)c->as_jlong_bits());
+ } else {
+ __ movptr(r10, (intptr_t)c->as_jlong_bits());
+ null_check_here = code_offset();
+ __ movptr(as_Address_lo(addr), r10);
+ }
+#else
+ // Always reachable in 32bit so this doesn't produce useless move literal
+ __ movptr(as_Address_hi(addr), c->as_jint_hi_bits());
+ __ movptr(as_Address_lo(addr), c->as_jint_lo_bits());
+#endif // _LP64
break;
case T_BOOLEAN: // fall through
@@ -766,6 +821,10 @@ void LIR_Assembler::const2mem(LIR_Opr src, LIR_Opr dest, BasicType type, CodeEmi
default:
ShouldNotReachHere();
};
+
+ if (info != NULL) {
+ add_debug_info_for_null_check(null_check_here, info);
+ }
}
@@ -775,6 +834,13 @@ void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
// move between cpu-registers
if (dest->is_single_cpu()) {
+#ifdef _LP64
+ if (src->type() == T_LONG) {
+ // Can do LONG -> OBJECT
+ move_regs(src->as_register_lo(), dest->as_register());
+ return;
+ }
+#endif
assert(src->is_single_cpu(), "must match");
if (src->type() == T_OBJECT) {
__ verify_oop(src->as_register());
@@ -782,13 +848,27 @@ void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
move_regs(src->as_register(), dest->as_register());
} else if (dest->is_double_cpu()) {
+#ifdef _LP64
+ if (src->type() == T_OBJECT || src->type() == T_ARRAY) {
+ // Surprising to me, but we can see a move of a long to T_OBJECT
+ __ verify_oop(src->as_register());
+ move_regs(src->as_register(), dest->as_register_lo());
+ return;
+ }
+#endif
assert(src->is_double_cpu(), "must match");
Register f_lo = src->as_register_lo();
Register f_hi = src->as_register_hi();
Register t_lo = dest->as_register_lo();
Register t_hi = dest->as_register_hi();
+#ifdef _LP64
+ assert(f_hi == f_lo, "must be same");
+ assert(t_hi == t_lo, "must be same");
+ move_regs(f_lo, t_lo);
+#else
assert(f_lo != f_hi && t_lo != t_hi, "invalid register allocation");
+
if (f_lo == t_hi && f_hi == t_lo) {
swap_reg(f_lo, f_hi);
} else if (f_hi == t_lo) {
@@ -800,6 +880,7 @@ void LIR_Assembler::reg2reg(LIR_Opr src, LIR_Opr dest) {
move_regs(f_lo, t_lo);
move_regs(f_hi, t_hi);
}
+#endif // LP64
// special moves from fpu-register to xmm-register
// necessary for method results
@@ -841,14 +922,16 @@ void LIR_Assembler::reg2stack(LIR_Opr src, LIR_Opr dest, BasicType type, bool po
Address dst = frame_map()->address_for_slot(dest->single_stack_ix());
if (type == T_OBJECT || type == T_ARRAY) {
__ verify_oop(src->as_register());
+ __ movptr (dst, src->as_register());
+ } else {
+ __ movl (dst, src->as_register());
}
- __ movl (dst, src->as_register());
} else if (src->is_double_cpu()) {
Address dstLO = frame_map()->address_for_slot(dest->double_stack_ix(), lo_word_offset_in_bytes);
Address dstHI = frame_map()->address_for_slot(dest->double_stack_ix(), hi_word_offset_in_bytes);
- __ movl (dstLO, src->as_register_lo());
- __ movl (dstHI, src->as_register_hi());
+ __ movptr (dstLO, src->as_register_lo());
+ NOT_LP64(__ movptr (dstHI, src->as_register_hi()));
} else if (src->is_single_xmm()) {
Address dst_addr = frame_map()->address_for_slot(dest->single_stack_ix());
@@ -885,6 +968,8 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
}
if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
+ Address toa = as_Address(to_addr);
+ assert(toa.disp() != 0, "must have");
}
if (info != NULL) {
add_debug_info_for_null_check_here(info);
@@ -918,6 +1003,10 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
case T_ADDRESS: // fall through
case T_ARRAY: // fall through
case T_OBJECT: // fall through
+#ifdef _LP64
+ __ movptr(as_Address(to_addr), src->as_register());
+ break;
+#endif // _LP64
case T_INT:
__ movl(as_Address(to_addr), src->as_register());
break;
@@ -925,6 +1014,9 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
case T_LONG: {
Register from_lo = src->as_register_lo();
Register from_hi = src->as_register_hi();
+#ifdef _LP64
+ __ movptr(as_Address_lo(to_addr), from_lo);
+#else
Register base = to_addr->base()->as_register();
Register index = noreg;
if (to_addr->index()->is_register()) {
@@ -950,6 +1042,7 @@ void LIR_Assembler::reg2mem(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
}
__ movl(as_Address_hi(to_addr), from_hi);
}
+#endif // _LP64
break;
}
@@ -982,16 +1075,18 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
assert(dest->is_register(), "should not call otherwise");
if (dest->is_single_cpu()) {
- __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
if (type == T_ARRAY || type == T_OBJECT) {
+ __ movptr(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
__ verify_oop(dest->as_register());
+ } else {
+ __ movl(dest->as_register(), frame_map()->address_for_slot(src->single_stack_ix()));
}
} else if (dest->is_double_cpu()) {
Address src_addr_LO = frame_map()->address_for_slot(src->double_stack_ix(), lo_word_offset_in_bytes);
Address src_addr_HI = frame_map()->address_for_slot(src->double_stack_ix(), hi_word_offset_in_bytes);
- __ movl(dest->as_register_hi(), src_addr_HI);
- __ movl(dest->as_register_lo(), src_addr_LO);
+ __ movptr(dest->as_register_lo(), src_addr_LO);
+ NOT_LP64(__ movptr(dest->as_register_hi(), src_addr_HI));
} else if (dest->is_single_xmm()) {
Address src_addr = frame_map()->address_for_slot(src->single_stack_ix());
@@ -1019,15 +1114,25 @@ void LIR_Assembler::stack2reg(LIR_Opr src, LIR_Opr dest, BasicType type) {
void LIR_Assembler::stack2stack(LIR_Opr src, LIR_Opr dest, BasicType type) {
if (src->is_single_stack()) {
- __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
- __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
+ if (type == T_OBJECT || type == T_ARRAY) {
+ __ pushptr(frame_map()->address_for_slot(src ->single_stack_ix()));
+ __ popptr (frame_map()->address_for_slot(dest->single_stack_ix()));
+ } else {
+ __ pushl(frame_map()->address_for_slot(src ->single_stack_ix()));
+ __ popl (frame_map()->address_for_slot(dest->single_stack_ix()));
+ }
} else if (src->is_double_stack()) {
+#ifdef _LP64
+ __ pushptr(frame_map()->address_for_slot(src ->double_stack_ix()));
+ __ popptr (frame_map()->address_for_slot(dest->double_stack_ix()));
+#else
__ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 0));
- // push and pop the part at src + 4, adding 4 for the previous push
- __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), 4 + 4));
- __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 4 + 4));
+ // push and pop the part at src + wordSize, adding wordSize for the previous push
+ __ pushl(frame_map()->address_for_slot(src ->double_stack_ix(), wordSize));
+ __ popl (frame_map()->address_for_slot(dest->double_stack_ix(), wordSize));
__ popl (frame_map()->address_for_slot(dest->double_stack_ix(), 0));
+#endif // _LP64
} else {
ShouldNotReachHere();
@@ -1052,7 +1157,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
// so blow away the value of to_rinfo before loading a
// partial word into it. Do it here so that it precedes
// the potential patch point below.
- __ xorl(dest->as_register(), dest->as_register());
+ __ xorptr(dest->as_register(), dest->as_register());
}
break;
}
@@ -1060,6 +1165,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
PatchingStub* patch = NULL;
if (patch_code != lir_patch_none) {
patch = new PatchingStub(_masm, PatchingStub::access_field_id);
+ assert(from_addr.disp() != 0, "must have");
}
if (info != NULL) {
add_debug_info_for_null_check_here(info);
@@ -1091,13 +1197,21 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
case T_ADDRESS: // fall through
case T_OBJECT: // fall through
case T_ARRAY: // fall through
+#ifdef _LP64
+ __ movptr(dest->as_register(), from_addr);
+ break;
+#endif // _LP64
case T_INT:
- __ movl(dest->as_register(), from_addr);
+ // %%% could this be a movl? this is safer but longer instruction
+ __ movl2ptr(dest->as_register(), from_addr);
break;
case T_LONG: {
Register to_lo = dest->as_register_lo();
Register to_hi = dest->as_register_hi();
+#ifdef _LP64
+ __ movptr(to_lo, as_Address_lo(addr));
+#else
Register base = addr->base()->as_register();
Register index = noreg;
if (addr->index()->is_register()) {
@@ -1109,7 +1223,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
// array access so this code will never have to deal with
// patches or null checks.
assert(info == NULL && patch == NULL, "must be");
- __ leal(to_hi, as_Address(addr));
+ __ lea(to_hi, as_Address(addr));
__ movl(to_lo, Address(to_hi, 0));
__ movl(to_hi, Address(to_hi, BytesPerWord));
} else if (base == to_lo || index == to_lo) {
@@ -1132,6 +1246,7 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
}
__ movl(to_hi, as_Address_hi(addr));
}
+#endif // _LP64
break;
}
@@ -1140,12 +1255,13 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
Register dest_reg = dest->as_register();
assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
- __ movsxb(dest_reg, from_addr);
+ __ movsbl(dest_reg, from_addr);
} else {
__ movb(dest_reg, from_addr);
__ shll(dest_reg, 24);
__ sarl(dest_reg, 24);
}
+ // These are unsigned so the zero extension on 64bit is just what we need
break;
}
@@ -1153,22 +1269,26 @@ void LIR_Assembler::mem2reg(LIR_Opr src, LIR_Opr dest, BasicType type, LIR_Patch
Register dest_reg = dest->as_register();
assert(VM_Version::is_P6() || dest_reg->has_byte_register(), "must use byte registers if not P6");
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
- __ movzxw(dest_reg, from_addr);
+ __ movzwl(dest_reg, from_addr);
} else {
__ movw(dest_reg, from_addr);
}
+ // This is unsigned so the zero extension on 64bit is just what we need
+ // __ movl2ptr(dest_reg, dest_reg);
break;
}
case T_SHORT: {
Register dest_reg = dest->as_register();
if (VM_Version::is_P6() || from_addr.uses(dest_reg)) {
- __ movsxw(dest_reg, from_addr);
+ __ movswl(dest_reg, from_addr);
} else {
__ movw(dest_reg, from_addr);
__ shll(dest_reg, 16);
__ sarl(dest_reg, 16);
}
+ // Might not be needed in 64bit but certainly doesn't hurt (except for code size)
+ __ movl2ptr(dest_reg, dest_reg);
break;
}
@@ -1306,9 +1426,13 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
switch (op->bytecode()) {
case Bytecodes::_i2l:
+#ifdef _LP64
+ __ movl2ptr(dest->as_register_lo(), src->as_register());
+#else
move_regs(src->as_register(), dest->as_register_lo());
move_regs(src->as_register(), dest->as_register_hi());
__ sarl(dest->as_register_hi(), 31);
+#endif // LP64
break;
case Bytecodes::_l2i:
@@ -1346,9 +1470,9 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
case Bytecodes::_i2f:
case Bytecodes::_i2d:
if (dest->is_single_xmm()) {
- __ cvtsi2ss(dest->as_xmm_float_reg(), src->as_register());
+ __ cvtsi2ssl(dest->as_xmm_float_reg(), src->as_register());
} else if (dest->is_double_xmm()) {
- __ cvtsi2sd(dest->as_xmm_double_reg(), src->as_register());
+ __ cvtsi2sdl(dest->as_xmm_double_reg(), src->as_register());
} else {
assert(dest->fpu() == 0, "result must be on TOS");
__ movl(Address(rsp, 0), src->as_register());
@@ -1359,9 +1483,9 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
case Bytecodes::_f2i:
case Bytecodes::_d2i:
if (src->is_single_xmm()) {
- __ cvttss2si(dest->as_register(), src->as_xmm_float_reg());
+ __ cvttss2sil(dest->as_register(), src->as_xmm_float_reg());
} else if (src->is_double_xmm()) {
- __ cvttsd2si(dest->as_register(), src->as_xmm_double_reg());
+ __ cvttsd2sil(dest->as_register(), src->as_xmm_double_reg());
} else {
assert(src->fpu() == 0, "input must be on TOS");
__ fldcw(ExternalAddress(StubRoutines::addr_fpu_cntrl_wrd_trunc()));
@@ -1382,8 +1506,8 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
assert(!dest->is_xmm_register(), "result in xmm register not supported (no SSE instruction present)");
assert(dest->fpu() == 0, "result must be on TOS");
- __ movl(Address(rsp, 0), src->as_register_lo());
- __ movl(Address(rsp, BytesPerWord), src->as_register_hi());
+ __ movptr(Address(rsp, 0), src->as_register_lo());
+ NOT_LP64(__ movl(Address(rsp, BytesPerWord), src->as_register_hi()));
__ fild_d(Address(rsp, 0));
// float result is rounded later through spilling
break;
@@ -1392,7 +1516,7 @@ void LIR_Assembler::emit_opConvert(LIR_OpConvert* op) {
case Bytecodes::_d2l:
assert(!src->is_xmm_register(), "input in xmm register not supported (no SSE instruction present)");
assert(src->fpu() == 0, "input must be on TOS");
- assert(dest == FrameMap::rax_rdx_long_opr, "runtime stub places result in these registers");
+ assert(dest == FrameMap::long0_opr, "runtime stub places result in these registers");
// instruction sequence too long to inline it here
{
@@ -1439,7 +1563,7 @@ void LIR_Assembler::emit_alloc_array(LIR_OpAllocArray* op) {
} else if (len == tmp3) {
// everything is ok
} else {
- __ movl(tmp3, len);
+ __ mov(tmp3, len);
}
__ allocate_array(op->obj()->as_register(),
len,
@@ -1466,31 +1590,32 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
CodeStub* stub = op->stub();
Label done;
- __ cmpl(value, 0);
+ __ cmpptr(value, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, done);
add_debug_info_for_null_check_here(op->info_for_exception());
- __ movl(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
- __ movl(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
+ __ movptr(k_RInfo, Address(array, oopDesc::klass_offset_in_bytes()));
+ __ movptr(klass_RInfo, Address(value, oopDesc::klass_offset_in_bytes()));
// get instance klass
- __ movl(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
+ __ movptr(k_RInfo, Address(k_RInfo, objArrayKlass::element_klass_offset_in_bytes() + sizeof(oopDesc)));
// get super_check_offset
__ movl(Rtmp1, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
// See if we get an immediate positive hit
- __ cmpl(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1));
+ __ cmpptr(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1));
__ jcc(Assembler::equal, done);
// check for immediate negative hit
__ cmpl(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ jcc(Assembler::notEqual, *stub->entry());
// check for self
- __ cmpl(klass_RInfo, k_RInfo);
+ __ cmpptr(klass_RInfo, k_RInfo);
__ jcc(Assembler::equal, done);
- __ pushl(klass_RInfo);
- __ pushl(k_RInfo);
+ __ push(klass_RInfo);
+ __ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
- __ popl(klass_RInfo);
- __ popl(k_RInfo);
+ __ pop(klass_RInfo);
+ __ pop(k_RInfo);
+ // result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
__ bind(done);
@@ -1521,10 +1646,14 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
if (!k->is_loaded()) {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
} else {
+#ifdef _LP64
+ __ movoop(k_RInfo, k->encoding());
+#else
k_RInfo = noreg;
+#endif // _LP64
}
assert(obj != k_RInfo, "must be different");
- __ cmpl(obj, 0);
+ __ cmpptr(obj, (int32_t)NULL_WORD);
if (op->profiled_method() != NULL) {
ciMethod* method = op->profiled_method();
int bci = op->profiled_bci();
@@ -1556,9 +1685,13 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// get object class
// not a safepoint as obj null check happens earlier
if (k->is_loaded()) {
+#ifdef _LP64
+ __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+#else
__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding());
+#endif // _LP64
} else {
- __ cmpl(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+ __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
}
__ jcc(Assembler::notEqual, *stub->entry());
@@ -1566,24 +1699,37 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
} else {
// get object class
// not a safepoint as obj null check happens earlier
- __ movl(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+ __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
if (k->is_loaded()) {
// See if we get an immediate positive hit
+#ifdef _LP64
+ __ cmpptr(k_RInfo, Address(klass_RInfo, k->super_check_offset()));
+#else
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
+#endif // _LP64
if (sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() != k->super_check_offset()) {
__ jcc(Assembler::notEqual, *stub->entry());
} else {
// See if we get an immediate positive hit
__ jcc(Assembler::equal, done);
// check for self
+#ifdef _LP64
+ __ cmpptr(klass_RInfo, k_RInfo);
+#else
__ cmpoop(klass_RInfo, k->encoding());
+#endif // _LP64
__ jcc(Assembler::equal, done);
- __ pushl(klass_RInfo);
+ __ push(klass_RInfo);
+#ifdef _LP64
+ __ push(k_RInfo);
+#else
__ pushoop(k->encoding());
+#endif // _LP64
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
- __ popl(klass_RInfo);
- __ popl(klass_RInfo);
+ __ pop(klass_RInfo);
+ __ pop(klass_RInfo);
+ // result is a boolean
__ cmpl(klass_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
}
@@ -1591,20 +1737,21 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
} else {
__ movl(Rtmp1, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
// See if we get an immediate positive hit
- __ cmpl(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1));
+ __ cmpptr(k_RInfo, Address(klass_RInfo, Rtmp1, Address::times_1));
__ jcc(Assembler::equal, done);
// check for immediate negative hit
__ cmpl(Rtmp1, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ jcc(Assembler::notEqual, *stub->entry());
// check for self
- __ cmpl(klass_RInfo, k_RInfo);
+ __ cmpptr(klass_RInfo, k_RInfo);
__ jcc(Assembler::equal, done);
- __ pushl(klass_RInfo);
- __ pushl(k_RInfo);
+ __ push(klass_RInfo);
+ __ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
- __ popl(klass_RInfo);
- __ popl(k_RInfo);
+ __ pop(klass_RInfo);
+ __ pop(k_RInfo);
+ // result is a boolean
__ cmpl(k_RInfo, 0);
__ jcc(Assembler::equal, *stub->entry());
__ bind(done);
@@ -1612,7 +1759,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
}
if (dst != obj) {
- __ movl(dst, obj);
+ __ mov(dst, obj);
}
} else if (code == lir_instanceof) {
Register obj = op->object()->as_register();
@@ -1632,29 +1779,33 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// so let's do it before loading the class
if (!k->is_loaded()) {
jobject2reg_with_patching(k_RInfo, op->info_for_patch());
+ } else {
+ LP64_ONLY(__ movoop(k_RInfo, k->encoding()));
}
assert(obj != k_RInfo, "must be different");
__ verify_oop(obj);
if (op->fast_check()) {
- __ cmpl(obj, 0);
+ __ cmpptr(obj, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, zero);
// get object class
// not a safepoint as obj null check happens earlier
- if (k->is_loaded()) {
- __ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding());
+ if (LP64_ONLY(false &&) k->is_loaded()) {
+ NOT_LP64(__ cmpoop(Address(obj, oopDesc::klass_offset_in_bytes()), k->encoding()));
k_RInfo = noreg;
} else {
- __ cmpl(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+ __ cmpptr(k_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
}
__ jcc(Assembler::equal, one);
} else {
// get object class
// not a safepoint as obj null check happens earlier
- __ cmpl(obj, 0);
+ __ cmpptr(obj, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, zero);
- __ movl(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+ __ movptr(klass_RInfo, Address(obj, oopDesc::klass_offset_in_bytes()));
+
+#ifndef _LP64
if (k->is_loaded()) {
// See if we get an immediate positive hit
__ cmpoop(Address(klass_RInfo, k->super_check_offset()), k->encoding());
@@ -1663,40 +1814,43 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
// check for self
__ cmpoop(klass_RInfo, k->encoding());
__ jcc(Assembler::equal, one);
- __ pushl(klass_RInfo);
+ __ push(klass_RInfo);
__ pushoop(k->encoding());
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
- __ popl(klass_RInfo);
- __ popl(dst);
+ __ pop(klass_RInfo);
+ __ pop(dst);
__ jmp(done);
}
} else {
+#else
+ { // YUCK
+#endif // LP64
assert(dst != klass_RInfo && dst != k_RInfo, "need 3 registers");
__ movl(dst, Address(k_RInfo, sizeof(oopDesc) + Klass::super_check_offset_offset_in_bytes()));
// See if we get an immediate positive hit
- __ cmpl(k_RInfo, Address(klass_RInfo, dst, Address::times_1));
+ __ cmpptr(k_RInfo, Address(klass_RInfo, dst, Address::times_1));
__ jcc(Assembler::equal, one);
// check for immediate negative hit
__ cmpl(dst, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
__ jcc(Assembler::notEqual, zero);
// check for self
- __ cmpl(klass_RInfo, k_RInfo);
+ __ cmpptr(klass_RInfo, k_RInfo);
__ jcc(Assembler::equal, one);
- __ pushl(klass_RInfo);
- __ pushl(k_RInfo);
+ __ push(klass_RInfo);
+ __ push(k_RInfo);
__ call(RuntimeAddress(Runtime1::entry_for(Runtime1::slow_subtype_check_id)));
- __ popl(klass_RInfo);
- __ popl(dst);
+ __ pop(klass_RInfo);
+ __ pop(dst);
__ jmp(done);
}
}
__ bind(zero);
- __ xorl(dst, dst);
+ __ xorptr(dst, dst);
__ jmp(done);
__ bind(one);
- __ movl(dst, 1);
+ __ movptr(dst, 1);
__ bind(done);
} else {
ShouldNotReachHere();
@@ -1706,8 +1860,7 @@ void LIR_Assembler::emit_opTypeCheck(LIR_OpTypeCheck* op) {
void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
- if (op->code() == lir_cas_long) {
- assert(VM_Version::supports_cx8(), "wrong machine");
+ if (LP64_ONLY(false &&) op->code() == lir_cas_long && VM_Version::supports_cx8()) {
assert(op->cmp_value()->as_register_lo() == rax, "wrong register");
assert(op->cmp_value()->as_register_hi() == rdx, "wrong register");
assert(op->new_value()->as_register_lo() == rbx, "wrong register");
@@ -1716,10 +1869,11 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
if (os::is_MP()) {
__ lock();
}
- __ cmpxchg8(Address(addr, 0));
+ NOT_LP64(__ cmpxchg8(Address(addr, 0)));
- } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj) {
- Register addr = op->addr()->as_register();
+ } else if (op->code() == lir_cas_int || op->code() == lir_cas_obj ) {
+ NOT_LP64(assert(op->addr()->is_single_cpu(), "must be single");)
+ Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
Register newval = op->new_value()->as_register();
Register cmpval = op->cmp_value()->as_register();
assert(cmpval == rax, "wrong register");
@@ -1730,7 +1884,28 @@ void LIR_Assembler::emit_compare_and_swap(LIR_OpCompareAndSwap* op) {
if (os::is_MP()) {
__ lock();
}
- __ cmpxchg(newval, Address(addr, 0));
+ if ( op->code() == lir_cas_obj) {
+ __ cmpxchgptr(newval, Address(addr, 0));
+ } else if (op->code() == lir_cas_int) {
+ __ cmpxchgl(newval, Address(addr, 0));
+ } else {
+ LP64_ONLY(__ cmpxchgq(newval, Address(addr, 0)));
+ }
+#ifdef _LP64
+ } else if (op->code() == lir_cas_long) {
+ Register addr = (op->addr()->is_single_cpu() ? op->addr()->as_register() : op->addr()->as_register_lo());
+ Register newval = op->new_value()->as_register_lo();
+ Register cmpval = op->cmp_value()->as_register_lo();
+ assert(cmpval == rax, "wrong register");
+ assert(newval != NULL, "new val must be register");
+ assert(cmpval != newval, "cmp and new values must be in different registers");
+ assert(cmpval != addr, "cmp and addr must be in different registers");
+ assert(newval != addr, "new value and addr must be in different registers");
+ if (os::is_MP()) {
+ __ lock();
+ }
+ __ cmpxchgq(newval, Address(addr, 0));
+#endif // _LP64
} else {
Unimplemented();
}
@@ -1765,17 +1940,17 @@ void LIR_Assembler::cmove(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2, L
// optimized version that does not require a branch
if (opr2->is_single_cpu()) {
assert(opr2->cpu_regnr() != result->cpu_regnr(), "opr2 already overwritten by previous move");
- __ cmovl(ncond, result->as_register(), opr2->as_register());
+ __ cmov(ncond, result->as_register(), opr2->as_register());
} else if (opr2->is_double_cpu()) {
assert(opr2->cpu_regnrLo() != result->cpu_regnrLo() && opr2->cpu_regnrLo() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
assert(opr2->cpu_regnrHi() != result->cpu_regnrLo() && opr2->cpu_regnrHi() != result->cpu_regnrHi(), "opr2 already overwritten by previous move");
- __ cmovl(ncond, result->as_register_lo(), opr2->as_register_lo());
- __ cmovl(ncond, result->as_register_hi(), opr2->as_register_hi());
+ __ cmovptr(ncond, result->as_register_lo(), opr2->as_register_lo());
+ NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), opr2->as_register_hi());)
} else if (opr2->is_single_stack()) {
__ cmovl(ncond, result->as_register(), frame_map()->address_for_slot(opr2->single_stack_ix()));
} else if (opr2->is_double_stack()) {
- __ cmovl(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
- __ cmovl(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));
+ __ cmovptr(ncond, result->as_register_lo(), frame_map()->address_for_slot(opr2->double_stack_ix(), lo_word_offset_in_bytes));
+ NOT_LP64(__ cmovptr(ncond, result->as_register_hi(), frame_map()->address_for_slot(opr2->double_stack_ix(), hi_word_offset_in_bytes));)
} else {
ShouldNotReachHere();
}
@@ -1851,23 +2026,28 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
// cpu register - cpu register
Register rreg_lo = right->as_register_lo();
Register rreg_hi = right->as_register_hi();
- assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi);
+ NOT_LP64(assert_different_registers(lreg_lo, lreg_hi, rreg_lo, rreg_hi));
+ LP64_ONLY(assert_different_registers(lreg_lo, rreg_lo));
switch (code) {
case lir_add:
- __ addl(lreg_lo, rreg_lo);
- __ adcl(lreg_hi, rreg_hi);
+ __ addptr(lreg_lo, rreg_lo);
+ NOT_LP64(__ adcl(lreg_hi, rreg_hi));
break;
case lir_sub:
- __ subl(lreg_lo, rreg_lo);
- __ sbbl(lreg_hi, rreg_hi);
+ __ subptr(lreg_lo, rreg_lo);
+ NOT_LP64(__ sbbl(lreg_hi, rreg_hi));
break;
case lir_mul:
+#ifdef _LP64
+ __ imulq(lreg_lo, rreg_lo);
+#else
assert(lreg_lo == rax && lreg_hi == rdx, "must be");
__ imull(lreg_hi, rreg_lo);
__ imull(rreg_hi, lreg_lo);
__ addl (rreg_hi, lreg_hi);
__ mull (rreg_lo);
__ addl (lreg_hi, rreg_hi);
+#endif // _LP64
break;
default:
ShouldNotReachHere();
@@ -1875,20 +2055,35 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
} else if (right->is_constant()) {
// cpu register - constant
+#ifdef _LP64
+ jlong c = right->as_constant_ptr()->as_jlong_bits();
+ __ movptr(r10, (intptr_t) c);
+ switch (code) {
+ case lir_add:
+ __ addptr(lreg_lo, r10);
+ break;
+ case lir_sub:
+ __ subptr(lreg_lo, r10);
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+#else
jint c_lo = right->as_constant_ptr()->as_jint_lo();
jint c_hi = right->as_constant_ptr()->as_jint_hi();
switch (code) {
case lir_add:
- __ addl(lreg_lo, c_lo);
+ __ addptr(lreg_lo, c_lo);
__ adcl(lreg_hi, c_hi);
break;
case lir_sub:
- __ subl(lreg_lo, c_lo);
+ __ subptr(lreg_lo, c_lo);
__ sbbl(lreg_hi, c_hi);
break;
default:
ShouldNotReachHere();
}
+#endif // _LP64
} else {
ShouldNotReachHere();
@@ -2065,11 +2260,11 @@ void LIR_Assembler::arith_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
jint c = right->as_constant_ptr()->as_jint();
switch (code) {
case lir_add: {
- __ increment(laddr, c);
+ __ incrementl(laddr, c);
break;
}
case lir_sub: {
- __ decrement(laddr, c);
+ __ decrementl(laddr, c);
break;
}
default: ShouldNotReachHere();
@@ -2211,9 +2406,9 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
} else {
Register rright = right->as_register();
switch (code) {
- case lir_logic_and: __ andl (reg, rright); break;
- case lir_logic_or : __ orl (reg, rright); break;
- case lir_logic_xor: __ xorl (reg, rright); break;
+ case lir_logic_and: __ andptr (reg, rright); break;
+ case lir_logic_or : __ orptr (reg, rright); break;
+ case lir_logic_xor: __ xorptr (reg, rright); break;
default: ShouldNotReachHere();
}
}
@@ -2222,6 +2417,21 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
Register l_lo = left->as_register_lo();
Register l_hi = left->as_register_hi();
if (right->is_constant()) {
+#ifdef _LP64
+ __ mov64(rscratch1, right->as_constant_ptr()->as_jlong());
+ switch (code) {
+ case lir_logic_and:
+ __ andq(l_lo, rscratch1);
+ break;
+ case lir_logic_or:
+ __ orq(l_lo, rscratch1);
+ break;
+ case lir_logic_xor:
+ __ xorq(l_lo, rscratch1);
+ break;
+ default: ShouldNotReachHere();
+ }
+#else
int r_lo = right->as_constant_ptr()->as_jint_lo();
int r_hi = right->as_constant_ptr()->as_jint_hi();
switch (code) {
@@ -2239,22 +2449,23 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
break;
default: ShouldNotReachHere();
}
+#endif // _LP64
} else {
Register r_lo = right->as_register_lo();
Register r_hi = right->as_register_hi();
assert(l_lo != r_hi, "overwriting registers");
switch (code) {
case lir_logic_and:
- __ andl(l_lo, r_lo);
- __ andl(l_hi, r_hi);
+ __ andptr(l_lo, r_lo);
+ NOT_LP64(__ andptr(l_hi, r_hi);)
break;
case lir_logic_or:
- __ orl(l_lo, r_lo);
- __ orl(l_hi, r_hi);
+ __ orptr(l_lo, r_lo);
+ NOT_LP64(__ orptr(l_hi, r_hi);)
break;
case lir_logic_xor:
- __ xorl(l_lo, r_lo);
- __ xorl(l_hi, r_hi);
+ __ xorptr(l_lo, r_lo);
+ NOT_LP64(__ xorptr(l_hi, r_hi);)
break;
default: ShouldNotReachHere();
}
@@ -2263,6 +2474,9 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
Register dst_lo = dst->as_register_lo();
Register dst_hi = dst->as_register_hi();
+#ifdef _LP64
+ move_regs(l_lo, dst_lo);
+#else
if (dst_lo == l_hi) {
assert(dst_hi != l_lo, "overwriting registers");
move_regs(l_hi, dst_hi);
@@ -2272,6 +2486,7 @@ void LIR_Assembler::logic_op(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Opr
move_regs(l_lo, dst_lo);
move_regs(l_hi, dst_hi);
}
+#endif // _LP64
}
}
@@ -2306,7 +2521,7 @@ void LIR_Assembler::arithmetic_idiv(LIR_Code code, LIR_Opr left, LIR_Opr right,
move_regs(lreg, dreg);
} else if (code == lir_irem) {
Label done;
- __ movl(dreg, lreg);
+ __ mov(dreg, lreg);
__ andl(dreg, 0x80000000 | (divisor - 1));
__ jcc(Assembler::positive, done);
__ decrement(dreg);
@@ -2340,21 +2555,36 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
Register reg1 = opr1->as_register();
if (opr2->is_single_cpu()) {
// cpu register - cpu register
- __ cmpl(reg1, opr2->as_register());
+ if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
+ __ cmpptr(reg1, opr2->as_register());
+ } else {
+ assert(opr2->type() != T_OBJECT && opr2->type() != T_ARRAY, "cmp int, oop?");
+ __ cmpl(reg1, opr2->as_register());
+ }
} else if (opr2->is_stack()) {
// cpu register - stack
- __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
+ if (opr1->type() == T_OBJECT || opr1->type() == T_ARRAY) {
+ __ cmpptr(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
+ } else {
+ __ cmpl(reg1, frame_map()->address_for_slot(opr2->single_stack_ix()));
+ }
} else if (opr2->is_constant()) {
// cpu register - constant
LIR_Const* c = opr2->as_constant_ptr();
if (c->type() == T_INT) {
__ cmpl(reg1, c->as_jint());
- } else if (c->type() == T_OBJECT) {
+ } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
+ // In 64bit oops are single register
jobject o = c->as_jobject();
if (o == NULL) {
- __ cmpl(reg1, NULL_WORD);
+ __ cmpptr(reg1, (int32_t)NULL_WORD);
} else {
+#ifdef _LP64
+ __ movoop(rscratch1, o);
+ __ cmpptr(reg1, rscratch1);
+#else
__ cmpoop(reg1, c->as_jobject());
+#endif // _LP64
}
} else {
ShouldNotReachHere();
@@ -2373,6 +2603,9 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
Register xlo = opr1->as_register_lo();
Register xhi = opr1->as_register_hi();
if (opr2->is_double_cpu()) {
+#ifdef _LP64
+ __ cmpptr(xlo, opr2->as_register_lo());
+#else
// cpu register - cpu register
Register ylo = opr2->as_register_lo();
Register yhi = opr2->as_register_hi();
@@ -2381,11 +2614,16 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
if (condition == lir_cond_equal || condition == lir_cond_notEqual) {
__ orl(xhi, xlo);
}
+#endif // _LP64
} else if (opr2->is_constant()) {
// cpu register - constant 0
assert(opr2->as_jlong() == (jlong)0, "only handles zero");
+#ifdef _LP64
+ __ cmpptr(xlo, (int32_t)opr2->as_jlong());
+#else
assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "only handles equals case");
__ orl(xhi, xlo);
+#endif // _LP64
} else {
ShouldNotReachHere();
}
@@ -2438,16 +2676,28 @@ void LIR_Assembler::comp_op(LIR_Condition condition, LIR_Opr opr1, LIR_Opr opr2,
__ fcmp(noreg, opr2->fpu(), op->fpu_pop_count() > 0, op->fpu_pop_count() > 1);
} else if (opr1->is_address() && opr2->is_constant()) {
+ LIR_Const* c = opr2->as_constant_ptr();
+#ifdef _LP64
+ if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
+ assert(condition == lir_cond_equal || condition == lir_cond_notEqual, "need to reverse");
+ __ movoop(rscratch1, c->as_jobject());
+ }
+#endif // LP64
if (op->info() != NULL) {
add_debug_info_for_null_check_here(op->info());
}
// special case: address - constant
LIR_Address* addr = opr1->as_address_ptr();
- LIR_Const* c = opr2->as_constant_ptr();
if (c->type() == T_INT) {
__ cmpl(as_Address(addr), c->as_jint());
- } else if (c->type() == T_OBJECT) {
+ } else if (c->type() == T_OBJECT || c->type() == T_ARRAY) {
+#ifdef _LP64
+ // %%% Make this explode if addr isn't reachable until we figure out a
+ // better strategy by giving noreg as the temp for as_Address
+ __ cmpptr(rscratch1, as_Address(addr, noreg));
+#else
__ cmpoop(as_Address(addr), c->as_jobject());
+#endif // _LP64
} else {
ShouldNotReachHere();
}
@@ -2476,11 +2726,27 @@ void LIR_Assembler::comp_fl2i(LIR_Code code, LIR_Opr left, LIR_Opr right, LIR_Op
}
} else {
assert(code == lir_cmp_l2i, "check");
+#ifdef _LP64
+ Register dest = dst->as_register();
+ __ xorptr(dest, dest);
+ Label high, done;
+ __ cmpptr(left->as_register_lo(), right->as_register_lo());
+ __ jcc(Assembler::equal, done);
+ __ jcc(Assembler::greater, high);
+ __ decrement(dest);
+ __ jmp(done);
+ __ bind(high);
+ __ increment(dest);
+
+ __ bind(done);
+
+#else
__ lcmp2int(left->as_register_hi(),
left->as_register_lo(),
right->as_register_hi(),
right->as_register_lo());
move_regs(left->as_register_hi(), dst->as_register());
+#endif // _LP64
}
}
@@ -2551,7 +2817,8 @@ void LIR_Assembler::emit_static_call_stub() {
__ movoop(rbx, (jobject)NULL);
// must be set to -1 at code generation time
assert(!os::is_MP() || ((__ offset() + 1) % BytesPerWord) == 0, "must be aligned on MP");
- __ jump(RuntimeAddress((address)-1));
+ // On 64bit this will die since it will take a movq & jmp, must be only a jmp
+ __ jump(RuntimeAddress(__ pc()));
assert(__ offset() - start <= call_stub_size, "stub too big")
__ end_a_stub();
@@ -2616,6 +2883,14 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
Register lo = left->as_register_lo();
Register hi = left->as_register_hi();
assert(lo != SHIFT_count && hi != SHIFT_count, "left cannot be ECX");
+#ifdef _LP64
+ switch (code) {
+ case lir_shl: __ shlptr(lo); break;
+ case lir_shr: __ sarptr(lo); break;
+ case lir_ushr: __ shrptr(lo); break;
+ default: ShouldNotReachHere();
+ }
+#else
switch (code) {
case lir_shl: __ lshl(hi, lo); break;
@@ -2623,6 +2898,7 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, LIR_Opr count, LIR_Opr
case lir_ushr: __ lshr(hi, lo, false); break;
default: ShouldNotReachHere();
}
+#endif // LP64
} else {
ShouldNotReachHere();
}
@@ -2643,7 +2919,21 @@ void LIR_Assembler::shift_op(LIR_Code code, LIR_Opr left, jint count, LIR_Opr de
default: ShouldNotReachHere();
}
} else if (dest->is_double_cpu()) {
+#ifndef _LP64
Unimplemented();
+#else
+ // first move left into dest so that left is not destroyed by the shift
+ Register value = dest->as_register_lo();
+ count = count & 0x1F; // Java spec
+
+ move_regs(left->as_register_lo(), value);
+ switch (code) {
+ case lir_shl: __ shlptr(value, count); break;
+ case lir_shr: __ sarptr(value, count); break;
+ case lir_ushr: __ shrptr(value, count); break;
+ default: ShouldNotReachHere();
+ }
+#endif // _LP64
} else {
ShouldNotReachHere();
}
@@ -2654,7 +2944,7 @@ void LIR_Assembler::store_parameter(Register r, int offset_from_rsp_in_words) {
assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
- __ movl (Address(rsp, offset_from_rsp_in_bytes), r);
+ __ movptr (Address(rsp, offset_from_rsp_in_bytes), r);
}
@@ -2662,7 +2952,7 @@ void LIR_Assembler::store_parameter(jint c, int offset_from_rsp_in_words) {
assert(offset_from_rsp_in_words >= 0, "invalid offset from rsp");
int offset_from_rsp_in_bytes = offset_from_rsp_in_words * BytesPerWord;
assert(offset_from_rsp_in_bytes < frame_map()->reserved_argument_area_size(), "invalid offset");
- __ movl (Address(rsp, offset_from_rsp_in_bytes), c);
+ __ movptr (Address(rsp, offset_from_rsp_in_bytes), c);
}
@@ -2710,27 +3000,52 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
// these are just temporary placements until we need to reload
store_parameter(src_pos, 3);
store_parameter(src, 4);
- assert(src == rcx && src_pos == rdx, "mismatch in calling convention");
+ NOT_LP64(assert(src == rcx && src_pos == rdx, "mismatch in calling convention");)
- // pass arguments: may push as this is not a safepoint; SP must be fix at each safepoint
- __ pushl(length);
- __ pushl(dst_pos);
- __ pushl(dst);
- __ pushl(src_pos);
- __ pushl(src);
address entry = CAST_FROM_FN_PTR(address, Runtime1::arraycopy);
+
+ // pass arguments: may push as this is not a safepoint; SP must be fixed at each safepoint
+#ifdef _LP64
+ // The arguments are in java calling convention so we can trivially shift them to C
+ // convention
+ assert_different_registers(c_rarg0, j_rarg1, j_rarg2, j_rarg3, j_rarg4);
+ __ mov(c_rarg0, j_rarg0);
+ assert_different_registers(c_rarg1, j_rarg2, j_rarg3, j_rarg4);
+ __ mov(c_rarg1, j_rarg1);
+ assert_different_registers(c_rarg2, j_rarg3, j_rarg4);
+ __ mov(c_rarg2, j_rarg2);
+ assert_different_registers(c_rarg3, j_rarg4);
+ __ mov(c_rarg3, j_rarg3);
+#ifdef _WIN64
+ // Allocate ABI space for args but be sure to keep the stack aligned
+ __ subptr(rsp, 6*wordSize);
+ store_parameter(j_rarg4, 4);
+ __ call(RuntimeAddress(entry));
+ __ addptr(rsp, 6*wordSize);
+#else
+ __ mov(c_rarg4, j_rarg4);
+ __ call(RuntimeAddress(entry));
+#endif // _WIN64
+#else
+ __ push(length);
+ __ push(dst_pos);
+ __ push(dst);
+ __ push(src_pos);
+ __ push(src);
__ call_VM_leaf(entry, 5); // removes pushed parameter from the stack
+#endif // _LP64
+
__ cmpl(rax, 0);
__ jcc(Assembler::equal, *stub->continuation());
// Reload values from the stack so they are where the stub
// expects them.
- __ movl (dst, Address(rsp, 0*BytesPerWord));
- __ movl (dst_pos, Address(rsp, 1*BytesPerWord));
- __ movl (length, Address(rsp, 2*BytesPerWord));
- __ movl (src_pos, Address(rsp, 3*BytesPerWord));
- __ movl (src, Address(rsp, 4*BytesPerWord));
+ __ movptr (dst, Address(rsp, 0*BytesPerWord));
+ __ movptr (dst_pos, Address(rsp, 1*BytesPerWord));
+ __ movptr (length, Address(rsp, 2*BytesPerWord));
+ __ movptr (src_pos, Address(rsp, 3*BytesPerWord));
+ __ movptr (src, Address(rsp, 4*BytesPerWord));
__ jmp(*stub->entry());
__ bind(*stub->continuation());
@@ -2769,13 +3084,15 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Address src_klass_addr = Address(src, oopDesc::klass_offset_in_bytes());
Address dst_klass_addr = Address(dst, oopDesc::klass_offset_in_bytes());
+ // length and the pos arguments are all sign extended at this point on 64bit
+
// test for NULL
if (flags & LIR_OpArrayCopy::src_null_check) {
- __ testl(src, src);
+ __ testptr(src, src);
__ jcc(Assembler::zero, *stub->entry());
}
if (flags & LIR_OpArrayCopy::dst_null_check) {
- __ testl(dst, dst);
+ __ testptr(dst, dst);
__ jcc(Assembler::zero, *stub->entry());
}
@@ -2794,19 +3111,19 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}
if (flags & LIR_OpArrayCopy::src_range_check) {
- __ leal(tmp, Address(src_pos, length, Address::times_1, 0));
+ __ lea(tmp, Address(src_pos, length, Address::times_1, 0));
__ cmpl(tmp, src_length_addr);
__ jcc(Assembler::above, *stub->entry());
}
if (flags & LIR_OpArrayCopy::dst_range_check) {
- __ leal(tmp, Address(dst_pos, length, Address::times_1, 0));
+ __ lea(tmp, Address(dst_pos, length, Address::times_1, 0));
__ cmpl(tmp, dst_length_addr);
__ jcc(Assembler::above, *stub->entry());
}
if (flags & LIR_OpArrayCopy::type_check) {
- __ movl(tmp, src_klass_addr);
- __ cmpl(tmp, dst_klass_addr);
+ __ movptr(tmp, src_klass_addr);
+ __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::notEqual, *stub->entry());
}
@@ -2822,14 +3139,14 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
Label known_ok, halt;
__ movoop(tmp, default_type->encoding());
if (basic_type != T_OBJECT) {
- __ cmpl(tmp, dst_klass_addr);
+ __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::notEqual, halt);
- __ cmpl(tmp, src_klass_addr);
+ __ cmpptr(tmp, src_klass_addr);
__ jcc(Assembler::equal, known_ok);
} else {
- __ cmpl(tmp, dst_klass_addr);
+ __ cmpptr(tmp, dst_klass_addr);
__ jcc(Assembler::equal, known_ok);
- __ cmpl(src, dst);
+ __ cmpptr(src, dst);
__ jcc(Assembler::equal, known_ok);
}
__ bind(halt);
@@ -2838,14 +3155,24 @@ void LIR_Assembler::emit_arraycopy(LIR_OpArrayCopy* op) {
}
#endif
- __ leal(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
- store_parameter(tmp, 0);
- __ leal(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
- store_parameter(tmp, 1);
if (shift_amount > 0 && basic_type != T_OBJECT) {
- __ shll(length, shift_amount);
+ __ shlptr(length, shift_amount);
}
+
+#ifdef _LP64
+ assert_different_registers(c_rarg0, dst, dst_pos, length);
+ __ lea(c_rarg0, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
+ assert_different_registers(c_rarg1, length);
+ __ lea(c_rarg1, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
+ __ mov(c_rarg2, length);
+
+#else
+ __ lea(tmp, Address(src, src_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
+ store_parameter(tmp, 0);
+ __ lea(tmp, Address(dst, dst_pos, scale, arrayOopDesc::base_offset_in_bytes(basic_type)));
+ store_parameter(tmp, 1);
store_parameter(length, 2);
+#endif // _LP64
if (basic_type == T_OBJECT) {
__ call_VM_leaf(CAST_FROM_FN_PTR(address, Runtime1::oop_arraycopy), 0);
} else {
@@ -2945,13 +3272,13 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
}
}
} else {
- __ movl(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
+ __ movptr(recv, Address(recv, oopDesc::klass_offset_in_bytes()));
Label update_done;
uint i;
for (i = 0; i < VirtualCallData::row_limit(); i++) {
Label next_test;
// See if the receiver is receiver[n].
- __ cmpl(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
+ __ cmpptr(recv, Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i))));
__ jcc(Assembler::notEqual, next_test);
Address data_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i)));
__ addl(data_addr, DataLayout::counter_increment);
@@ -2963,9 +3290,9 @@ void LIR_Assembler::emit_profile_call(LIR_OpProfileCall* op) {
for (i = 0; i < VirtualCallData::row_limit(); i++) {
Label next_test;
Address recv_addr(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_offset(i)));
- __ cmpl(recv_addr, NULL_WORD);
+ __ cmpptr(recv_addr, (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, next_test);
- __ movl(recv_addr, recv);
+ __ movptr(recv_addr, recv);
__ movl(Address(mdo, md->byte_offset_of_slot(data, VirtualCallData::receiver_count_offset(i))), DataLayout::counter_increment);
if (i < (VirtualCallData::row_limit() - 1)) {
__ jmp(update_done);
@@ -2985,7 +3312,7 @@ void LIR_Assembler::emit_delay(LIR_OpDelay*) {
void LIR_Assembler::monitor_address(int monitor_no, LIR_Opr dst) {
- __ leal(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
+ __ lea(dst->as_register(), frame_map()->address_for_monitor_lock(monitor_no));
}
@@ -3001,6 +3328,11 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
} else if (left->is_double_cpu()) {
Register lo = left->as_register_lo();
+#ifdef _LP64
+ Register dst = dest->as_register_lo();
+ __ movptr(dst, lo);
+ __ negptr(dst);
+#else
Register hi = left->as_register_hi();
__ lneg(hi, lo);
if (dest->as_register_lo() == hi) {
@@ -3011,6 +3343,7 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
move_regs(lo, dest->as_register_lo());
move_regs(hi, dest->as_register_hi());
}
+#endif // _LP64
} else if (dest->is_single_xmm()) {
if (left->as_xmm_float_reg() != dest->as_xmm_float_reg()) {
@@ -3039,8 +3372,9 @@ void LIR_Assembler::negate(LIR_Opr left, LIR_Opr dest) {
void LIR_Assembler::leal(LIR_Opr addr, LIR_Opr dest) {
assert(addr->is_address() && dest->is_register(), "check");
- Register reg = dest->as_register();
- __ leal(dest->as_register(), as_Address(addr->as_address_ptr()));
+ Register reg;
+ reg = dest->as_pointer_register();
+ __ lea(reg, as_Address(addr->as_address_ptr()));
}
@@ -3063,9 +3397,13 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
if (src->is_double_xmm()) {
if (dest->is_double_cpu()) {
- __ movd(dest->as_register_lo(), src->as_xmm_double_reg());
+#ifdef _LP64
+ __ movdq(dest->as_register_lo(), src->as_xmm_double_reg());
+#else
+ __ movdl(dest->as_register_lo(), src->as_xmm_double_reg());
__ psrlq(src->as_xmm_double_reg(), 32);
- __ movd(dest->as_register_hi(), src->as_xmm_double_reg());
+ __ movdl(dest->as_register_hi(), src->as_xmm_double_reg());
+#endif // _LP64
} else if (dest->is_double_stack()) {
__ movdbl(frame_map()->address_for_slot(dest->double_stack_ix()), src->as_xmm_double_reg());
} else if (dest->is_address()) {
@@ -3109,7 +3447,8 @@ void LIR_Assembler::volatile_move_op(LIR_Opr src, LIR_Opr dest, BasicType type,
void LIR_Assembler::membar() {
- __ membar();
+ // QQQ sparc TSO uses this,
+ __ membar( Assembler::Membar_mask_bits(Assembler::StoreLoad));
}
void LIR_Assembler::membar_acquire() {
@@ -3124,7 +3463,12 @@ void LIR_Assembler::membar_release() {
void LIR_Assembler::get_thread(LIR_Opr result_reg) {
assert(result_reg->is_register(), "check");
+#ifdef _LP64
+ // __ get_thread(result_reg->as_register_lo());
+ __ mov(result_reg->as_register(), r15_thread);
+#else
__ get_thread(result_reg->as_register());
+#endif // _LP64
}
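
A note on the conditional macros used throughout the rewritten assembler code above: NOT_LP64(...) and LP64_ONLY(...) select code per word size at preprocessing time, so one source file can carry both the 32-bit and 64-bit sequences. A minimal sketch of how such macros are typically defined (the real definitions live in HotSpot's globalDefinitions.hpp; this is an illustration, not a quote of that file):

    #ifdef _LP64
      #define LP64_ONLY(code) code
      #define NOT_LP64(code)
    #else
      #define LP64_ONLY(code)
      #define NOT_LP64(code) code
    #endif

With these, a line such as NOT_LP64(__ adcl(lreg_hi, rreg_hi)); compiles to the add-with-carry on 32-bit builds and to nothing on 64-bit builds, which is why the 64-bit paths above only ever touch the lo half of a double-cpu operand.
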
diff --git a/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp b/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp
index 5621a95a3..41747a1f4 100644
--- a/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp
+++ b/src/cpu/x86/vm/c1_LIRAssembler_x86.hpp
@@ -36,13 +36,20 @@
address float_constant(float f);
address double_constant(double d);
+ bool is_literal_address(LIR_Address* addr);
+
+ // When we need to use something other than rscratch1 use this
+ // method.
+ Address as_Address(LIR_Address* addr, Register tmp);
+
+
public:
void store_parameter(Register r, int offset_from_esp_in_words);
void store_parameter(jint c, int offset_from_esp_in_words);
void store_parameter(jobject c, int offset_from_esp_in_words);
- enum { call_stub_size = 15,
+ enum { call_stub_size = NOT_LP64(15) LP64_ONLY(28),
exception_handler_size = DEBUG_ONLY(1*K) NOT_DEBUG(175),
- deopt_handler_size = 10
+ deopt_handler_size = NOT_LP64(10) LP64_ONLY(17)
};
diff --git a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
index d2f9e2d35..b8c29fb09 100644
--- a/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
+++ b/src/cpu/x86/vm/c1_LIRGenerator_x86.cpp
@@ -77,7 +77,7 @@ LIR_Opr LIRGenerator::result_register_for(ValueType* type, bool callee) {
switch (type->tag()) {
case intTag: opr = FrameMap::rax_opr; break;
case objectTag: opr = FrameMap::rax_oop_opr; break;
- case longTag: opr = FrameMap::rax_rdx_long_opr; break;
+ case longTag: opr = FrameMap::long0_opr; break;
case floatTag: opr = UseSSE >= 1 ? FrameMap::xmm0_float_opr : FrameMap::fpu0_float_opr; break;
case doubleTag: opr = UseSSE >= 2 ? FrameMap::xmm0_double_opr : FrameMap::fpu0_double_opr; break;
@@ -117,12 +117,14 @@ bool LIRGenerator::can_store_as_constant(Value v, BasicType type) const {
bool LIRGenerator::can_inline_as_constant(Value v) const {
+ if (v->type()->tag() == longTag) return false;
return v->type()->tag() != objectTag ||
(v->type()->is_constant() && v->type()->as_ObjectType()->constant_value()->is_null_object());
}
bool LIRGenerator::can_inline_as_constant(LIR_Const* c) const {
+ if (c->type() == T_LONG) return false;
return c->type() != T_OBJECT || c->as_jobject() == NULL;
}
@@ -155,6 +157,13 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
addr = new LIR_Address(array_opr,
offset_in_bytes + index_opr->as_jint() * elem_size, type);
} else {
+#ifdef _LP64
+ if (index_opr->type() == T_INT) {
+ LIR_Opr tmp = new_register(T_LONG);
+ __ convert(Bytecodes::_i2l, index_opr, tmp);
+ index_opr = tmp;
+ }
+#endif // _LP64
addr = new LIR_Address(array_opr,
index_opr,
LIR_Address::scale(type),
@@ -164,7 +173,7 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
// This store will need a precise card mark, so go ahead and
// compute the full adddres instead of computing once for the
// store and again for the card mark.
- LIR_Opr tmp = new_register(T_INT);
+ LIR_Opr tmp = new_pointer_register();
__ leal(LIR_OprFact::address(addr), tmp);
return new LIR_Address(tmp, 0, type);
} else {
@@ -174,9 +183,8 @@ LIR_Address* LIRGenerator::emit_array_address(LIR_Opr array_opr, LIR_Opr index_o
void LIRGenerator::increment_counter(address counter, int step) {
- LIR_Opr temp = new_register(T_INT);
- LIR_Opr pointer = new_register(T_INT);
- __ move(LIR_OprFact::intConst((int)counter), pointer);
+ LIR_Opr pointer = new_pointer_register();
+ __ move(LIR_OprFact::intptrConst(counter), pointer);
LIR_Address* addr = new LIR_Address(pointer, 0, T_INT);
increment_counter(addr, step);
}
@@ -481,7 +489,7 @@ void LIRGenerator::do_ArithmeticOp_Long(ArithmeticOp* x) {
left.load_item();
right.load_item();
- LIR_Opr reg = FrameMap::rax_rdx_long_opr;
+ LIR_Opr reg = FrameMap::long0_opr;
arithmetic_op_long(x->op(), reg, left.result(), right.result(), NULL);
LIR_Opr result = rlock_result(x);
__ move(reg, result);
@@ -690,10 +698,10 @@ void LIRGenerator::do_AttemptUpdate(Intrinsic* x) {
LIRItem new_value (x->argument_at(2), this); // replace field with new_value if it matches cmp_value
// compare value must be in rdx,eax (hi,lo); may be destroyed by cmpxchg8 instruction
- cmp_value.load_item_force(FrameMap::rax_rdx_long_opr);
+ cmp_value.load_item_force(FrameMap::long0_opr);
// new value must be in rcx,ebx (hi,lo)
- new_value.load_item_force(FrameMap::rbx_rcx_long_opr);
+ new_value.load_item_force(FrameMap::long1_opr);
// object pointer register is overwritten with field address
obj.load_item();
@@ -720,7 +728,10 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
LIRItem val (x->argument_at(3), this); // replace field with val if matches cmp
assert(obj.type()->tag() == objectTag, "invalid type");
- assert(offset.type()->tag() == intTag, "invalid type");
+
+ // In 64bit the type can be long, sparc doesn't have this assert
+ // assert(offset.type()->tag() == intTag, "invalid type");
+
assert(cmp.type()->tag() == type->tag(), "invalid type");
assert(val.type()->tag() == type->tag(), "invalid type");
@@ -735,8 +746,8 @@ void LIRGenerator::do_CompareAndSwap(Intrinsic* x, ValueType* type) {
cmp.load_item_force(FrameMap::rax_opr);
val.load_item();
} else if (type == longType) {
- cmp.load_item_force(FrameMap::rax_rdx_long_opr);
- val.load_item_force(FrameMap::rbx_rcx_long_opr);
+ cmp.load_item_force(FrameMap::long0_opr);
+ val.load_item_force(FrameMap::long1_opr);
} else {
ShouldNotReachHere();
}
@@ -833,12 +844,33 @@ void LIRGenerator::do_ArrayCopy(Intrinsic* x) {
// operands for arraycopy must use fixed registers, otherwise
// LinearScan will fail allocation (because arraycopy always needs a
// call)
+
+#ifndef _LP64
src.load_item_force (FrameMap::rcx_oop_opr);
src_pos.load_item_force (FrameMap::rdx_opr);
dst.load_item_force (FrameMap::rax_oop_opr);
dst_pos.load_item_force (FrameMap::rbx_opr);
length.load_item_force (FrameMap::rdi_opr);
LIR_Opr tmp = (FrameMap::rsi_opr);
+#else
+
+ // The java calling convention will give us enough registers
+ // so that on the stub side the args will be perfect already.
+ // On the other slow/special case side we call C and the arg
+ // positions are not similar enough to pick one as the best.
+ // Also because the java calling convention is a "shifted" version
+ // of the C convention we can process the java args trivially into C
+ // args without worry of overwriting during the xfer
+
+ src.load_item_force (FrameMap::as_oop_opr(j_rarg0));
+ src_pos.load_item_force (FrameMap::as_opr(j_rarg1));
+ dst.load_item_force (FrameMap::as_oop_opr(j_rarg2));
+ dst_pos.load_item_force (FrameMap::as_opr(j_rarg3));
+ length.load_item_force (FrameMap::as_opr(j_rarg4));
+
+ LIR_Opr tmp = FrameMap::as_opr(j_rarg5);
+#endif // LP64
+
set_no_result(x);
int flags;
@@ -857,7 +889,7 @@ LIR_Opr fixed_register_for(BasicType type) {
case T_FLOAT: return FrameMap::fpu0_float_opr;
case T_DOUBLE: return FrameMap::fpu0_double_opr;
case T_INT: return FrameMap::rax_opr;
- case T_LONG: return FrameMap::rax_rdx_long_opr;
+ case T_LONG: return FrameMap::long0_opr;
default: ShouldNotReachHere(); return LIR_OprFact::illegalOpr;
}
}
@@ -1161,9 +1193,13 @@ void LIRGenerator::do_If(If* x) {
LIR_Opr LIRGenerator::getThreadPointer() {
+#ifdef _LP64
+ return FrameMap::as_pointer_opr(r15_thread);
+#else
LIR_Opr result = new_register(T_INT);
__ get_thread(result);
return result;
+#endif // _LP64
}
void LIRGenerator::trace_block_entry(BlockBegin* block) {
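
The do_ArrayCopy change above leans on the remark that the 64-bit Java argument registers are a "shifted" version of the C ones. As an illustration of why the ascending j_rarg-to-c_rarg copies in emit_arraycopy never clobber a value that still has to be read (System V AMD64 build assumed here; the Win64 register assignment differs):

    c_rarg0..c_rarg5 = rdi, rsi, rdx, rcx, r8, r9
    j_rarg0..j_rarg4 = rsi, rdx, rcx, r8,  r9      (i.e. j_rarg(i) == c_rarg(i+1))

so each mov(c_rarg(i), j_rarg(i)) reads a register that is only written by the following copy, and the register it writes either has already been consumed or (for c_rarg0) only held the scratch j_rarg5 value.
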
diff --git a/src/cpu/x86/vm/c1_LinearScan_x86.hpp b/src/cpu/x86/vm/c1_LinearScan_x86.hpp
index cdfa446da..73510cd55 100644
--- a/src/cpu/x86/vm/c1_LinearScan_x86.hpp
+++ b/src/cpu/x86/vm/c1_LinearScan_x86.hpp
@@ -23,18 +23,29 @@
*/
inline bool LinearScan::is_processed_reg_num(int reg_num) {
+#ifndef _LP64
// rsp and rbp (numbers 6 ancd 7) are ignored
assert(FrameMap::rsp_opr->cpu_regnr() == 6, "wrong assumption below");
assert(FrameMap::rbp_opr->cpu_regnr() == 7, "wrong assumption below");
assert(reg_num >= 0, "invalid reg_num");
return reg_num < 6 || reg_num > 7;
+#else
+ // rsp and rbp, r10, r15 (numbers 12 through 15) are ignored
+ assert(FrameMap::r10_opr->cpu_regnr() == 12, "wrong assumption below");
+ assert(FrameMap::r15_opr->cpu_regnr() == 13, "wrong assumption below");
+ assert(FrameMap::rsp_opr->cpu_regnrLo() == 14, "wrong assumption below");
+ assert(FrameMap::rbp_opr->cpu_regnrLo() == 15, "wrong assumption below");
+ assert(reg_num >= 0, "invalid reg_num");
+
+ return reg_num < 12 || reg_num > 15;
+#endif // _LP64
}
inline int LinearScan::num_physical_regs(BasicType type) {
// Intel requires two cpu registers for long,
// but requires only one fpu register for double
- if (type == T_LONG) {
+ if (LP64_ONLY(false &&) type == T_LONG) {
return 2;
}
return 1;
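
A small illustration (not part of the patch) of the LP64_ONLY(false &&) trick used just above: after macro expansion the long special case folds away entirely on 64-bit builds, while 32-bit builds keep the original test.

    // 64-bit expansion:  if (false && type == T_LONG) { return 2; }   // dead code, longs fit in one register
    // 32-bit expansion:  if (type == T_LONG)          { return 2; }   // longs still need a register pair

The same idiom appears in emit_compare_and_swap above to disable the 32-bit lir_cas_long path on 64-bit builds without duplicating the surrounding control flow.
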
diff --git a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
index 27024aa8a..8af0ceeb8 100644
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.cpp
@@ -26,18 +26,17 @@
#include "incls/_c1_MacroAssembler_x86.cpp.incl"
int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr, Register scratch, Label& slow_case) {
- const int aligned_mask = 3;
+ const int aligned_mask = BytesPerWord - 1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert(hdr == rax, "hdr must be rax, for the cmpxchg instruction");
assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
- assert(BytesPerWord == 4, "adjust aligned_mask and code");
Label done;
int null_check_offset = -1;
verify_oop(obj);
// save object being locked into the BasicObjectLock
- movl(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
+ movptr(Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()), obj);
if (UseBiasedLocking) {
assert(scratch != noreg, "should have scratch register at this point");
@@ -47,16 +46,16 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
}
// Load object header
- movl(hdr, Address(obj, hdr_offset));
+ movptr(hdr, Address(obj, hdr_offset));
// and mark it as unlocked
- orl(hdr, markOopDesc::unlocked_value);
+ orptr(hdr, markOopDesc::unlocked_value);
// save unlocked object header into the displaced header location on the stack
- movl(Address(disp_hdr, 0), hdr);
+ movptr(Address(disp_hdr, 0), hdr);
// test if object header is still the same (i.e. unlocked), and if so, store the
// displaced header address in the object header - if it is not the same, get the
// object header instead
if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
- cmpxchg(disp_hdr, Address(obj, hdr_offset));
+ cmpxchgptr(disp_hdr, Address(obj, hdr_offset));
// if the object header was the same, we're done
if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::equal,
@@ -76,11 +75,11 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
//
// assuming both the stack pointer and page_size have their least
// significant 2 bits cleared and page_size is a power of 2
- subl(hdr, rsp);
- andl(hdr, aligned_mask - os::vm_page_size());
+ subptr(hdr, rsp);
+ andptr(hdr, aligned_mask - os::vm_page_size());
// for recursive locking, the result is zero => save it in the displaced header
// location (NULL in the displaced hdr location indicates recursive locking)
- movl(Address(disp_hdr, 0), hdr);
+ movptr(Address(disp_hdr, 0), hdr);
// otherwise we don't care about the result and handle locking via runtime call
jcc(Assembler::notZero, slow_case);
// done
@@ -90,35 +89,34 @@ int C1_MacroAssembler::lock_object(Register hdr, Register obj, Register disp_hdr
void C1_MacroAssembler::unlock_object(Register hdr, Register obj, Register disp_hdr, Label& slow_case) {
- const int aligned_mask = 3;
+ const int aligned_mask = BytesPerWord - 1;
const int hdr_offset = oopDesc::mark_offset_in_bytes();
assert(disp_hdr == rax, "disp_hdr must be rax, for the cmpxchg instruction");
assert(hdr != obj && hdr != disp_hdr && obj != disp_hdr, "registers must be different");
- assert(BytesPerWord == 4, "adjust aligned_mask and code");
Label done;
if (UseBiasedLocking) {
// load object
- movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+ movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
biased_locking_exit(obj, hdr, done);
}
// load displaced header
- movl(hdr, Address(disp_hdr, 0));
+ movptr(hdr, Address(disp_hdr, 0));
// if the loaded hdr is NULL we had recursive locking
- testl(hdr, hdr);
+ testptr(hdr, hdr);
// if we had recursive locking, we are done
jcc(Assembler::zero, done);
if (!UseBiasedLocking) {
// load object
- movl(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
+ movptr(obj, Address(disp_hdr, BasicObjectLock::obj_offset_in_bytes()));
}
verify_oop(obj);
// test if object header is pointing to the displaced header, and if so, restore
// the displaced header in the object - if the object header is not pointing to
// the displaced header, get the object header instead
if (os::is_MP()) MacroAssembler::lock(); // must be immediately before cmpxchg!
- cmpxchg(hdr, Address(obj, hdr_offset));
+ cmpxchgptr(hdr, Address(obj, hdr_offset));
// if the object header was not pointing to the displaced header,
// we do unlocking via runtime call
jcc(Assembler::notEqual, slow_case);
@@ -141,13 +139,14 @@ void C1_MacroAssembler::initialize_header(Register obj, Register klass, Register
assert_different_registers(obj, klass, len);
if (UseBiasedLocking && !len->is_valid()) {
assert_different_registers(obj, klass, len, t1, t2);
- movl(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
- movl(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
+ movptr(t1, Address(klass, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes()), t1);
} else {
- movl(Address(obj, oopDesc::mark_offset_in_bytes ()), (int)markOopDesc::prototype());
+ // This assumes that all prototype bits fit in an int32_t
+ movptr(Address(obj, oopDesc::mark_offset_in_bytes ()), (int32_t)(intptr_t)markOopDesc::prototype());
}
- movl(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
+ movptr(Address(obj, oopDesc::klass_offset_in_bytes()), klass);
if (len->is_valid()) {
movl(Address(obj, arrayOopDesc::length_offset_in_bytes()), len);
}
@@ -160,25 +159,27 @@ void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int
assert(obj != len_in_bytes && obj != t1 && t1 != len_in_bytes, "registers must be different");
assert((hdr_size_in_bytes & (BytesPerWord - 1)) == 0, "header size is not a multiple of BytesPerWord");
Register index = len_in_bytes;
- subl(index, hdr_size_in_bytes);
+ // index is positive and ptr sized
+ subptr(index, hdr_size_in_bytes);
jcc(Assembler::zero, done);
// initialize topmost word, divide index by 2, check if odd and test if zero
// note: for the remaining code to work, index must be a multiple of BytesPerWord
#ifdef ASSERT
{ Label L;
- testl(index, BytesPerWord - 1);
+ testptr(index, BytesPerWord - 1);
jcc(Assembler::zero, L);
stop("index is not a multiple of BytesPerWord");
bind(L);
}
#endif
- xorl(t1, t1); // use _zero reg to clear memory (shorter code)
+ xorptr(t1, t1); // use _zero reg to clear memory (shorter code)
if (UseIncDec) {
- shrl(index, 3); // divide by 8 and set carry flag if bit 2 was set
+ shrptr(index, 3); // divide by 8/16 and set carry flag if bit 2 was set
} else {
- shrl(index, 2); // use 2 instructions to avoid partial flag stall
- shrl(index, 1);
+ shrptr(index, 2); // use 2 instructions to avoid partial flag stall
+ shrptr(index, 1);
}
+#ifndef _LP64
// index could have been not a multiple of 8 (i.e., bit 2 was set)
{ Label even;
// note: if index was a multiple of 8, than it cannot
@@ -186,16 +187,17 @@ void C1_MacroAssembler::initialize_body(Register obj, Register len_in_bytes, int
// => if it is even, we don't need to check for 0 again
jcc(Assembler::carryClear, even);
// clear topmost word (no jump needed if conditional assignment would work here)
- movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
+ movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 0*BytesPerWord), t1);
// index could be 0 now, need to check again
jcc(Assembler::zero, done);
bind(even);
}
+#endif // !_LP64
// initialize remaining object fields: rdx is a multiple of 2 now
{ Label loop;
bind(loop);
- movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
- movl(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);
+ movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 1*BytesPerWord), t1);
+ NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - 2*BytesPerWord), t1);)
decrement(index);
jcc(Assembler::notZero, loop);
}
@@ -227,30 +229,30 @@ void C1_MacroAssembler::initialize_object(Register obj, Register klass, Register
const Register index = t2;
const int threshold = 6 * BytesPerWord; // approximate break even point for code size (see comments below)
if (var_size_in_bytes != noreg) {
- movl(index, var_size_in_bytes);
+ mov(index, var_size_in_bytes);
initialize_body(obj, index, hdr_size_in_bytes, t1_zero);
} else if (con_size_in_bytes <= threshold) {
// use explicit null stores
// code size = 2 + 3*n bytes (n = number of fields to clear)
- xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
+ xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
for (int i = hdr_size_in_bytes; i < con_size_in_bytes; i += BytesPerWord)
- movl(Address(obj, i), t1_zero);
+ movptr(Address(obj, i), t1_zero);
} else if (con_size_in_bytes > hdr_size_in_bytes) {
// use loop to null out the fields
// code size = 16 bytes for even n (n = number of fields to clear)
// initialize last object field first if odd number of fields
- xorl(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
- movl(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
+ xorptr(t1_zero, t1_zero); // use t1_zero reg to clear memory (shorter code)
+ movptr(index, (con_size_in_bytes - hdr_size_in_bytes) >> 3);
// initialize last object field if constant size is odd
if (((con_size_in_bytes - hdr_size_in_bytes) & 4) != 0)
- movl(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
+ movptr(Address(obj, con_size_in_bytes - (1*BytesPerWord)), t1_zero);
// initialize remaining object fields: rdx is a multiple of 2
{ Label loop;
bind(loop);
- movl(Address(obj, index, Address::times_8,
- hdr_size_in_bytes - (1*BytesPerWord)), t1_zero);
- movl(Address(obj, index, Address::times_8,
- hdr_size_in_bytes - (2*BytesPerWord)), t1_zero);
+ movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (1*BytesPerWord)),
+ t1_zero);
+ NOT_LP64(movptr(Address(obj, index, Address::times_8, hdr_size_in_bytes - (2*BytesPerWord)),
+ t1_zero);)
decrement(index);
jcc(Assembler::notZero, loop);
}
@@ -269,17 +271,17 @@ void C1_MacroAssembler::allocate_array(Register obj, Register len, Register t1,
assert_different_registers(obj, len, t1, t2, klass);
// determine alignment mask
- assert(BytesPerWord == 4, "must be a multiple of 2 for masking code to work");
+ assert(!(BytesPerWord & 1), "must be a multiple of 2 for masking code to work");
// check for negative or excessive length
- cmpl(len, max_array_allocation_length);
+ cmpptr(len, (int32_t)max_array_allocation_length);
jcc(Assembler::above, slow_case);
const Register arr_size = t2; // okay to be the same
// align object end
- movl(arr_size, header_size * BytesPerWord + MinObjAlignmentInBytesMask);
- leal(arr_size, Address(arr_size, len, f));
- andl(arr_size, ~MinObjAlignmentInBytesMask);
+ movptr(arr_size, (int32_t)header_size * BytesPerWord + MinObjAlignmentInBytesMask);
+ lea(arr_size, Address(arr_size, len, f));
+ andptr(arr_size, ~MinObjAlignmentInBytesMask);
try_allocate(obj, arr_size, 0, t1, t2, slow_case);
@@ -305,12 +307,13 @@ void C1_MacroAssembler::inline_cache_check(Register receiver, Register iCache) {
// check against inline cache
assert(!MacroAssembler::needs_explicit_null_check(oopDesc::klass_offset_in_bytes()), "must add explicit null check");
int start_offset = offset();
- cmpl(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ cmpptr(iCache, Address(receiver, oopDesc::klass_offset_in_bytes()));
// if icache check fails, then jump to runtime routine
// Note: RECEIVER must still contain the receiver!
jump_cc(Assembler::notEqual,
RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
- assert(offset() - start_offset == 9, "check alignment in emit_method_entry");
+ const int ic_cmp_size = LP64_ONLY(10) NOT_LP64(9);
+ assert(offset() - start_offset == ic_cmp_size, "check alignment in emit_method_entry");
}
@@ -364,7 +367,7 @@ void C1_MacroAssembler::verify_stack_oop(int stack_offset) {
void C1_MacroAssembler::verify_not_null_oop(Register r) {
if (!VerifyOops) return;
Label not_null;
- testl(r, r);
+ testptr(r, r);
jcc(Assembler::notZero, not_null);
stop("non-null oop required");
bind(not_null);
@@ -373,12 +376,12 @@ void C1_MacroAssembler::verify_not_null_oop(Register r) {
void C1_MacroAssembler::invalidate_registers(bool inv_rax, bool inv_rbx, bool inv_rcx, bool inv_rdx, bool inv_rsi, bool inv_rdi) {
#ifdef ASSERT
- if (inv_rax) movl(rax, 0xDEAD);
- if (inv_rbx) movl(rbx, 0xDEAD);
- if (inv_rcx) movl(rcx, 0xDEAD);
- if (inv_rdx) movl(rdx, 0xDEAD);
- if (inv_rsi) movl(rsi, 0xDEAD);
- if (inv_rdi) movl(rdi, 0xDEAD);
+ if (inv_rax) movptr(rax, 0xDEAD);
+ if (inv_rbx) movptr(rbx, 0xDEAD);
+ if (inv_rcx) movptr(rcx, 0xDEAD);
+ if (inv_rdx) movptr(rdx, 0xDEAD);
+ if (inv_rsi) movptr(rsi, 0xDEAD);
+ if (inv_rdi) movptr(rdi, 0xDEAD);
#endif
}
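
The lock_object changes above make the displaced-header test word-size neutral (aligned_mask = BytesPerWord - 1). Below is a hypothetical standalone helper, not part of the patch, showing the arithmetic that the subptr/andptr sequence performs; the helper name and the 4096-byte page size are assumptions for the sketch:

    #include <cstdint>

    // Returns true when the header value looks like a pointer into the current
    // stack frame, i.e. it is word aligned and lies less than one page above rsp.
    // This mirrors: subptr(hdr, rsp); andptr(hdr, aligned_mask - os::vm_page_size());
    // followed by jcc(notZero, slow_case).
    static bool looks_like_recursive_lock(uintptr_t hdr, uintptr_t rsp,
                                          uintptr_t page_size = 4096) {
      const uintptr_t aligned_mask = sizeof(void*) - 1;   // BytesPerWord - 1
      const uintptr_t diff = hdr - rsp;
      return (diff & (aligned_mask - page_size)) == 0;
    }

A zero result lets the code store NULL into the displaced-header slot to mark a recursive lock; any other result falls through to the slow-case runtime call.
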
diff --git a/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp b/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp
index 62f6d4c14..d0b4230ad 100644
--- a/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp
+++ b/src/cpu/x86/vm/c1_MacroAssembler_x86.hpp
@@ -94,16 +94,17 @@
// Note: NEVER push values directly, but only through following push_xxx functions;
// This helps us to track the rsp changes compared to the entry rsp (->_rsp_offset)
- void push_jint (jint i) { _rsp_offset++; pushl(i); }
+ void push_jint (jint i) { _rsp_offset++; push(i); }
void push_oop (jobject o) { _rsp_offset++; pushoop(o); }
- void push_addr (Address a) { _rsp_offset++; pushl(a); }
- void push_reg (Register r) { _rsp_offset++; pushl(r); }
- void pop (Register r) { _rsp_offset--; popl (r); assert(_rsp_offset >= 0, "stack offset underflow"); }
+ // Seems to always be in wordSize
+ void push_addr (Address a) { _rsp_offset++; pushptr(a); }
+ void push_reg (Register r) { _rsp_offset++; push(r); }
+ void pop_reg (Register r) { _rsp_offset--; pop(r); assert(_rsp_offset >= 0, "stack offset underflow"); }
void dec_stack (int nof_words) {
_rsp_offset -= nof_words;
assert(_rsp_offset >= 0, "stack offset underflow");
- addl(rsp, wordSize * nof_words);
+ addptr(rsp, wordSize * nof_words);
}
void dec_stack_after_call (int nof_words) {
diff --git a/src/cpu/x86/vm/c1_Runtime1_x86.cpp b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
index be73c7936..01a232149 100644
--- a/src/cpu/x86/vm/c1_Runtime1_x86.cpp
+++ b/src/cpu/x86/vm/c1_Runtime1_x86.cpp
@@ -30,52 +30,58 @@
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, int args_size) {
// setup registers
- const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
+ const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread); // is callee-saved register (Visual C++ calling conventions)
assert(!(oop_result1->is_valid() || oop_result2->is_valid()) || oop_result1 != oop_result2, "registers must be different");
assert(oop_result1 != thread && oop_result2 != thread, "registers must be different");
assert(args_size >= 0, "illegal args_size");
+#ifdef _LP64
+ mov(c_rarg0, thread);
+ set_num_rt_args(0); // Nothing on stack
+#else
set_num_rt_args(1 + args_size);
// push java thread (becomes first argument of C function)
get_thread(thread);
- pushl(thread);
+ push(thread);
+#endif // _LP64
set_last_Java_frame(thread, noreg, rbp, NULL);
+
// do the call
call(RuntimeAddress(entry));
int call_offset = offset();
// verify callee-saved register
#ifdef ASSERT
guarantee(thread != rax, "change this code");
- pushl(rax);
+ push(rax);
{ Label L;
get_thread(rax);
- cmpl(thread, rax);
+ cmpptr(thread, rax);
jcc(Assembler::equal, L);
int3();
stop("StubAssembler::call_RT: rdi not callee saved?");
bind(L);
}
- popl(rax);
+ pop(rax);
#endif
reset_last_Java_frame(thread, true, false);
// discard thread and arguments
- addl(rsp, (1 + args_size)*BytesPerWord);
+ NOT_LP64(addptr(rsp, num_rt_args()*BytesPerWord));
// check for pending exceptions
{ Label L;
- cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
jcc(Assembler::equal, L);
// exception pending => remove activation and forward to exception handler
- movl(rax, Address(thread, Thread::pending_exception_offset()));
+ movptr(rax, Address(thread, Thread::pending_exception_offset()));
// make sure that the vm_results are cleared
if (oop_result1->is_valid()) {
- movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
+ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
}
if (oop_result2->is_valid()) {
- movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+ movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
}
if (frame_size() == no_frame_size) {
leave();
@@ -89,13 +95,13 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
}
// get oop results if there are any and reset the values in the thread
if (oop_result1->is_valid()) {
- movl(oop_result1, Address(thread, JavaThread::vm_result_offset()));
- movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
+ movptr(oop_result1, Address(thread, JavaThread::vm_result_offset()));
+ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
verify_oop(oop_result1);
}
if (oop_result2->is_valid()) {
- movl(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
- movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+ movptr(oop_result2, Address(thread, JavaThread::vm_result_2_offset()));
+ movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
verify_oop(oop_result2);
}
return call_offset;
@@ -103,22 +109,58 @@ int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address e
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1) {
- pushl(arg1);
+#ifdef _LP64
+ mov(c_rarg1, arg1);
+#else
+ push(arg1);
+#endif // _LP64
return call_RT(oop_result1, oop_result2, entry, 1);
}
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2) {
- pushl(arg2);
- pushl(arg1);
+#ifdef _LP64
+ if (c_rarg1 == arg2) {
+ if (c_rarg2 == arg1) {
+ xchgq(arg1, arg2);
+ } else {
+ mov(c_rarg2, arg2);
+ mov(c_rarg1, arg1);
+ }
+ } else {
+ mov(c_rarg1, arg1);
+ mov(c_rarg2, arg2);
+ }
+#else
+ push(arg2);
+ push(arg1);
+#endif // _LP64
return call_RT(oop_result1, oop_result2, entry, 2);
}
int StubAssembler::call_RT(Register oop_result1, Register oop_result2, address entry, Register arg1, Register arg2, Register arg3) {
- pushl(arg3);
- pushl(arg2);
- pushl(arg1);
+#ifdef _LP64
+ // if there is any conflict use the stack
+ if (arg1 == c_rarg2 || arg1 == c_rarg3 ||
+ arg2 == c_rarg1 || arg2 == c_rarg3 ||
+ arg3 == c_rarg1 || arg3 == c_rarg2) {
+ push(arg3);
+ push(arg2);
+ push(arg1);
+ pop(c_rarg1);
+ pop(c_rarg2);
+ pop(c_rarg3);
+ } else {
+ mov(c_rarg1, arg1);
+ mov(c_rarg2, arg2);
+ mov(c_rarg3, arg3);
+ }
+#else
+ push(arg3);
+ push(arg2);
+ push(arg1);
+#endif // _LP64
return call_RT(oop_result1, oop_result2, entry, 3);
}
@@ -154,7 +196,7 @@ void StubFrame::load_argument(int offset_in_words, Register reg) {
// + 3: argument with offset 1
// + 4: ...
- __ movl(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
+ __ movptr(reg, Address(rbp, (offset_in_words + 2) * BytesPerWord));
}
@@ -170,8 +212,8 @@ StubFrame::~StubFrame() {
#define __ sasm->
-const int float_regs_as_doubles_size_in_words = 16;
-const int xmm_regs_as_doubles_size_in_words = 16;
+const int float_regs_as_doubles_size_in_slots = pd_nof_fpu_regs_frame_map * 2;
+const int xmm_regs_as_doubles_size_in_slots = FrameMap::nof_xmm_regs * 2;
// Stack layout for saving/restoring all the registers needed during a runtime
// call (this includes deoptimization)
@@ -180,29 +222,61 @@ const int xmm_regs_as_doubles_size_in_words = 16;
// but the code in save_live_registers will take the argument count into
// account.
//
+#ifdef _LP64
+ #define SLOT2(x) x,
+ #define SLOT_PER_WORD 2
+#else
+ #define SLOT2(x)
+ #define SLOT_PER_WORD 1
+#endif // _LP64
+
enum reg_save_layout {
- dummy1,
- dummy2,
+ // 64bit needs to keep stack 16 byte aligned. So we add some alignment dummies to make that
+ // happen and will assert if the stack size we create is misaligned
+#ifdef _LP64
+ align_dummy_0, align_dummy_1,
+#endif // _LP64
+ dummy1, SLOT2(dummy1H) // 0, 4
+ dummy2, SLOT2(dummy2H) // 8, 12
// Two temps to be used as needed by users of save/restore callee registers
- temp_2_off,
- temp_1_off,
- xmm_regs_as_doubles_off,
- float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_words,
- fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_words,
- fpu_state_end_off = fpu_state_off + FPUStateSizeInWords,
- marker = fpu_state_end_off,
- extra_space_offset,
+ temp_2_off, SLOT2(temp_2H_off) // 16, 20
+ temp_1_off, SLOT2(temp_1H_off) // 24, 28
+ xmm_regs_as_doubles_off, // 32
+ float_regs_as_doubles_off = xmm_regs_as_doubles_off + xmm_regs_as_doubles_size_in_slots, // 160
+ fpu_state_off = float_regs_as_doubles_off + float_regs_as_doubles_size_in_slots, // 224
+ // fpu_state_end_off is exclusive
+ fpu_state_end_off = fpu_state_off + (FPUStateSizeInWords / SLOT_PER_WORD), // 352
+ marker = fpu_state_end_off, SLOT2(markerH) // 352, 356
+ extra_space_offset, // 360
+#ifdef _LP64
+ r15_off = extra_space_offset, r15H_off, // 360, 364
+ r14_off, r14H_off, // 368, 372
+ r13_off, r13H_off, // 376, 380
+ r12_off, r12H_off, // 384, 388
+ r11_off, r11H_off, // 392, 396
+ r10_off, r10H_off, // 400, 404
+ r9_off, r9H_off, // 408, 412
+ r8_off, r8H_off, // 416, 420
+ rdi_off, rdiH_off, // 424, 428
+#else
rdi_off = extra_space_offset,
- rsi_off,
- rbp_off,
- rsp_off,
- rbx_off,
- rdx_off,
- rcx_off,
- rax_off,
- saved_rbp_off,
- return_off,
- reg_save_frame_size, // As noted: neglects any parameters to runtime
+#endif // _LP64
+ rsi_off, SLOT2(rsiH_off) // 432, 436
+ rbp_off, SLOT2(rbpH_off) // 440, 444
+ rsp_off, SLOT2(rspH_off) // 448, 452
+ rbx_off, SLOT2(rbxH_off) // 456, 460
+ rdx_off, SLOT2(rdxH_off) // 464, 468
+ rcx_off, SLOT2(rcxH_off) // 472, 476
+ rax_off, SLOT2(raxH_off) // 480, 484
+ saved_rbp_off, SLOT2(saved_rbpH_off) // 488, 492
+ return_off, SLOT2(returnH_off) // 496, 500
+ reg_save_frame_size, // As noted: neglects any parameters to runtime // 504
+
+#ifdef _WIN64
+ c_rarg0_off = rcx_off,
+#else
+ c_rarg0_off = rdi_off,
+#endif // WIN64
// equates
@@ -229,18 +303,49 @@ enum reg_save_layout {
static OopMap* generate_oop_map(StubAssembler* sasm, int num_rt_args,
bool save_fpu_registers = true) {
- int frame_size = reg_save_frame_size + num_rt_args; // args + thread
- sasm->set_frame_size(frame_size);
+
+ // In 64bit all the args are in regs so there are no additional stack slots
+ LP64_ONLY(num_rt_args = 0);
+ LP64_ONLY(assert((reg_save_frame_size * VMRegImpl::stack_slot_size) % 16 == 0, "must be 16 byte aligned");)
+ int frame_size_in_slots = reg_save_frame_size + num_rt_args; // args + thread
+ sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
// record saved value locations in an OopMap
// locations are offsets from sp after runtime call; num_rt_args is number of arguments in call, including thread
- OopMap* map = new OopMap(frame_size, 0);
+ OopMap* map = new OopMap(frame_size_in_slots, 0);
map->set_callee_saved(VMRegImpl::stack2reg(rax_off + num_rt_args), rax->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg(rcx_off + num_rt_args), rcx->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg(rdx_off + num_rt_args), rdx->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg(rbx_off + num_rt_args), rbx->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg(rsi_off + num_rt_args), rsi->as_VMReg());
map->set_callee_saved(VMRegImpl::stack2reg(rdi_off + num_rt_args), rdi->as_VMReg());
+#ifdef _LP64
+ map->set_callee_saved(VMRegImpl::stack2reg(r8_off + num_rt_args), r8->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg(r9_off + num_rt_args), r9->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg(r10_off + num_rt_args), r10->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg(r11_off + num_rt_args), r11->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg(r12_off + num_rt_args), r12->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg(r13_off + num_rt_args), r13->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg(r14_off + num_rt_args), r14->as_VMReg());
+ map->set_callee_saved(VMRegImpl::stack2reg(r15_off + num_rt_args), r15->as_VMReg());
+
+ // The high halves must be recorded as well so the oop map covers the full 64-bit slots.
+ map->set_callee_saved(VMRegImpl::stack2reg(raxH_off + num_rt_args), rax->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(rcxH_off + num_rt_args), rcx->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(rdxH_off + num_rt_args), rdx->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(rbxH_off + num_rt_args), rbx->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(rsiH_off + num_rt_args), rsi->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(rdiH_off + num_rt_args), rdi->as_VMReg()->next());
+
+ map->set_callee_saved(VMRegImpl::stack2reg(r8H_off + num_rt_args), r8->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(r9H_off + num_rt_args), r9->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(r10H_off + num_rt_args), r10->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(r11H_off + num_rt_args), r11->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(r12H_off + num_rt_args), r12->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(r13H_off + num_rt_args), r13->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(r14H_off + num_rt_args), r14->as_VMReg()->next());
+ map->set_callee_saved(VMRegImpl::stack2reg(r15H_off + num_rt_args), r15->as_VMReg()->next());
+#endif // _LP64
if (save_fpu_registers) {
if (UseSSE < 2) {
@@ -288,30 +393,31 @@ static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
bool save_fpu_registers = true) {
__ block_comment("save_live_registers");
- int frame_size = reg_save_frame_size + num_rt_args; // args + thread
+ // 64-bit passes the args in registers to the C++ runtime
+ int frame_size_in_slots = reg_save_frame_size NOT_LP64(+ num_rt_args); // args + thread
// frame_size = round_to(frame_size, 4);
- sasm->set_frame_size(frame_size);
+ sasm->set_frame_size(frame_size_in_slots / VMRegImpl::slots_per_word );
- __ pushad(); // integer registers
+ __ pusha(); // integer registers
// assert(float_regs_as_doubles_off % 2 == 0, "misaligned offset");
// assert(xmm_regs_as_doubles_off % 2 == 0, "misaligned offset");
- __ subl(rsp, extra_space_offset * wordSize);
+ __ subptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
#ifdef ASSERT
- __ movl(Address(rsp, marker * wordSize), 0xfeedbeef);
+ __ movptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
#endif
if (save_fpu_registers) {
if (UseSSE < 2) {
// save FPU stack
- __ fnsave(Address(rsp, fpu_state_off * wordSize));
+ __ fnsave(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
__ fwait();
#ifdef ASSERT
Label ok;
- __ cmpw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std());
+ __ cmpw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
__ jccb(Assembler::equal, ok);
__ stop("corrupted control word detected");
__ bind(ok);
@@ -321,18 +427,18 @@ static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
// since fstp_d can cause FPU stack underflow exceptions. Write it
// into the on stack copy and then reload that to make sure that the
// current and future values are correct.
- __ movw(Address(rsp, fpu_state_off * wordSize), StubRoutines::fpu_cntrl_wrd_std());
- __ frstor(Address(rsp, fpu_state_off * wordSize));
+ __ movw(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size), StubRoutines::fpu_cntrl_wrd_std());
+ __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
// Save the FPU registers in de-opt-able form
- __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 0));
- __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 8));
- __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 16));
- __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 24));
- __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 32));
- __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 40));
- __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 48));
- __ fstp_d(Address(rsp, float_regs_as_doubles_off * BytesPerWord + 56));
+ __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
+ __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
+ __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+ __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+ __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+ __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+ __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+ __ fstp_d(Address(rsp, float_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
}
if (UseSSE >= 2) {
@@ -341,24 +447,34 @@ static OopMap* save_live_registers(StubAssembler* sasm, int num_rt_args,
// so always save them as doubles.
// note that float values are _not_ converted automatically, so for float values
// the second word contains only garbage data.
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 0), xmm0);
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 8), xmm1);
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2);
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3);
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4);
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5);
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6);
- __ movdbl(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
+#ifdef _LP64
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64), xmm8);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72), xmm9);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80), xmm10);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88), xmm11);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96), xmm12);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104), xmm13);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112), xmm14);
+ __ movdbl(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120), xmm15);
+#endif // _LP64
} else if (UseSSE == 1) {
// save XMM registers as float because double not supported without SSE2
- __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 0), xmm0);
- __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 8), xmm1);
- __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 16), xmm2);
- __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 24), xmm3);
- __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 32), xmm4);
- __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 40), xmm5);
- __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 48), xmm6);
- __ movflt(Address(rsp, xmm_regs_as_doubles_off * wordSize + 56), xmm7);
+ __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0), xmm0);
+ __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8), xmm1);
+ __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16), xmm2);
+ __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24), xmm3);
+ __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32), xmm4);
+ __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40), xmm5);
+ __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48), xmm6);
+ __ movflt(Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56), xmm7);
}
}
@@ -373,28 +489,38 @@ static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true)
if (restore_fpu_registers) {
if (UseSSE >= 2) {
// restore XMM registers
- __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize + 0));
- __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize + 8));
- __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16));
- __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24));
- __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32));
- __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40));
- __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48));
- __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56));
+ __ movdbl(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
+ __ movdbl(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
+ __ movdbl(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+ __ movdbl(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+ __ movdbl(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+ __ movdbl(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+ __ movdbl(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+ __ movdbl(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
+#ifdef _LP64
+ __ movdbl(xmm8, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 64));
+ __ movdbl(xmm9, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 72));
+ __ movdbl(xmm10, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 80));
+ __ movdbl(xmm11, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 88));
+ __ movdbl(xmm12, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 96));
+ __ movdbl(xmm13, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 104));
+ __ movdbl(xmm14, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 112));
+ __ movdbl(xmm15, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 120));
+#endif // _LP64
} else if (UseSSE == 1) {
// restore XMM registers
- __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * wordSize + 0));
- __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * wordSize + 8));
- __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * wordSize + 16));
- __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * wordSize + 24));
- __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * wordSize + 32));
- __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * wordSize + 40));
- __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * wordSize + 48));
- __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * wordSize + 56));
+ __ movflt(xmm0, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 0));
+ __ movflt(xmm1, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 8));
+ __ movflt(xmm2, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 16));
+ __ movflt(xmm3, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 24));
+ __ movflt(xmm4, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 32));
+ __ movflt(xmm5, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 40));
+ __ movflt(xmm6, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 48));
+ __ movflt(xmm7, Address(rsp, xmm_regs_as_doubles_off * VMRegImpl::stack_slot_size + 56));
}
if (UseSSE < 2) {
- __ frstor(Address(rsp, fpu_state_off * wordSize));
+ __ frstor(Address(rsp, fpu_state_off * VMRegImpl::stack_slot_size));
} else {
// check that FPU stack is really empty
__ verify_FPU(0, "restore_live_registers");
@@ -408,14 +534,14 @@ static void restore_fpu(StubAssembler* sasm, bool restore_fpu_registers = true)
#ifdef ASSERT
{
Label ok;
- __ cmpl(Address(rsp, marker * wordSize), 0xfeedbeef);
+ __ cmpptr(Address(rsp, marker * VMRegImpl::stack_slot_size), (int32_t)0xfeedbeef);
__ jcc(Assembler::equal, ok);
__ stop("bad offsets in frame");
__ bind(ok);
}
-#endif
+#endif // ASSERT
- __ addl(rsp, extra_space_offset * wordSize);
+ __ addptr(rsp, extra_space_offset * VMRegImpl::stack_slot_size);
}
@@ -423,7 +549,7 @@ static void restore_live_registers(StubAssembler* sasm, bool restore_fpu_registe
__ block_comment("restore_live_registers");
restore_fpu(sasm, restore_fpu_registers);
- __ popad();
+ __ popa();
}
@@ -432,14 +558,35 @@ static void restore_live_registers_except_rax(StubAssembler* sasm, bool restore_
restore_fpu(sasm, restore_fpu_registers);
- __ popl(rdi);
- __ popl(rsi);
- __ popl(rbp);
- __ popl(rbx); // skip this value
- __ popl(rbx);
- __ popl(rdx);
- __ popl(rcx);
- __ addl(rsp, 4);
+#ifdef _LP64
+ __ movptr(r15, Address(rsp, 0));
+ __ movptr(r14, Address(rsp, wordSize));
+ __ movptr(r13, Address(rsp, 2 * wordSize));
+ __ movptr(r12, Address(rsp, 3 * wordSize));
+ __ movptr(r11, Address(rsp, 4 * wordSize));
+ __ movptr(r10, Address(rsp, 5 * wordSize));
+ __ movptr(r9, Address(rsp, 6 * wordSize));
+ __ movptr(r8, Address(rsp, 7 * wordSize));
+ __ movptr(rdi, Address(rsp, 8 * wordSize));
+ __ movptr(rsi, Address(rsp, 9 * wordSize));
+ __ movptr(rbp, Address(rsp, 10 * wordSize));
+ // skip rsp
+ __ movptr(rbx, Address(rsp, 12 * wordSize));
+ __ movptr(rdx, Address(rsp, 13 * wordSize));
+ __ movptr(rcx, Address(rsp, 14 * wordSize));
+
+ __ addptr(rsp, 16 * wordSize);
+#else
+
+ __ pop(rdi);
+ __ pop(rsi);
+ __ pop(rbp);
+ __ pop(rbx); // skip this value
+ __ pop(rbx);
+ __ pop(rdx);
+ __ pop(rcx);
+ __ addptr(rsp, BytesPerWord);
+#endif // _LP64
}
@@ -465,10 +612,13 @@ OopMapSet* Runtime1::generate_exception_throw(StubAssembler* sasm, address targe
// load argument for exception that is passed as an argument into the stub
if (has_argument) {
- __ movl(temp_reg, Address(rbp, 2*BytesPerWord));
- __ pushl(temp_reg);
+#ifdef _LP64
+ __ movptr(c_rarg1, Address(rbp, 2*BytesPerWord));
+#else
+ __ movptr(temp_reg, Address(rbp, 2*BytesPerWord));
+ __ push(temp_reg);
+#endif // _LP64
}
-
int call_offset = __ call_RT(noreg, noreg, target, num_rt_args - 1);
OopMapSet* oop_maps = new OopMapSet();
@@ -486,7 +636,7 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
const Register exception_pc = rdx;
// other registers used in this stub
const Register real_return_addr = rbx;
- const Register thread = rdi;
+ const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
__ block_comment("generate_handle_exception");
@@ -503,19 +653,19 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
__ verify_not_null_oop(exception_oop);
// load address of JavaThread object for thread-local data
- __ get_thread(thread);
+ NOT_LP64(__ get_thread(thread);)
#ifdef ASSERT
// check that fields in JavaThread for exception oop and issuing pc are
// empty before writing to them
Label oop_empty;
- __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0);
+ __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::equal, oop_empty);
__ stop("exception oop already set");
__ bind(oop_empty);
Label pc_empty;
- __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
+ __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
__ jcc(Assembler::equal, pc_empty);
__ stop("exception pc already set");
__ bind(pc_empty);
@@ -523,15 +673,15 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
// save exception oop and issuing pc into JavaThread
// (exception handler will load it from here)
- __ movl(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
- __ movl(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
+ __ movptr(Address(thread, JavaThread::exception_oop_offset()), exception_oop);
+ __ movptr(Address(thread, JavaThread::exception_pc_offset()), exception_pc);
// save real return address (pc that called this stub)
- __ movl(real_return_addr, Address(rbp, 1*BytesPerWord));
- __ movl(Address(rsp, temp_1_off * BytesPerWord), real_return_addr);
+ __ movptr(real_return_addr, Address(rbp, 1*BytesPerWord));
+ __ movptr(Address(rsp, temp_1_off * VMRegImpl::stack_slot_size), real_return_addr);
// patch throwing pc into return address (has bci & oop map)
- __ movl(Address(rbp, 1*BytesPerWord), exception_pc);
+ __ movptr(Address(rbp, 1*BytesPerWord), exception_pc);
// compute the exception handler.
// the exception oop and the throwing pc are read from the fields in JavaThread
@@ -548,12 +698,12 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
// Do we have an exception handler in the nmethod?
Label no_handler;
Label done;
- __ testl(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, no_handler);
// exception handler found
// patch the return address -> the stub will directly return to the exception handler
- __ movl(Address(rbp, 1*BytesPerWord), rax);
+ __ movptr(Address(rbp, 1*BytesPerWord), rax);
// restore registers
restore_live_registers(sasm, save_fpu_registers);
@@ -568,18 +718,18 @@ void Runtime1::generate_handle_exception(StubAssembler *sasm, OopMapSet* oop_map
// there is no need to restore the registers
// restore the real return address that was saved before the RT-call
- __ movl(real_return_addr, Address(rsp, temp_1_off * BytesPerWord));
- __ movl(Address(rbp, 1*BytesPerWord), real_return_addr);
+ __ movptr(real_return_addr, Address(rsp, temp_1_off * VMRegImpl::stack_slot_size));
+ __ movptr(Address(rbp, 1*BytesPerWord), real_return_addr);
// load address of JavaThread object for thread-local data
- __ get_thread(thread);
+ NOT_LP64(__ get_thread(thread);)
// restore exception oop into rax, (convention for unwind code)
- __ movl(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
+ __ movptr(exception_oop, Address(thread, JavaThread::exception_oop_offset()));
// clear exception fields in JavaThread because they are no longer needed
// (fields must be cleared because they are processed by GC otherwise)
- __ movl(Address(thread, JavaThread::exception_oop_offset()), NULL_WORD);
- __ movl(Address(thread, JavaThread::exception_pc_offset()), NULL_WORD);
+ __ movptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
+ __ movptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
// pop the stub frame off
__ leave();
@@ -595,22 +745,22 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// other registers used in this stub
const Register exception_pc = rdx;
const Register handler_addr = rbx;
- const Register thread = rdi;
+ const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
// verify that only rax, is valid at this time
__ invalidate_registers(false, true, true, true, true, true);
#ifdef ASSERT
// check that fields in JavaThread for exception oop and issuing pc are empty
- __ get_thread(thread);
+ NOT_LP64(__ get_thread(thread);)
Label oop_empty;
- __ cmpl(Address(thread, JavaThread::exception_oop_offset()), 0);
+ __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), 0);
__ jcc(Assembler::equal, oop_empty);
__ stop("exception oop must be empty");
__ bind(oop_empty);
Label pc_empty;
- __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
+ __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), 0);
__ jcc(Assembler::equal, pc_empty);
__ stop("exception pc must be empty");
__ bind(pc_empty);
@@ -622,12 +772,12 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
// leave activation of nmethod
__ leave();
// store return address (is on top of stack after leave)
- __ movl(exception_pc, Address(rsp, 0));
+ __ movptr(exception_pc, Address(rsp, 0));
__ verify_oop(exception_oop);
// save exception oop from rax, to stack before call
- __ pushl(exception_oop);
+ __ push(exception_oop);
// search the exception handler address of the caller (using the return address)
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), exception_pc);
@@ -637,17 +787,17 @@ void Runtime1::generate_unwind_exception(StubAssembler *sasm) {
__ invalidate_registers(false, true, true, true, true, true);
// move result of call into correct register
- __ movl(handler_addr, rax);
+ __ movptr(handler_addr, rax);
// restore exception oop in rax, (required convention of exception handler)
- __ popl(exception_oop);
+ __ pop(exception_oop);
__ verify_oop(exception_oop);
// get throwing pc (= return address).
// rdx has been destroyed by the call, so it must be set again
// the pop is also necessary to simulate the effect of a ret(0)
- __ popl(exception_pc);
+ __ pop(exception_pc);
 // verify that there is really a valid exception in rax,
__ verify_not_null_oop(exception_oop);
@@ -677,12 +827,18 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
OopMap* oop_map = save_live_registers(sasm, num_rt_args);
- __ pushl(rax); // push dummy
+#ifdef _LP64
+ const Register thread = r15_thread;
+ // No need to worry about dummy
+ __ mov(c_rarg0, thread);
+#else
+ __ push(rax); // push dummy
const Register thread = rdi; // is callee-saved register (Visual C++ calling conventions)
// push java thread (becomes first argument of C function)
__ get_thread(thread);
- __ pushl(thread);
+ __ push(thread);
+#endif // _LP64
__ set_last_Java_frame(thread, noreg, rbp, NULL);
// do the call
__ call(RuntimeAddress(target));
@@ -691,27 +847,29 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
// verify callee-saved register
#ifdef ASSERT
guarantee(thread != rax, "change this code");
- __ pushl(rax);
+ __ push(rax);
{ Label L;
__ get_thread(rax);
- __ cmpl(thread, rax);
+ __ cmpptr(thread, rax);
__ jcc(Assembler::equal, L);
- __ stop("StubAssembler::call_RT: rdi not callee saved?");
+ __ stop("StubAssembler::call_RT: rdi/r15 not callee saved?");
__ bind(L);
}
- __ popl(rax);
+ __ pop(rax);
#endif
__ reset_last_Java_frame(thread, true, false);
- __ popl(rcx); // discard thread arg
- __ popl(rcx); // discard dummy
+#ifndef _LP64
+ __ pop(rcx); // discard thread arg
+ __ pop(rcx); // discard dummy
+#endif // _LP64
// check for pending exceptions
{ Label L;
- __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
// exception pending => remove activation and forward to exception handler
- __ testl(rax, rax); // have we deoptimized?
+ __ testptr(rax, rax); // have we deoptimized?
__ jump_cc(Assembler::equal,
RuntimeAddress(Runtime1::entry_for(Runtime1::forward_exception_id)));
@@ -719,38 +877,38 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
// JavaThread, so copy and clear pending exception.
// load and clear pending exception
- __ movl(rax, Address(thread, Thread::pending_exception_offset()));
- __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
+ __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
// check that there is really a valid exception
__ verify_not_null_oop(rax);
// load throwing pc: this is the return address of the stub
- __ movl(rdx, Address(rsp, return_off * BytesPerWord));
+ __ movptr(rdx, Address(rsp, return_off * VMRegImpl::stack_slot_size));
#ifdef ASSERT
// check that fields in JavaThread for exception oop and issuing pc are empty
Label oop_empty;
- __ cmpoop(Address(thread, JavaThread::exception_oop_offset()), 0);
+ __ cmpptr(Address(thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, oop_empty);
__ stop("exception oop must be empty");
__ bind(oop_empty);
Label pc_empty;
- __ cmpl(Address(thread, JavaThread::exception_pc_offset()), 0);
+ __ cmpptr(Address(thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, pc_empty);
__ stop("exception pc must be empty");
__ bind(pc_empty);
#endif
// store exception oop and throwing pc to JavaThread
- __ movl(Address(thread, JavaThread::exception_oop_offset()), rax);
- __ movl(Address(thread, JavaThread::exception_pc_offset()), rdx);
+ __ movptr(Address(thread, JavaThread::exception_oop_offset()), rax);
+ __ movptr(Address(thread, JavaThread::exception_pc_offset()), rdx);
restore_live_registers(sasm);
__ leave();
- __ addl(rsp, 4); // remove return address from stack
+ __ addptr(rsp, BytesPerWord); // remove return address from stack
// Forward the exception directly to deopt blob. We can blow no
// registers and must leave throwing pc on the stack. A patch may
@@ -767,7 +925,7 @@ OopMapSet* Runtime1::generate_patching(StubAssembler* sasm, address target) {
Label reexecuteEntry, cont;
- __ testl(rax, rax); // have we deoptimized?
+ __ testptr(rax, rax); // have we deoptimized?
__ jcc(Assembler::equal, cont); // no
// Will reexecute. Proper return address is already on the stack we just restore
@@ -806,21 +964,21 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// dispatch to the handler if found. Otherwise unwind and
// dispatch to the callers exception handler.
- const Register thread = rdi;
+ const Register thread = NOT_LP64(rdi) LP64_ONLY(r15_thread);
const Register exception_oop = rax;
const Register exception_pc = rdx;
// load pending exception oop into rax,
- __ movl(exception_oop, Address(thread, Thread::pending_exception_offset()));
+ __ movptr(exception_oop, Address(thread, Thread::pending_exception_offset()));
// clear pending exception
- __ movl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ movptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
// load issuing PC (the return address for this stub) into rdx
- __ movl(exception_pc, Address(rbp, 1*BytesPerWord));
+ __ movptr(exception_pc, Address(rbp, 1*BytesPerWord));
// make sure that the vm_results are cleared (may be unnecessary)
- __ movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
- __ movl(Address(thread, JavaThread::vm_result_2_offset()), NULL_WORD);
+ __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+ __ movptr(Address(thread, JavaThread::vm_result_2_offset()), (int32_t)NULL_WORD);
 // verify that there is really a valid exception in rax,
__ verify_not_null_oop(exception_oop);
@@ -857,8 +1015,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
Register t2 = rsi;
assert_different_registers(klass, obj, obj_size, t1, t2);
- __ pushl(rdi);
- __ pushl(rbx);
+ __ push(rdi);
+ __ push(rbx);
if (id == fast_new_instance_init_check_id) {
// make sure the klass is initialized
@@ -889,28 +1047,28 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(retry_tlab);
- // get the instance size
+ // get the instance size (size is positive so movl is fine for 64-bit)
__ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ tlab_allocate(obj, obj_size, 0, t1, t2, slow_path);
__ initialize_object(obj, klass, obj_size, 0, t1, t2);
__ verify_oop(obj);
- __ popl(rbx);
- __ popl(rdi);
+ __ pop(rbx);
+ __ pop(rdi);
__ ret(0);
__ bind(try_eden);
- // get the instance size
+ // get the instance size (size is positive so movl is fine for 64-bit)
__ movl(obj_size, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
__ eden_allocate(obj, obj_size, 0, t1, slow_path);
__ initialize_object(obj, klass, obj_size, 0, t1, t2);
__ verify_oop(obj);
- __ popl(rbx);
- __ popl(rdi);
+ __ pop(rbx);
+ __ pop(rdi);
__ ret(0);
__ bind(slow_path);
- __ popl(rbx);
- __ popl(rdi);
+ __ pop(rbx);
+ __ pop(rdi);
}
__ enter();
@@ -996,15 +1154,17 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(retry_tlab);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
+ // since the size is positive, movl does the right thing on 64-bit
__ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
+ // since the size is positive, movl does the right thing on 64-bit
__ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage");
- __ shll(arr_size /* by t1=rcx, mod 32 */);
- __ shrl(t1, Klass::_lh_header_size_shift);
- __ andl(t1, Klass::_lh_header_size_mask);
- __ addl(arr_size, t1);
- __ addl(arr_size, MinObjAlignmentInBytesMask); // align up
- __ andl(arr_size, ~MinObjAlignmentInBytesMask);
+ __ shlptr(arr_size /* by t1=rcx, mod 32 */);
+ __ shrptr(t1, Klass::_lh_header_size_shift);
+ __ andptr(t1, Klass::_lh_header_size_mask);
+ __ addptr(arr_size, t1);
+ __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
+ __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
__ tlab_allocate(obj, arr_size, 0, t1, t2, slow_path); // preserves arr_size
@@ -1012,24 +1172,26 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
- __ andl(t1, Klass::_lh_header_size_mask);
- __ subl(arr_size, t1); // body length
- __ addl(t1, obj); // body start
+ __ andptr(t1, Klass::_lh_header_size_mask);
+ __ subptr(arr_size, t1); // body length
+ __ addptr(t1, obj); // body start
__ initialize_body(t1, arr_size, 0, t2);
__ verify_oop(obj);
__ ret(0);
__ bind(try_eden);
// get the allocation size: round_up(hdr + length << (layout_helper & 0x1F))
+ // since the size is positive, movl does the right thing on 64-bit
__ movl(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes()));
+ // since the size is positive, movl does the right thing on 64-bit
__ movl(arr_size, length);
assert(t1 == rcx, "fixed register usage");
- __ shll(arr_size /* by t1=rcx, mod 32 */);
- __ shrl(t1, Klass::_lh_header_size_shift);
- __ andl(t1, Klass::_lh_header_size_mask);
- __ addl(arr_size, t1);
- __ addl(arr_size, MinObjAlignmentInBytesMask); // align up
- __ andl(arr_size, ~MinObjAlignmentInBytesMask);
+ __ shlptr(arr_size /* by t1=rcx, mod 32 */);
+ __ shrptr(t1, Klass::_lh_header_size_shift);
+ __ andptr(t1, Klass::_lh_header_size_mask);
+ __ addptr(arr_size, t1);
+ __ addptr(arr_size, MinObjAlignmentInBytesMask); // align up
+ __ andptr(arr_size, ~MinObjAlignmentInBytesMask);
__ eden_allocate(obj, arr_size, 0, t1, slow_path); // preserves arr_size
@@ -1037,9 +1199,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ movb(t1, Address(klass, klassOopDesc::header_size() * HeapWordSize + Klass::layout_helper_offset_in_bytes() + (Klass::_lh_header_size_shift / BitsPerByte)));
assert(Klass::_lh_header_size_shift % BitsPerByte == 0, "bytewise");
assert(Klass::_lh_header_size_mask <= 0xFF, "bytewise");
- __ andl(t1, Klass::_lh_header_size_mask);
- __ subl(arr_size, t1); // body length
- __ addl(t1, obj); // body start
+ __ andptr(t1, Klass::_lh_header_size_mask);
+ __ subptr(arr_size, t1); // body length
+ __ addptr(t1, obj); // body start
__ initialize_body(t1, arr_size, 0, t2);
__ verify_oop(obj);
__ ret(0);
@@ -1089,15 +1251,23 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{
__ set_info("register_finalizer", dont_gc_arguments);
+ // This is called via call_runtime so the arguments
+ // will be placed in C ABI locations
+
+#ifdef _LP64
+ __ verify_oop(c_rarg0);
+ __ mov(rax, c_rarg0);
+#else
// The object is passed on the stack and we haven't pushed a
 // frame yet so it's one word away from the top of the stack.
- __ movl(rax, Address(rsp, 1 * BytesPerWord));
+ __ movptr(rax, Address(rsp, 1 * BytesPerWord));
__ verify_oop(rax);
+#endif // _LP64
// load the klass and check the has finalizer flag
Label register_finalizer;
Register t = rsi;
- __ movl(t, Address(rax, oopDesc::klass_offset_in_bytes()));
+ __ movptr(t, Address(rax, oopDesc::klass_offset_in_bytes()));
__ movl(t, Address(t, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ testl(t, JVM_ACC_HAS_FINALIZER);
__ jcc(Assembler::notZero, register_finalizer);
@@ -1185,46 +1355,49 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
case slow_subtype_check_id:
{
enum layout {
- rax_off,
- rcx_off,
- rsi_off,
- rdi_off,
- saved_rbp_off,
- return_off,
- sub_off,
- super_off,
+ rax_off, SLOT2(raxH_off)
+ rcx_off, SLOT2(rcxH_off)
+ rsi_off, SLOT2(rsiH_off)
+ rdi_off, SLOT2(rdiH_off)
+ // saved_rbp_off, SLOT2(saved_rbpH_off)
+ return_off, SLOT2(returnH_off)
+ sub_off, SLOT2(subH_off)
+ super_off, SLOT2(superH_off)
framesize
};
__ set_info("slow_subtype_check", dont_gc_arguments);
- __ pushl(rdi);
- __ pushl(rsi);
- __ pushl(rcx);
- __ pushl(rax);
- __ movl(rsi, Address(rsp, (super_off - 1) * BytesPerWord)); // super
- __ movl(rax, Address(rsp, (sub_off - 1) * BytesPerWord)); // sub
+ __ push(rdi);
+ __ push(rsi);
+ __ push(rcx);
+ __ push(rax);
+
+ // This is called by pushing the args and not with the C ABI
+ __ movptr(rsi, Address(rsp, (super_off) * VMRegImpl::stack_slot_size)); // super
+ __ movptr(rax, Address(rsp, (sub_off ) * VMRegImpl::stack_slot_size)); // sub
- __ movl(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
- __ movl(rcx,Address(rdi,arrayOopDesc::length_offset_in_bytes()));
- __ addl(rdi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ __ movptr(rdi,Address(rsi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
+ // since the size is positive, movl does the right thing on 64-bit
+ __ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
+ __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
Label miss;
__ repne_scan();
__ jcc(Assembler::notEqual, miss);
- __ movl(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
- __ movl(Address(rsp, (super_off - 1) * BytesPerWord), 1); // result
- __ popl(rax);
- __ popl(rcx);
- __ popl(rsi);
- __ popl(rdi);
+ __ movptr(Address(rsi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax);
+ __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 1); // result
+ __ pop(rax);
+ __ pop(rcx);
+ __ pop(rsi);
+ __ pop(rdi);
__ ret(0);
__ bind(miss);
- __ movl(Address(rsp, (super_off - 1) * BytesPerWord), 0); // result
- __ popl(rax);
- __ popl(rcx);
- __ popl(rsi);
- __ popl(rdi);
+ __ movptr(Address(rsp, (super_off) * VMRegImpl::stack_slot_size), 0); // result
+ __ pop(rax);
+ __ pop(rcx);
+ __ pop(rsi);
+ __ pop(rdi);
__ ret(0);
}
break;
@@ -1237,6 +1410,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
StubFrame f(sasm, "monitorenter", dont_gc_arguments);
OopMap* map = save_live_registers(sasm, 3, save_fpu_registers);
+ // Called with store_parameter and not C abi
+
f.load_argument(1, rax); // rax,: object
f.load_argument(0, rbx); // rbx,: lock address
@@ -1256,6 +1431,8 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
StubFrame f(sasm, "monitorexit", dont_gc_arguments);
OopMap* map = save_live_registers(sasm, 2, save_fpu_registers);
+ // Called with store_parameter and not C abi
+
f.load_argument(0, rax); // rax,: lock address
// note: really a leaf routine but must setup last java sp
@@ -1304,9 +1481,9 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
// the live registers get saved.
save_live_registers(sasm, 1);
- __ pushl(rax);
+ __ NOT_LP64(push(rax)) LP64_ONLY(mov(c_rarg0, rax));
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_object_alloc)));
- __ popl(rax);
+ NOT_LP64(__ pop(rax));
restore_live_registers(sasm);
}
@@ -1316,18 +1493,19 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
{
// rax, and rdx are destroyed, but should be free since the result is returned there
// preserve rsi,ecx
- __ pushl(rsi);
- __ pushl(rcx);
+ __ push(rsi);
+ __ push(rcx);
+ LP64_ONLY(__ push(rdx);)
// check for NaN
Label return0, do_return, return_min_jlong, do_convert;
- Address value_high_word(rsp, 8);
- Address value_low_word(rsp, 4);
- Address result_high_word(rsp, 16);
- Address result_low_word(rsp, 12);
+ Address value_high_word(rsp, wordSize + 4);
+ Address value_low_word(rsp, wordSize);
+ Address result_high_word(rsp, 3*wordSize + 4);
+ Address result_low_word(rsp, 3*wordSize);
- __ subl(rsp, 20);
+ __ subptr(rsp, 32); // more than enough on 32bit
__ fst_d(value_low_word);
__ movl(rax, value_high_word);
__ andl(rax, 0x7ff00000);
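      // (0x7ff00000 selects the 11 exponent bits in the high word of the IEEE-754
      // double; an all-ones exponent field flags NaN/infinity for the special-case
      // handling around the return0 path.)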
@@ -1340,7 +1518,7 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ bind(do_convert);
__ fnstcw(Address(rsp, 0));
- __ movzxw(rax, Address(rsp, 0));
+ __ movzwl(rax, Address(rsp, 0));
__ orl(rax, 0xc00);
__ movw(Address(rsp, 2), rax);
__ fldcw(Address(rsp, 2));
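      // (Setting bits 0xc00, the x87 rounding-control field, selects round-toward-zero,
      // so the fistp below truncates the way a Java (long) cast requires; the original
      // control word saved at Address(rsp, 0) is restored afterwards.)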
@@ -1348,9 +1526,11 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ fistp_d(result_low_word);
__ fldcw(Address(rsp, 0));
__ fwait();
- __ movl(rax, result_low_word);
+ // This gets the entire long in rax on 64bit
+ __ movptr(rax, result_low_word);
+ // testing of high bits
__ movl(rdx, result_high_word);
- __ movl(rcx, rax);
+ __ mov(rcx, rax);
// What the heck is the point of the next instruction???
__ xorl(rcx, 0x0);
__ movl(rsi, 0x80000000);
@@ -1360,34 +1540,52 @@ OopMapSet* Runtime1::generate_code_for(StubID id, StubAssembler* sasm) {
__ fldz();
__ fcomp_d(value_low_word);
__ fnstsw_ax();
+#ifdef _LP64
+ __ testl(rax, 0x4100); // ZF & CF == 0
+ __ jcc(Assembler::equal, return_min_jlong);
+#else
__ sahf();
__ jcc(Assembler::above, return_min_jlong);
+#endif // _LP64
// return max_jlong
+#ifndef _LP64
__ movl(rdx, 0x7fffffff);
__ movl(rax, 0xffffffff);
+#else
+ __ mov64(rax, CONST64(0x7fffffffffffffff));
+#endif // _LP64
__ jmp(do_return);
__ bind(return_min_jlong);
+#ifndef _LP64
__ movl(rdx, 0x80000000);
__ xorl(rax, rax);
+#else
+ __ mov64(rax, CONST64(0x8000000000000000));
+#endif // _LP64
__ jmp(do_return);
__ bind(return0);
__ fpop();
- __ xorl(rdx,rdx);
- __ xorl(rax,rax);
+#ifndef _LP64
+ __ xorptr(rdx,rdx);
+ __ xorptr(rax,rax);
+#else
+ __ xorptr(rax, rax);
+#endif // _LP64
__ bind(do_return);
- __ addl(rsp, 20);
- __ popl(rcx);
- __ popl(rsi);
+ __ addptr(rsp, 32);
+ LP64_ONLY(__ pop(rdx);)
+ __ pop(rcx);
+ __ pop(rsi);
__ ret(0);
}
break;
default:
{ StubFrame f(sasm, "unimplemented entry", dont_gc_arguments);
- __ movl(rax, (int)id);
+ __ movptr(rax, (int)id);
__ call_RT(noreg, noreg, CAST_FROM_FN_PTR(address, unimplemented_entry), rax);
__ should_not_reach_here();
}
diff --git a/src/cpu/x86/vm/cppInterpreter_x86.cpp b/src/cpu/x86/vm/cppInterpreter_x86.cpp
index 89c497de7..a829cb313 100644
--- a/src/cpu/x86/vm/cppInterpreter_x86.cpp
+++ b/src/cpu/x86/vm/cppInterpreter_x86.cpp
@@ -44,6 +44,14 @@ extern "C" void RecursiveInterpreterActivation(interpreterState istate )
Label fast_accessor_slow_entry_path; // fast accessor methods need to be able to jmp to unsynchronized
// c++ interpreter entry point this holds that entry point label.
+// default registers for state and sender_sp
+// state and sender_sp are the same on 32bit because we have no choice.
+// state could be rsi on 64bit but it is an arg reg and not callee saved,
+// so r13 is a better choice.
+
+const Register state = NOT_LP64(rsi) LP64_ONLY(r13);
+const Register sender_sp_on_entry = NOT_LP64(rsi) LP64_ONLY(r13);
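// For reference (behaviour assumed from how these macros are used throughout this
// patch): LP64_ONLY(x) expands to x only on 64-bit builds and NOT_LP64(x) only on
// 32-bit builds, so an expression like NOT_LP64(rsi) LP64_ONLY(r13) selects rsi on
// 32-bit and r13 on 64-bit.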
+
// NEEDED for JVMTI?
// address AbstractInterpreter::_remove_activation_preserving_args_entry;
@@ -88,7 +96,6 @@ bool CppInterpreter::contains(address pc) {
address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
- const Register state = rsi; // current activation object, valid on entry
address entry = __ pc();
switch (type) {
case T_BOOLEAN: __ c2bool(rax); break;
@@ -98,19 +105,22 @@ address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
case T_VOID : // fall thru
case T_LONG : // fall thru
case T_INT : /* nothing to do */ break;
+
case T_DOUBLE :
case T_FLOAT :
- { const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
- __ popl(t); // remove return address first
- __ pop_dtos_to_rsp();
+ {
+ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
+ __ pop(t); // remove return address first
// Must return a result for interpreter or compiler. In SSE
// mode, results are returned in xmm0 and the FPU stack must
// be empty.
if (type == T_FLOAT && UseSSE >= 1) {
+#ifndef _LP64
// Load ST0
__ fld_d(Address(rsp, 0));
// Store as float and empty fpu stack
__ fstp_s(Address(rsp, 0));
+#endif // !_LP64
// and reload
__ movflt(xmm0, Address(rsp, 0));
} else if (type == T_DOUBLE && UseSSE >= 2 ) {
@@ -120,13 +130,13 @@ address CppInterpreterGenerator::generate_result_handler_for(BasicType type) {
__ fld_d(Address(rsp, 0));
}
// and pop the temp
- __ addl(rsp, 2 * wordSize);
- __ pushl(t); // restore return address
+ __ addptr(rsp, 2 * wordSize);
+ __ push(t); // restore return address
}
break;
case T_OBJECT :
// retrieve result from frame
- __ movl(rax, STATE(_oop_temp));
+ __ movptr(rax, STATE(_oop_temp));
// and verify it
__ verify_oop(rax);
break;
@@ -146,7 +156,7 @@ address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType typ
address entry = __ pc();
const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
- __ popl(t); // remove return address first
+ __ pop(t); // remove return address first
switch (type) {
case T_VOID:
break;
@@ -154,53 +164,53 @@ address CppInterpreterGenerator::generate_tosca_to_stack_converter(BasicType typ
#ifdef EXTEND
__ c2bool(rax);
#endif
- __ pushl(rax);
+ __ push(rax);
break;
case T_CHAR :
#ifdef EXTEND
__ andl(rax, 0xFFFF);
#endif
- __ pushl(rax);
+ __ push(rax);
break;
case T_BYTE :
#ifdef EXTEND
__ sign_extend_byte (rax);
#endif
- __ pushl(rax);
+ __ push(rax);
break;
case T_SHORT :
#ifdef EXTEND
__ sign_extend_short(rax);
#endif
- __ pushl(rax);
+ __ push(rax);
break;
case T_LONG :
- __ pushl(rdx);
- __ pushl(rax);
+ __ push(rdx); // pushes useless junk on 64bit
+ __ push(rax);
break;
case T_INT :
- __ pushl(rax);
+ __ push(rax);
break;
case T_FLOAT :
- // Result is in ST(0)
+ // Result is in ST(0)/xmm0
+ __ subptr(rsp, wordSize);
if ( UseSSE < 1) {
- __ push(ftos); // and save it
+ __ fstp_s(Address(rsp, 0));
} else {
- __ subl(rsp, wordSize);
__ movflt(Address(rsp, 0), xmm0);
}
break;
case T_DOUBLE :
+ __ subptr(rsp, 2*wordSize);
if ( UseSSE < 2 ) {
- __ push(dtos); // put ST0 on java stack
+ __ fstp_d(Address(rsp, 0));
} else {
- __ subl(rsp, 2*wordSize);
__ movdbl(Address(rsp, 0), xmm0);
}
break;
case T_OBJECT :
__ verify_oop(rax); // verify it
- __ pushl(rax);
+ __ push(rax);
break;
default : ShouldNotReachHere();
}
@@ -212,7 +222,7 @@ address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType typ
// A result is in the java expression stack of the interpreted method that has just
// returned. Place this result on the java expression stack of the caller.
//
- // The current interpreter activation in rsi is for the method just returning its
+ // The current interpreter activation in rsi/r13 is for the method just returning its
// result. So we know that the result of this method is on the top of the current
 // execution stack (which is pre-pushed) and will be returned to the top of the caller's
 // stack. The top of the caller's stack is the bottom of the locals of the current
@@ -222,20 +232,19 @@ address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType typ
 // of the calling activation. This enables this routine to leave the return address
// to the frame manager on the stack and do a vanilla return.
//
- // On entry: rsi - interpreter state of activation returning a (potential) result
- // On Return: rsi - unchanged
+ // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
+ // On Return: rsi/r13 - unchanged
// rax - new stack top for caller activation (i.e. activation in _prev_link)
//
// Can destroy rdx, rcx.
//
address entry = __ pc();
- const Register state = rsi; // current activation object, valid on entry
const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
switch (type) {
case T_VOID:
- __ movl(rax, STATE(_locals)); // pop parameters get new stack value
- __ addl(rax, wordSize); // account for prepush before we return
+ __ movptr(rax, STATE(_locals)); // pop parameters get new stack value
+ __ addptr(rax, wordSize); // account for prepush before we return
break;
case T_FLOAT :
case T_BOOLEAN:
@@ -244,10 +253,10 @@ address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType typ
case T_SHORT :
case T_INT :
// 1 word result
- __ movl(rdx, STATE(_stack));
- __ movl(rax, STATE(_locals)); // address for result
+ __ movptr(rdx, STATE(_stack));
+ __ movptr(rax, STATE(_locals)); // address for result
__ movl(rdx, Address(rdx, wordSize)); // get result
- __ movl(Address(rax, 0), rdx); // and store it
+ __ movptr(Address(rax, 0), rdx); // and store it
break;
case T_LONG :
case T_DOUBLE :
@@ -256,20 +265,20 @@ address CppInterpreterGenerator::generate_stack_to_stack_converter(BasicType typ
 // except we allocated one extra word for this interpreterState so we won't overwrite it
// when we return a two word result.
- __ movl(rax, STATE(_locals)); // address for result
- __ movl(rcx, STATE(_stack));
- __ subl(rax, wordSize); // need addition word besides locals[0]
- __ movl(rdx, Address(rcx, 2*wordSize)); // get result word
- __ movl(Address(rax, wordSize), rdx); // and store it
- __ movl(rdx, Address(rcx, wordSize)); // get result word
- __ movl(Address(rax, 0), rdx); // and store it
+ __ movptr(rax, STATE(_locals)); // address for result
+ __ movptr(rcx, STATE(_stack));
+ __ subptr(rax, wordSize); // need addition word besides locals[0]
+ __ movptr(rdx, Address(rcx, 2*wordSize)); // get result word (junk in 64bit)
+ __ movptr(Address(rax, wordSize), rdx); // and store it
+ __ movptr(rdx, Address(rcx, wordSize)); // get result word
+ __ movptr(Address(rax, 0), rdx); // and store it
break;
case T_OBJECT :
- __ movl(rdx, STATE(_stack));
- __ movl(rax, STATE(_locals)); // address for result
- __ movl(rdx, Address(rdx, wordSize)); // get result
+ __ movptr(rdx, STATE(_stack));
+ __ movptr(rax, STATE(_locals)); // address for result
+ __ movptr(rdx, Address(rdx, wordSize)); // get result
__ verify_oop(rdx); // verify it
- __ movl(Address(rax, 0), rdx); // and store it
+ __ movptr(Address(rax, 0), rdx); // and store it
break;
default : ShouldNotReachHere();
}
@@ -285,12 +294,11 @@ address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicTyp
 // frame manager except that in this situation the caller is native code (c1/c2/call_stub)
// and so rather than return result onto caller's java expression stack we return the
// result in the expected location based on the native abi.
- // On entry: rsi - interpreter state of activation returning a (potential) result
- // On Return: rsi - unchanged
+ // On entry: rsi/r13 - interpreter state of activation returning a (potential) result
+ // On Return: rsi/r13 - unchanged
// Other registers changed [rax/rdx/ST(0) as needed for the result returned]
address entry = __ pc();
- const Register state = rsi; // current activation object, valid on entry
switch (type) {
case T_VOID:
break;
@@ -299,17 +307,16 @@ address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicTyp
case T_BYTE :
case T_SHORT :
case T_INT :
- __ movl(rdx, STATE(_stack)); // get top of stack
+ __ movptr(rdx, STATE(_stack)); // get top of stack
__ movl(rax, Address(rdx, wordSize)); // get result word 1
break;
case T_LONG :
- __ movl(rdx, STATE(_stack)); // get top of stack
- __ movl(rax, Address(rdx, wordSize)); // get result low word
- __ movl(rdx, Address(rdx, 2*wordSize)); // get result high word
- break;
+ __ movptr(rdx, STATE(_stack)); // get top of stack
+ __ movptr(rax, Address(rdx, wordSize)); // get result low word
+ NOT_LP64(__ movl(rdx, Address(rdx, 2*wordSize));) // get result high word
break;
case T_FLOAT :
- __ movl(rdx, STATE(_stack)); // get top of stack
+ __ movptr(rdx, STATE(_stack)); // get top of stack
if ( UseSSE >= 1) {
__ movflt(xmm0, Address(rdx, wordSize));
} else {
@@ -317,7 +324,7 @@ address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicTyp
}
break;
case T_DOUBLE :
- __ movl(rdx, STATE(_stack)); // get top of stack
+ __ movptr(rdx, STATE(_stack)); // get top of stack
if ( UseSSE > 1) {
__ movdbl(xmm0, Address(rdx, wordSize));
} else {
@@ -325,8 +332,8 @@ address CppInterpreterGenerator::generate_stack_to_native_abi_converter(BasicTyp
}
break;
case T_OBJECT :
- __ movl(rdx, STATE(_stack)); // get top of stack
- __ movl(rax, Address(rdx, wordSize)); // get result word 1
+ __ movptr(rdx, STATE(_stack)); // get top of stack
+ __ movptr(rax, Address(rdx, wordSize)); // get result word 1
__ verify_oop(rax); // verify it
break;
default : ShouldNotReachHere();
@@ -408,54 +415,58 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
if (!native) {
#ifdef PRODUCT
- __ subl(rsp, 2*wordSize);
+ __ subptr(rsp, 2*wordSize);
#else /* PRODUCT */
- __ pushl((int)NULL);
- __ pushl(state); // make it look like a real argument
+ __ push((int32_t)NULL_WORD);
+ __ push(state); // make it look like a real argument
#endif /* PRODUCT */
}
// Now that we are assure of space for stack result, setup typical linkage
- __ pushl(rax);
+ __ push(rax);
__ enter();
- __ movl(rax, state); // save current state
+ __ mov(rax, state); // save current state
- __ leal(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter)));
- __ movl(state, rsp);
+ __ lea(rsp, Address(rsp, -(int)sizeof(BytecodeInterpreter)));
+ __ mov(state, rsp);
- // rsi == state/locals rax == prevstate
+ // rsi/r13 == state/locals rax == prevstate
 // initialize the "shadow" frame that we use because the C++ interpreter is not
 // directly recursive. It would be simpler to recurse, but then we could not trim
 // the expression stack as we call new methods.
- __ movl(STATE(_locals), locals); // state->_locals = locals()
- __ movl(STATE(_self_link), state); // point to self
- __ movl(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state)
- __ movl(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp
+ __ movptr(STATE(_locals), locals); // state->_locals = locals()
+ __ movptr(STATE(_self_link), state); // point to self
+ __ movptr(STATE(_prev_link), rax); // state->_link = state on entry (NULL or previous state)
+ __ movptr(STATE(_sender_sp), sender_sp); // state->_sender_sp = sender_sp
+#ifdef _LP64
+ __ movptr(STATE(_thread), r15_thread); // state->_thread = current thread
+#else
__ get_thread(rax); // get vm's javathread*
- __ movl(STATE(_thread), rax); // state->_bcp = codes()
- __ movl(rdx, Address(rbx, methodOopDesc::const_offset())); // get constantMethodOop
- __ leal(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base
+ __ movptr(STATE(_thread), rax); // state->_thread = current thread
+#endif // _LP64
+ __ movptr(rdx, Address(rbx, methodOopDesc::const_offset())); // get constantMethodOop
+ __ lea(rdx, Address(rdx, constMethodOopDesc::codes_offset())); // get code base
if (native) {
- __ movl(STATE(_bcp), (intptr_t)NULL); // state->_bcp = NULL
+ __ movptr(STATE(_bcp), (int32_t)NULL_WORD); // state->_bcp = NULL
} else {
- __ movl(STATE(_bcp), rdx); // state->_bcp = codes()
+ __ movptr(STATE(_bcp), rdx); // state->_bcp = codes()
}
- __ xorl(rdx, rdx);
- __ movl(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native)
- __ movl(STATE(_mdx), rdx); // state->_mdx = NULL
- __ movl(rdx, Address(rbx, methodOopDesc::constants_offset()));
- __ movl(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
- __ movl(STATE(_constants), rdx); // state->_constants = constants()
+ __ xorptr(rdx, rdx);
+ __ movptr(STATE(_oop_temp), rdx); // state->_oop_temp = NULL (only really needed for native)
+ __ movptr(STATE(_mdx), rdx); // state->_mdx = NULL
+ __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(STATE(_constants), rdx); // state->_constants = constants()
- __ movl(STATE(_method), rbx); // state->_method = method()
- __ movl(STATE(_msg), (int) BytecodeInterpreter::method_entry); // state->_msg = initial method entry
- __ movl(STATE(_result._to_call._callee), (int) NULL); // state->_result._to_call._callee_callee = NULL
+ __ movptr(STATE(_method), rbx); // state->_method = method()
+ __ movl(STATE(_msg), (int32_t) BytecodeInterpreter::method_entry); // state->_msg = initial method entry
+ __ movptr(STATE(_result._to_call._callee), (int32_t) NULL_WORD); // state->_result._to_call._callee = NULL
- __ movl(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0]
+ __ movptr(STATE(_monitor_base), rsp); // set monitor block bottom (grows down) this would point to entry [0]
// entries run from -1..x where &monitor[x] ==
{
@@ -479,36 +490,44 @@ void CppInterpreterGenerator::generate_compute_interpreter_state(const Register
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
- __ movl(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
+ __ movptr(rax, Address(locals, 0)); // get receiver (assume this is frequent case)
__ jcc(Assembler::zero, done);
- __ movl(rax, Address(rbx, methodOopDesc::constants_offset()));
- __ movl(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
- __ movl(rax, Address(rax, mirror_offset));
+ __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rax, mirror_offset));
__ bind(done);
// add space for monitor & lock
- __ subl(rsp, entry_size); // add space for a monitor entry
- __ movl(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
+ __ subptr(rsp, entry_size); // add space for a monitor entry
+ __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
__ bind(not_synced);
}
- __ movl(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count])
+ __ movptr(STATE(_stack_base), rsp); // set expression stack base ( == &monitors[-count])
if (native) {
- __ movl(STATE(_stack), rsp); // set current expression stack tos
- __ movl(STATE(_stack_limit), rsp);
+ __ movptr(STATE(_stack), rsp); // set current expression stack tos
+ __ movptr(STATE(_stack_limit), rsp);
} else {
- __ subl(rsp, wordSize); // pre-push stack
- __ movl(STATE(_stack), rsp); // set current expression stack tos
+ __ subptr(rsp, wordSize); // pre-push stack
+ __ movptr(STATE(_stack), rsp); // set current expression stack tos
// compute full expression stack limit
const Address size_of_stack (rbx, methodOopDesc::max_stack_offset());
__ load_unsigned_word(rdx, size_of_stack); // get size of expression stack in words
- __ negl(rdx); // so we can subtract in next step
+ __ negptr(rdx); // so we can subtract in next step
// Allocate expression stack
- __ leal(rsp, Address(rsp, rdx, Address::times_4));
- __ movl(STATE(_stack_limit), rsp);
+ __ lea(rsp, Address(rsp, rdx, Address::times_ptr));
+ __ movptr(STATE(_stack_limit), rsp);
}
+#ifdef _LP64
+ // Make sure stack is properly aligned and sized for the abi
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
+#endif // _LP64
+
+
+
}
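For illustration only (not part of the patch): most of the churn above is mechanical. The 32-bit-only forms (movl, addl, pushl, leal) become pointer-sized wrappers (movptr, addptr, push, lea) that emit the right width for either build, and pointer-sized zero stores are spelled with NULL_WORD. A minimal self-contained sketch of the build-time selection idea, assuming the usual _LP64 define and HotSpot-style LP64_ONLY/NOT_LP64 conditionals; the real wrappers live in the merged assembler_x86 files:

    #include <cstdio>

    // Sketch only -- illustrates the selection, not the real assembler.
    #ifdef _LP64
      #define LP64_ONLY(code) code
      #define NOT_LP64(code)
    #else
      #define LP64_ONLY(code)
      #define NOT_LP64(code) code
    #endif

    static void movl_stub() { std::puts("emit 32-bit mov"); }   // stand-in for movl
    static void movq_stub() { std::puts("emit 64-bit mov"); }   // stand-in for movq

    // Analogue of a pointer-sized wrapper such as movptr:
    // one call site, one instruction width per build.
    static void movptr_stub() {
      LP64_ONLY(movq_stub();)
      NOT_LP64(movl_stub();)
    }

    int main() { movptr_stub(); return 0; }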
// Helpers for commoning out cases in the various type of method entries.
@@ -528,7 +547,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
if (ProfileInterpreter) { // %%% Merge this into methodDataOop
- __ increment(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
+ __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
}
// Update standard invocation counters
__ movl(rax, backedge_counter); // load backedge counter
@@ -552,7 +571,7 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// C++ interpreter on entry
- // rsi - new interpreter state pointer
+ // rsi/r13 - new interpreter state pointer
// rbp - interpreter frame pointer
// rbx - method
@@ -563,7 +582,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// rsp - sender_sp
// C++ interpreter only
- // rsi - previous interpreter state pointer
+ // rsi/r13 - previous interpreter state pointer
const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
@@ -571,16 +590,14 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
// if the compilation did not complete (either went background or bailed out).
- __ movl(rax, (int)false);
+ __ movptr(rax, (int32_t)false);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
// for c++ interpreter can rsi really be munged?
- __ leal(rsi, Address(rbp, -sizeof(BytecodeInterpreter))); // restore state
- __ movl(rbx, Address(rsi, byte_offset_of(BytecodeInterpreter, _method))); // restore method
- __ movl(rdi, Address(rsi, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer
+ __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter))); // restore state
+ __ movptr(rbx, Address(state, byte_offset_of(BytecodeInterpreter, _method))); // restore method
+ __ movptr(rdi, Address(state, byte_offset_of(BytecodeInterpreter, _locals))); // get locals pointer
- // Preserve invariant that rsi/rdi contain bcp/locals of sender frame
- // and jump to the interpreted entry.
__ jmp(*do_continue, relocInfo::none);
}
@@ -597,7 +614,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// rbx,: methodOop
// C++ Interpreter
- // rsi: previous interpreter frame state object
+ // rsi/r13: previous interpreter frame state object
// rdi: &locals[0]
// rcx: # of locals
// rdx: number of additional locals this frame needs (what we must check)
@@ -628,11 +645,11 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// save rsi == caller's bytecode ptr (c++ previous interp. state)
// QQQ problem here?? rsi overload????
- __ pushl(rsi);
+ __ push(state);
- const Register thread = rsi;
+ const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rsi);
- __ get_thread(thread);
+ NOT_LP64(__ get_thread(thread));
const Address stack_base(thread, Thread::stack_base_offset());
const Address stack_size(thread, Thread::stack_size_offset());
@@ -643,26 +660,26 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// Any additional monitors need a check when moving the expression stack
const int one_monitor = frame::interpreter_frame_monitor_size() * wordSize;
__ load_unsigned_word(rax, size_of_stack); // get size of expression stack in words
- __ leal(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor));
- __ leal(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
+ __ lea(rax, Address(noreg, rax, Interpreter::stackElementScale(), one_monitor));
+ __ lea(rax, Address(rax, rdx, Interpreter::stackElementScale(), overhead_size));
#ifdef ASSERT
Label stack_base_okay, stack_size_okay;
// verify that thread stack base is non-zero
- __ cmpl(stack_base, 0);
+ __ cmpptr(stack_base, (int32_t)0);
__ jcc(Assembler::notEqual, stack_base_okay);
__ stop("stack base is zero");
__ bind(stack_base_okay);
// verify that thread stack size is non-zero
- __ cmpl(stack_size, 0);
+ __ cmpptr(stack_size, (int32_t)0);
__ jcc(Assembler::notEqual, stack_size_okay);
__ stop("stack size is zero");
__ bind(stack_size_okay);
#endif
// Add stack base to locals and subtract stack size
- __ addl(rax, stack_base);
- __ subl(rax, stack_size);
+ __ addptr(rax, stack_base);
+ __ subptr(rax, stack_size);
// We should have a magic number here for the size of the c++ interpreter frame.
// We can't actually tell this ahead of time. The debug version size is around 3k
@@ -674,20 +691,20 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
(StackRedPages+StackYellowPages);
// Only need this if we are stack banging which is temporary while
// we're debugging.
- __ addl(rax, slop + 2*max_pages * page_size);
+ __ addptr(rax, slop + 2*max_pages * page_size);
// check against the current stack bottom
- __ cmpl(rsp, rax);
+ __ cmpptr(rsp, rax);
__ jcc(Assembler::above, after_frame_check_pop);
- __ popl(rsi); // get saved bcp / (c++ prev state ).
+ __ pop(state); // get c++ prev state.
// throw exception return address becomes throwing pc
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_StackOverflowError));
// all done with frame size check
__ bind(after_frame_check_pop);
- __ popl(rsi);
+ __ pop(state);
__ bind(after_frame_check);
}
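For reference, the generated check above boils down to pointer arithmetic against the thread's stack bounds. A rough C sketch of the comparison, with illustrative names standing in for the frame and guard-page constants:

    #include <cstdint>
    #include <cstddef>

    // True when pushing a frame of frame_bytes from the current rsp would
    // reach the guard pages at the low end of the thread stack
    // (stacks grow downward).
    static bool frame_would_overflow(uintptr_t rsp, uintptr_t stack_base,
                                     size_t stack_size, size_t frame_bytes,
                                     size_t guard_bytes) {
      uintptr_t limit = stack_base - stack_size + guard_bytes + frame_bytes;
      return rsp <= limit;     // generated code: cmpptr(rsp, rax); jcc(above, ok)
    }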
@@ -696,17 +713,18 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
// rbx - methodOop
//
void InterpreterGenerator::lock_method(void) {
- // assumes state == rsi == pointer to current interpreterState
- // minimally destroys rax, rdx, rdi
+ // assumes state == rsi/r13 == pointer to current interpreterState
+ // minimally destroys rax, rdx|c_rarg1, rdi
//
// synchronize method
- const Register state = rsi;
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
const Address access_flags (rbx, methodOopDesc::access_flags_offset());
+ const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
+
// find initial monitor i.e. monitors[-1]
- __ movl(rdx, STATE(_monitor_base)); // get monitor bottom limit
- __ subl(rdx, entry_size); // point to initial monitor
+ __ movptr(monitor, STATE(_monitor_base)); // get monitor bottom limit
+ __ subptr(monitor, entry_size); // point to initial monitor
#ifdef ASSERT
{ Label L;
@@ -721,35 +739,34 @@ void InterpreterGenerator::lock_method(void) {
{ Label done;
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
__ movl(rax, access_flags);
- __ movl(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
+ __ movptr(rdi, STATE(_locals)); // prepare to get receiver (assume common case)
__ testl(rax, JVM_ACC_STATIC);
- __ movl(rax, Address(rdi, 0)); // get receiver (assume this is frequent case)
+ __ movptr(rax, Address(rdi, 0)); // get receiver (assume this is frequent case)
__ jcc(Assembler::zero, done);
- __ movl(rax, Address(rbx, methodOopDesc::constants_offset()));
- __ movl(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
- __ movl(rax, Address(rax, mirror_offset));
+ __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rax, mirror_offset));
__ bind(done);
}
#ifdef ASSERT
{ Label L;
- __ cmpl(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // correct object?
+ __ cmpptr(rax, Address(monitor, BasicObjectLock::obj_offset_in_bytes())); // correct object?
__ jcc(Assembler::equal, L);
__ stop("wrong synchronization lobject");
__ bind(L);
}
#endif // ASSERT
- // can destroy rax, rdx, rcx, and (via call_VM) rdi!
- __ lock_object(rdx);
+ // can destroy rax, rdx|c_rarg1, rcx, and (via call_VM) rdi!
+ __ lock_object(monitor);
}
// Call an accessor method (assuming it is resolved, otherwise drop into vanilla (slow path) entry
address InterpreterGenerator::generate_accessor_entry(void) {
- // rbx,: methodOop
- // rcx: receiver (preserve for slow entry into asm interpreter)
+ // rbx: methodOop
- // rsi: senderSP must preserved for slow path, set SP to it on fast path
+ // rsi/r13: senderSP must preserved for slow path, set SP to it on fast path
Label xreturn_path;
@@ -772,21 +789,21 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
- __ movl(rax, Address(rsp, wordSize));
+ __ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
- __ testl(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
- __ movl(rdi, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
// read first instruction word and extract bytecode @ 1 and index @ 2
- __ movl(rdx, Address(rbx, methodOopDesc::const_offset()));
+ __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
__ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movl(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
// rax,: local 0
// rbx,: method
@@ -794,7 +811,7 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// rcx: scratch
// rdx: constant pool cache index
// rdi: constant pool cache
- // rsi: sender sp
+ // rsi/r13: sender sp
// check if getfield has been resolved and read constant pool cache entry
// check the validity of the cache entry by testing whether _indices field
@@ -803,21 +820,21 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ movl(rcx,
Address(rdi,
rdx,
- Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
- __ movl(rcx,
+ __ movptr(rcx,
Address(rdi,
rdx,
- Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
- Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);
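As a sketch of the decode above: the fast accessor path pulls the constant-pool cache index straight out of the first code word of the rewritten method (aload_0 = 0x2a, getfield = 0xb4, then the two index bytes). Illustrative only:

    #include <cstdint>

    // First 32-bit word of a rewritten accessor: aload_0 (0x2a) in the low
    // byte, getfield (0xb4) next, then the constant pool cache index.
    static uint32_t cp_cache_index(uint32_t first_code_word) {
      return first_code_word >> (2 * 8);     // shrl(rdx, 2*BitsPerByte)
    }
    // e.g. cp_cache_index(0x0001b42au) == 1; the following shll scales this by
    // the size of a ConstantPoolCacheEntry to index the cache.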
@@ -828,6 +845,16 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ shrl(rdx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask rdx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
+#ifdef _LP64
+ Label notObj;
+ __ cmpl(rdx, atos);
+ __ jcc(Assembler::notEqual, notObj);
+ // atos
+ __ movptr(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notObj);
+#endif // _LP64
__ cmpl(rdx, btos);
__ jcc(Assembler::notEqual, notByte);
__ load_signed_byte(rax, field_address);
@@ -848,8 +875,10 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ bind(notChar);
#ifdef ASSERT
Label okay;
+#ifndef _LP64
__ cmpl(rdx, atos);
__ jcc(Assembler::equal, okay);
+#endif // _LP64
__ cmpl(rdx, itos);
__ jcc(Assembler::equal, okay);
__ stop("what type is this?");
@@ -861,8 +890,8 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ bind(xreturn_path);
// _ireturn/_areturn
- __ popl(rdi); // get return address
- __ movl(rsp, rsi); // set sp to sender sp
+ __ pop(rdi); // get return address
+ __ mov(rsp, sender_sp_on_entry); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla interpreter entry as the slow path
@@ -894,8 +923,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// rbx: methodOop
// rcx: receiver (unused)
- // rsi: previous interpreter state (if called from C++ interpreter) must preserve
- // in any case. If called via c1/c2/call_stub rsi is junk (to use) but harmless
+ // rsi/r13: previous interpreter state (if called from C++ interpreter) must preserve
+ // in any case. If called via c1/c2/call_stub rsi/r13 is junk (to use) but harmless
// to save/restore.
address entry_point = __ pc();
@@ -904,8 +933,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
const Address invocation_counter(rbx, methodOopDesc::invocation_counter_offset() + InvocationCounter::counter_offset());
const Address access_flags (rbx, methodOopDesc::access_flags_offset());
- // rsi == state/locals rdi == prevstate
- const Register state = rsi;
+ // rsi/r13 == state/locals rdi == prevstate
const Register locals = rdi;
// get parameter size (always needed)
@@ -913,11 +941,11 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// rbx: methodOop
// rcx: size of parameters
- __ popl(rax); // get return address
+ __ pop(rax); // get return address
// for natives the size of locals is zero
// compute beginning of parameters /locals
- __ leal(locals, Address(rsp, rcx, Address::times_4, -wordSize));
+ __ lea(locals, Address(rsp, rcx, Address::times_ptr, -wordSize));
// initialize fixed part of activation frame
@@ -931,15 +959,20 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// OUT(rsp) -> bottom of methods expression stack
// save sender_sp
- __ movl(rcx, rsi);
+ __ mov(rcx, sender_sp_on_entry);
// start with NULL previous state
- __ movl(state, 0);
+ __ movptr(state, (int32_t)NULL_WORD);
generate_compute_interpreter_state(state, locals, rcx, true);
#ifdef ASSERT
{ Label L;
- __ movl(rax, STATE(_stack_base));
- __ cmpl(rax, rsp);
+ __ movptr(rax, STATE(_stack_base));
+#ifdef _LP64
+ // duplicate the alignment rsp got after setting stack_base
+ __ subptr(rax, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
+#endif // _LP64
+ __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter");
__ bind(L);
@@ -948,14 +981,15 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
- __ movl(rax, STATE(_thread)); // get thread
+ const Register unlock_thread = LP64_ONLY(r15_thread) NOT_LP64(rax);
+ NOT_LP64(__ movptr(unlock_thread, STATE(_thread));) // get thread
// Since at this point in the method invocation the exception handler
// would try to exit the monitor of synchronized methods which hasn't
// been entered yet, we set the thread local variable
// _do_not_unlock_if_synchronized to true. The remove_activation will
// check this flag.
- const Address do_not_unlock_if_synchronized(rax,
+ const Address do_not_unlock_if_synchronized(unlock_thread,
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
__ movbool(do_not_unlock_if_synchronized, true);
@@ -991,7 +1025,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
bang_stack_shadow_pages(true);
// reset the _do_not_unlock_if_synchronized flag
- __ movl(rax, STATE(_thread)); // get thread
+ NOT_LP64(__ movl(rax, STATE(_thread));) // get thread
__ movbool(do_not_unlock_if_synchronized, false);
@@ -1022,62 +1056,81 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// work registers
const Register method = rbx;
- const Register thread = rdi;
- const Register t = rcx;
+ const Register thread = LP64_ONLY(r15_thread) NOT_LP64(rdi);
+ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp(); // rcx|rscratch1
// allocate space for parameters
- __ movl(method, STATE(_method));
+ __ movptr(method, STATE(_method));
__ verify_oop(method);
__ load_unsigned_word(t, Address(method, methodOopDesc::size_of_parameters_offset()));
__ shll(t, 2);
- __ addl(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
- __ subl(rsp, t);
- __ andl(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
+#ifdef _LP64
+ __ subptr(rsp, t);
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
+#else
+ __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
+ __ subptr(rsp, t);
+ __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
+#endif // _LP64
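Both branches above end by forcing the outgoing-argument area onto a 16-byte boundary, which the amd64 ABI requires at call sites and which gcc assumes for XMM use on 32-bit; the "windows" comment refers to the Win64 register-argument home area reserved below the arguments. Masking with -16 is just rounding down, as this one-line sketch shows:

    #include <cstdint>

    // andptr(rsp, -16): clear the low four bits, i.e. round the stack
    // pointer down to the next 16-byte boundary.
    static uintptr_t align_down_16(uintptr_t sp) {
      return sp & ~(uintptr_t)0xf;           // e.g. 0x7ffc1238 -> 0x7ffc1230
    }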
// get signature handler
Label pending_exception_present;
{ Label L;
- __ movl(t, Address(method, methodOopDesc::signature_handler_offset()));
- __ testl(t, t);
+ __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ testptr(t, t);
__ jcc(Assembler::notZero, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method, false);
- __ movl(method, STATE(_method));
- __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ movptr(method, STATE(_method));
+ __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, pending_exception_present);
__ verify_oop(method);
- __ movl(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
__ bind(L);
}
#ifdef ASSERT
{
Label L;
- __ pushl(t);
+ __ push(t);
__ get_thread(t); // get vm's javathread*
- __ cmpl(t, STATE(_thread));
+ __ cmpptr(t, STATE(_thread));
__ jcc(Assembler::equal, L);
__ int3();
__ bind(L);
- __ popl(t);
+ __ pop(t);
}
#endif //
+ const Register from_ptr = InterpreterRuntime::SignatureHandlerGenerator::from();
// call signature handler
- assert(InterpreterRuntime::SignatureHandlerGenerator::from() == rdi, "adjust this code");
assert(InterpreterRuntime::SignatureHandlerGenerator::to () == rsp, "adjust this code");
- assert(InterpreterRuntime::SignatureHandlerGenerator::temp() == t , "adjust this code");
+
// The generated handlers do not touch RBX (the method oop).
// However, large signatures cannot be cached and are generated
// each time here. The slow-path generator will blow RBX
// sometime, so we must reload it after the call.
- __ movl(rdi, STATE(_locals)); // get the from pointer
+ __ movptr(from_ptr, STATE(_locals)); // get the from pointer
__ call(t);
- __ movl(method, STATE(_method));
+ __ movptr(method, STATE(_method));
__ verify_oop(method);
// result handler is in rax
// set result handler
- __ movl(STATE(_result_handler), rax);
+ __ movptr(STATE(_result_handler), rax);
+
+
+ // get native function entry point
+ { Label L;
+ __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ testptr(rax, rax);
+ __ jcc(Assembler::notZero, L);
+ __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
+ __ movptr(method, STATE(_method));
+ __ verify_oop(method);
+ __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ bind(L);
+ }
// pass mirror handle if static call
{ Label L;
@@ -1086,55 +1139,53 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);
// get mirror
- __ movl(t, Address(method, methodOopDesc:: constants_offset()));
- __ movl(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
- __ movl(t, Address(t, mirror_offset));
+ __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
+ __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(t, Address(t, mirror_offset));
// copy mirror into activation object
- __ movl(STATE(_oop_temp), t);
+ __ movptr(STATE(_oop_temp), t);
// pass handle to mirror
- __ leal(t, STATE(_oop_temp));
- __ movl(Address(rsp, wordSize), t);
+#ifdef _LP64
+ __ lea(c_rarg1, STATE(_oop_temp));
+#else
+ __ lea(t, STATE(_oop_temp));
+ __ movptr(Address(rsp, wordSize), t);
+#endif // _LP64
__ bind(L);
}
#ifdef ASSERT
{
Label L;
- __ pushl(t);
+ __ push(t);
__ get_thread(t); // get vm's javathread*
- __ cmpl(t, STATE(_thread));
+ __ cmpptr(t, STATE(_thread));
__ jcc(Assembler::equal, L);
__ int3();
__ bind(L);
- __ popl(t);
+ __ pop(t);
}
#endif //
- // get native function entry point
- { Label L;
- __ movl(rax, Address(method, methodOopDesc::native_function_offset()));
- __ testl(rax, rax);
- __ jcc(Assembler::notZero, L);
- __ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
- __ movl(method, STATE(_method));
- __ verify_oop(method);
- __ movl(rax, Address(method, methodOopDesc::native_function_offset()));
- __ bind(L);
- }
-
// pass JNIEnv
- __ movl(thread, STATE(_thread)); // get thread
- __ leal(t, Address(thread, JavaThread::jni_environment_offset()));
- __ movl(Address(rsp, 0), t);
+#ifdef _LP64
+ __ lea(c_rarg0, Address(thread, JavaThread::jni_environment_offset()));
+#else
+ __ movptr(thread, STATE(_thread)); // get thread
+ __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
+
+ __ movptr(Address(rsp, 0), t);
+#endif // _LP64
+
#ifdef ASSERT
{
Label L;
- __ pushl(t);
+ __ push(t);
__ get_thread(t); // get vm's javathread*
- __ cmpl(t, STATE(_thread));
+ __ cmpptr(t, STATE(_thread));
__ jcc(Assembler::equal, L);
__ int3();
__ bind(L);
- __ popl(t);
+ __ pop(t);
}
#endif //
@@ -1159,8 +1210,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ call(rax);
// result potentially in rdx:rax or ST0
- __ movl(method, STATE(_method));
- __ movl(thread, STATE(_thread)); // get thread
+ __ movptr(method, STATE(_method));
+ NOT_LP64(__ movptr(thread, STATE(_thread));) // get thread
// The potential result is in ST(0) & rdx:rax
// With C++ interpreter we leave any possible result in ST(0) until we are in result handler and then
@@ -1170,7 +1221,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// It is safe to do these pushes because state is _thread_in_native and return address will be found
// via _last_native_pc and not via _last_jave_sp
- // Must save the value of ST(0) since it could be destroyed before we get to result handler
+ // Must save the value of ST(0)/xmm0 since it could be destroyed before we get to result handler
{ Label Lpush, Lskip;
ExternalAddress float_handler(AbstractInterpreter::result_handler(T_FLOAT));
ExternalAddress double_handler(AbstractInterpreter::result_handler(T_DOUBLE));
@@ -1179,11 +1230,20 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmpptr(STATE(_result_handler), double_handler.addr());
__ jcc(Assembler::notEqual, Lskip);
__ bind(Lpush);
- __ push(dtos);
+ __ subptr(rsp, 2*wordSize);
+ if ( UseSSE < 2 ) {
+ __ fstp_d(Address(rsp, 0));
+ } else {
+ __ movdbl(Address(rsp, 0), xmm0);
+ }
__ bind(Lskip);
}
- __ push(ltos); // save rax:rdx for potential use by result handler.
+ // save rax:rdx for potential use by result handler.
+ __ push(rax);
+#ifndef _LP64
+ __ push(rdx);
+#endif // _LP64
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed.
@@ -1192,15 +1252,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
}
else if (CheckJNICalls ) {
- __ call(RuntimeAddress(StubRoutines::i486::verify_mxcsr_entry()));
+ __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
+#ifndef _LP64
// Either restore the x87 floating pointer control word after returning
// from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) {
- __ call(RuntimeAddress(StubRoutines::i486::verify_fpu_cntrl_wrd_entry()));
+ __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
}
+#endif // _LP64
// change thread state
@@ -1231,17 +1293,16 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Don't use call_VM as it will see a possible pending exception and forward it
// and never return here preventing us from clearing _last_native_pc down below.
// Also can't use call_VM_leaf either as it will check to see if rsi & rdi are
- // preserved and correspond to the bcp/locals pointers. So we do a runtime call
- // by hand.
+ // preserved and correspond to the bcp/locals pointers.
//
- __ pushl(thread);
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
- JavaThread::check_special_condition_for_native_trans)));
+
+ ((MacroAssembler*)_masm)->call_VM_leaf(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans),
+ thread);
__ increment(rsp, wordSize);
- __ movl(method, STATE(_method));
+ __ movptr(method, STATE(_method));
__ verify_oop(method);
- __ movl(thread, STATE(_thread)); // get thread
+ __ movptr(thread, STATE(_thread)); // get thread
__ bind(Continue);
}
@@ -1252,8 +1313,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ reset_last_Java_frame(thread, true, true);
// reset handle block
- __ movl(t, Address(thread, JavaThread::active_handles_offset()));
- __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
+ __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
+ __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
// If result was an oop then unbox and save it in the frame
{ Label L;
@@ -1261,15 +1322,21 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
ExternalAddress oop_handler(AbstractInterpreter::result_handler(T_OBJECT));
__ cmpptr(STATE(_result_handler), oop_handler.addr());
__ jcc(Assembler::notEqual, no_oop);
- __ pop(ltos);
- __ testl(rax, rax);
+#ifndef _LP64
+ __ pop(rdx);
+#endif // _LP64
+ __ pop(rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, store_result);
// unbox
- __ movl(rax, Address(rax, 0));
+ __ movptr(rax, Address(rax, 0));
__ bind(store_result);
- __ movl(STATE(_oop_temp), rax);
+ __ movptr(STATE(_oop_temp), rax);
// keep stack depth as expected by pushing oop which will eventually be discarded
- __ push(ltos);
+ __ push(rax);
+#ifndef _LP64
+ __ push(rdx);
+#endif // _LP64
__ bind(no_oop);
}
@@ -1278,9 +1345,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
__ jcc(Assembler::notEqual, no_reguard);
- __ pushad();
+ __ pusha();
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
- __ popad();
+ __ popa();
__ bind(no_reguard);
}
@@ -1295,7 +1362,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// handle exceptions (exception handling will handle unlocking!)
{ Label L;
- __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::zero, L);
__ bind(pending_exception_present);
@@ -1307,12 +1374,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// remove activation
- __ movl(t, STATE(_sender_sp));
+ __ movptr(t, STATE(_sender_sp));
__ leave(); // remove frame anchor
- __ popl(rdi); // get return address
- __ movl(state, STATE(_prev_link)); // get previous state for return
- __ movl(rsp, t); // set sp to sender sp
- __ pushl(rdi); // [ush throwing pc
+ __ pop(rdi); // get return address
+ __ movptr(state, STATE(_prev_link)); // get previous state for return
+ __ mov(rsp, t); // set sp to sender sp
+ __ push(rdi); // push throwing pc
// The skips unlocking!! This seems to be what asm interpreter does but seems
// very wrong. Not clear if this violates the spec.
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
@@ -1326,13 +1393,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ jcc(Assembler::zero, L);
// the code below should be shared with interpreter macro assembler implementation
{ Label unlock;
+ const Register monitor = NOT_LP64(rdx) LP64_ONLY(c_rarg1);
// BasicObjectLock will be first in list, since this is a synchronized method. However, need
// to check that the object has not been unlocked by an explicit monitorexit bytecode.
- __ movl(rdx, STATE(_monitor_base));
- __ subl(rdx, frame::interpreter_frame_monitor_size() * wordSize); // address of initial monitor
+ __ movptr(monitor, STATE(_monitor_base));
+ __ subptr(monitor, frame::interpreter_frame_monitor_size() * wordSize); // address of initial monitor
- __ movl(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
- __ testl(t, t);
+ __ movptr(t, Address(monitor, BasicObjectLock::obj_offset_in_bytes()));
+ __ testptr(t, t);
__ jcc(Assembler::notZero, unlock);
// Entry already unlocked, need to throw exception
@@ -1340,9 +1408,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ should_not_reach_here();
__ bind(unlock);
- __ unlock_object(rdx);
+ __ unlock_object(monitor);
// unlock can blow rbx so restore it for path that needs it below
- __ movl(method, STATE(_method));
+ __ movptr(method, STATE(_method));
}
__ bind(L);
}
@@ -1355,18 +1423,21 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ notify_method_exit(vtos, InterpreterMacroAssembler::NotifyJVMTI);
// restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
- __ pop(ltos); // restore rax/rdx floating result if present still on stack
- __ movl(t, STATE(_result_handler)); // get result handler
+#ifndef _LP64
+ __ pop(rdx);
+#endif // _LP64
+ __ pop(rax);
+ __ movptr(t, STATE(_result_handler)); // get result handler
__ call(t); // call result handler to convert to tosca form
// remove activation
- __ movl(t, STATE(_sender_sp));
+ __ movptr(t, STATE(_sender_sp));
__ leave(); // remove frame anchor
- __ popl(rdi); // get return address
- __ movl(state, STATE(_prev_link)); // get previous state for return (if c++ interpreter was caller)
- __ movl(rsp, t); // set sp to sender sp
+ __ pop(rdi); // get return address
+ __ movptr(state, STATE(_prev_link)); // get previous state for return (if c++ interpreter was caller)
+ __ mov(rsp, t); // set sp to sender sp
__ jmp(rdi);
// invocation counter overflow
@@ -1382,7 +1453,6 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Generate entries that will put a result type index into rcx
void CppInterpreterGenerator::generate_deopt_handling() {
- const Register state = rsi;
Label return_from_deopt_common;
// Generate entries that will put a result type index into rcx
@@ -1449,51 +1519,50 @@ void CppInterpreterGenerator::generate_deopt_handling() {
//
__ bind(return_from_deopt_common);
- __ leal(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
+ __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
// setup rsp so we can push the "result" as needed.
- __ movl(rsp, STATE(_stack)); // trim stack (is prepushed)
- __ addl(rsp, wordSize); // undo prepush
+ __ movptr(rsp, STATE(_stack)); // trim stack (is prepushed)
+ __ addptr(rsp, wordSize); // undo prepush
ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
- // Address index(noreg, rcx, Address::times_4);
- __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_4)));
- // __ movl(rcx, Address(noreg, rcx, Address::times_4, int(AbstractInterpreter::_tosca_to_stack)));
+ // Address index(noreg, rcx, Address::times_ptr);
+ __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
+ // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
__ call(rcx); // call result converter
__ movl(STATE(_msg), (int)BytecodeInterpreter::deopt_resume);
- __ leal(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
- __ movl(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
+ __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
+ __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
// result if any on stack already )
- __ movl(rsp, STATE(_stack_limit)); // restore expression stack to full depth
+ __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
}
// Generate the code to handle a more_monitors message from the c++ interpreter
void CppInterpreterGenerator::generate_more_monitors() {
- const Register state = rsi;
Label entry, loop;
const int entry_size = frame::interpreter_frame_monitor_size() * wordSize;
- // 1. compute new pointers // rsp: old expression stack top
- __ movl(rdx, STATE(_stack_base)); // rdx: old expression stack bottom
- __ subl(rsp, entry_size); // move expression stack top limit
- __ subl(STATE(_stack), entry_size); // update interpreter stack top
- __ movl(STATE(_stack_limit), rsp); // inform interpreter
- __ subl(rdx, entry_size); // move expression stack bottom
- __ movl(STATE(_stack_base), rdx); // inform interpreter
- __ movl(rcx, STATE(_stack)); // set start value for copy loop
+ // 1. compute new pointers // rsp: old expression stack top
+ __ movptr(rdx, STATE(_stack_base)); // rdx: old expression stack bottom
+ __ subptr(rsp, entry_size); // move expression stack top limit
+ __ subptr(STATE(_stack), entry_size); // update interpreter stack top
+ __ subptr(STATE(_stack_limit), entry_size); // inform interpreter
+ __ subptr(rdx, entry_size); // move expression stack bottom
+ __ movptr(STATE(_stack_base), rdx); // inform interpreter
+ __ movptr(rcx, STATE(_stack)); // set start value for copy loop
__ jmp(entry);
// 2. move expression stack contents
__ bind(loop);
- __ movl(rbx, Address(rcx, entry_size)); // load expression stack word from old location
- __ movl(Address(rcx, 0), rbx); // and store it at new location
- __ addl(rcx, wordSize); // advance to next word
+ __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
+ __ movptr(Address(rcx, 0), rbx); // and store it at new location
+ __ addptr(rcx, wordSize); // advance to next word
__ bind(entry);
- __ cmpl(rcx, rdx); // check if bottom reached
- __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
+ __ cmpptr(rcx, rdx); // check if bottom reached
+ __ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
// now zero the slot so we can find it.
- __ movl(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int) NULL);
+ __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
__ movl(STATE(_msg), (int)BytecodeInterpreter::got_monitors);
}
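Roughly, in C terms, generate_more_monitors makes room for one more BasicObjectLock by lowering every stack-related pointer by entry_size and sliding the live expression-stack words down into the gap. A sketch under those assumptions (offsets simplified):

    #include <cstdint>
    #include <cstddef>

    // new_top/new_base have already been lowered by entry_words; copy each
    // live word from its old slot (entry_words higher) into its new slot,
    // then clear the freed monitor slot so the interpreter can find it.
    static void slide_expression_stack(intptr_t* new_top, intptr_t* new_base,
                                       size_t entry_words) {
      for (intptr_t* p = new_top; p != new_base; ++p) {
        p[0] = p[entry_words];   // movptr(rbx, Address(rcx, entry_size)); movptr(Address(rcx, 0), rbx)
      }
      new_base[0] = 0;           // generated code zeroes the obj field of the new monitor
    }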
@@ -1517,7 +1586,7 @@ void CppInterpreterGenerator::generate_more_monitors() {
//
// rbx: methodOop
// rcx: receiver - unused (retrieved from stack as needed)
-// rsi: previous frame manager state (NULL from the call_stub/c1/c2)
+// rsi/r13: previous frame manager state (NULL from the call_stub/c1/c2)
//
//
// Stack layout at entry
@@ -1539,7 +1608,7 @@ static address interpreter_frame_manager = NULL;
address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// rbx: methodOop
- // rsi: sender sp
+ // rsi/r13: sender sp
// Because we redispatch "recursive" interpreter entries thru this same entry point
// the "input" register usage is a little strange and not what you expect coming
@@ -1562,12 +1631,11 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
if (UseFastAccessorMethods && !synchronized) __ bind(fast_accessor_slow_entry_path);
Label dispatch_entry_2;
- __ movl(rcx, rsi);
- __ movl(rsi, 0); // no current activation
+ __ movptr(rcx, sender_sp_on_entry);
+ __ movptr(state, (int32_t)NULL_WORD); // no current activation
__ jmp(dispatch_entry_2);
- const Register state = rsi; // current activation object, valid on entry
const Register locals = rdi;
Label re_dispatch;
@@ -1575,12 +1643,12 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ bind(re_dispatch);
// save sender sp (doesn't include return address
- __ leal(rcx, Address(rsp, wordSize));
+ __ lea(rcx, Address(rsp, wordSize));
__ bind(dispatch_entry_2);
// save sender sp
- __ pushl(rcx);
+ __ push(rcx);
const Address size_of_parameters(rbx, methodOopDesc::size_of_parameters_offset());
const Address size_of_locals (rbx, methodOopDesc::size_of_locals_offset());
@@ -1597,7 +1665,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// rcx: size of parameters
__ load_unsigned_word(rdx, size_of_locals); // get size of locals in words
- __ subl(rdx, rcx); // rdx = no. of additional locals
+ __ subptr(rdx, rcx); // rdx = no. of additional locals
// see if we've got enough room on the stack for locals plus overhead.
generate_stack_overflow_check(); // C++
@@ -1609,26 +1677,26 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// compute beginning of parameters (rdi)
- __ leal(locals, Address(rsp, rcx, Address::times_4, wordSize));
+ __ lea(locals, Address(rsp, rcx, Address::times_ptr, wordSize));
// save sender's sp
// __ movl(rcx, rsp);
// get sender's sp
- __ popl(rcx);
+ __ pop(rcx);
// get return address
- __ popl(rax);
+ __ pop(rax);
// rdx - # of additional locals
// allocate space for locals
// explicitly initialize locals
{
Label exit, loop;
- __ testl(rdx, rdx);
+ __ testl(rdx, rdx); // (32bit ok)
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop);
- __ pushl((int)NULL); // initialize local variables
+ __ push((int32_t)NULL_WORD); // initialize local variables
__ decrement(rdx); // until everything initialized
__ jcc(Assembler::greater, loop);
__ bind(exit);
@@ -1664,17 +1732,21 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ bind(call_interpreter_2);
{
- const Register thread = rcx;
+ const Register thread = NOT_LP64(rcx) LP64_ONLY(r15_thread);
- __ pushl(state); // push arg to interpreter
- __ movl(thread, STATE(_thread));
+#ifdef _LP64
+ __ mov(c_rarg0, state);
+#else
+ __ push(state); // push arg to interpreter
+ __ movptr(thread, STATE(_thread));
+#endif // _LP64
// We can setup the frame anchor with everything we want at this point
// as we are thread_in_Java and no safepoints can occur until we go to
// vm mode. We do have to clear flags on return from vm but that is it
//
- __ movl(Address(thread, JavaThread::last_Java_fp_offset()), rbp);
- __ movl(Address(thread, JavaThread::last_Java_sp_offset()), rsp);
+ __ movptr(Address(thread, JavaThread::last_Java_fp_offset()), rbp);
+ __ movptr(Address(thread, JavaThread::last_Java_sp_offset()), rsp);
// Call the interpreter
@@ -1682,14 +1754,14 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
RuntimeAddress checking(CAST_FROM_FN_PTR(address, BytecodeInterpreter::runWithChecks));
__ call(JvmtiExport::can_post_interpreter_events() ? checking : normal);
- __ popl(rax); // discard parameter to run
+ NOT_LP64(__ pop(rax);) // discard parameter to run
//
// state is preserved since it is callee saved
//
// reset_last_Java_frame
- __ movl(thread, STATE(_thread));
+ NOT_LP64(__ movl(thread, STATE(_thread));)
__ reset_last_Java_frame(thread, true, true);
}
@@ -1703,15 +1775,15 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
Label bad_msg;
Label do_OSR;
- __ cmpl(rdx, (int)BytecodeInterpreter::call_method);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::call_method);
__ jcc(Assembler::equal, call_method);
- __ cmpl(rdx, (int)BytecodeInterpreter::return_from_method);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::return_from_method);
__ jcc(Assembler::equal, return_from_interpreted_method);
- __ cmpl(rdx, (int)BytecodeInterpreter::do_osr);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::do_osr);
__ jcc(Assembler::equal, do_OSR);
- __ cmpl(rdx, (int)BytecodeInterpreter::throwing_exception);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::throwing_exception);
__ jcc(Assembler::equal, throw_exception);
- __ cmpl(rdx, (int)BytecodeInterpreter::more_monitors);
+ __ cmpl(rdx, (int32_t)BytecodeInterpreter::more_monitors);
__ jcc(Assembler::notEqual, bad_msg);
// Allocate more monitor space, shuffle expression stack....
@@ -1724,8 +1796,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
unctrap_frame_manager_entry = __ pc();
//
// Load the registers we need.
- __ leal(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
- __ movl(rsp, STATE(_stack_limit)); // restore expression stack to full depth
+ __ lea(state, Address(rbp, -(int)sizeof(BytecodeInterpreter)));
+ __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
__ jmp(call_interpreter_2);
@@ -1757,13 +1829,17 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
Label unwind_and_forward;
// restore state pointer.
- __ leal(state, Address(rbp, -sizeof(BytecodeInterpreter)));
+ __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter)));
- __ movl(rbx, STATE(_method)); // get method
+ __ movptr(rbx, STATE(_method)); // get method
+#ifdef _LP64
+ __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
+#else
__ movl(rcx, STATE(_thread)); // get thread
// Store exception with interpreter will expect it
- __ movl(Address(rcx, Thread::pending_exception_offset()), rax);
+ __ movptr(Address(rcx, Thread::pending_exception_offset()), rax);
+#endif // _LP64
// is current frame vanilla or native?
@@ -1779,11 +1855,11 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// unwind rbp, return stack to unextended value and re-push return address
- __ movl(rcx, STATE(_sender_sp));
+ __ movptr(rcx, STATE(_sender_sp));
__ leave();
- __ popl(rdx);
- __ movl(rsp, rcx);
- __ pushl(rdx);
+ __ pop(rdx);
+ __ mov(rsp, rcx);
+ __ push(rdx);
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// Return point from a call which returns a result in the native abi
@@ -1801,8 +1877,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// The FPU stack is clean if UseSSE >= 2 but must be cleaned in other cases
if (UseSSE < 2) {
- __ leal(state, Address(rbp, -sizeof(BytecodeInterpreter)));
- __ movl(rbx, STATE(_result._to_call._callee)); // get method just executed
+ __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter)));
+ __ movptr(rbx, STATE(_result._to_call._callee)); // get method just executed
__ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
__ cmpl(rcx, AbstractInterpreter::BasicType_as_index(T_FLOAT)); // Result stub address array index
__ jcc(Assembler::equal, do_float);
@@ -1832,10 +1908,12 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ jmp(done_conv);
}
+#if 0
// emit a sentinel we can test for when converting an interpreter
// entry point to a compiled entry point.
__ a_long(Interpreter::return_sentinel);
__ a_long((int)compiled_entry);
+#endif
// Return point to interpreter from compiled/native method
@@ -1848,33 +1926,37 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// calling convention left it (i.e. params may or may not be present)
// Copy the result from tosca and place it on java expression stack.
- // Restore rsi as compiled code may not preserve it
+ // Restore rsi/r13 as compiled code may not preserve it
- __ leal(state, Address(rbp, -sizeof(BytecodeInterpreter)));
+ __ lea(state, Address(rbp, -sizeof(BytecodeInterpreter)));
// restore stack to what we had when we left (in case i2c extended it)
- __ movl(rsp, STATE(_stack));
- __ leal(rsp, Address(rsp, wordSize));
+ __ movptr(rsp, STATE(_stack));
+ __ lea(rsp, Address(rsp, wordSize));
// If there is a pending exception then we don't really have a result to process
- __ movl(rcx, STATE(_thread)); // get thread
- __ cmpl(Address(rcx, Thread::pending_exception_offset()), (int)NULL);
+#ifdef _LP64
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+#else
+ __ movptr(rcx, STATE(_thread)); // get thread
+ __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
+#endif // _LP64
__ jcc(Assembler::notZero, return_with_exception);
// get method just executed
- __ movl(rbx, STATE(_result._to_call._callee));
+ __ movptr(rbx, STATE(_result._to_call._callee));
// callee left args on top of expression stack, remove them
__ load_unsigned_word(rcx, Address(rbx, methodOopDesc::size_of_parameters_offset()));
- __ leal(rsp, Address(rsp, rcx, Address::times_4));
+ __ lea(rsp, Address(rsp, rcx, Address::times_ptr));
__ movl(rcx, Address(rbx, methodOopDesc::result_index_offset()));
ExternalAddress tosca_to_stack((address)CppInterpreter::_tosca_to_stack);
- // Address index(noreg, rax, Address::times_4);
- __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_4)));
- // __ movl(rcx, Address(noreg, rcx, Address::times_4, int(AbstractInterpreter::_tosca_to_stack)));
+ // Address index(noreg, rax, Address::times_ptr);
+ __ movptr(rcx, ArrayAddress(tosca_to_stack, Address(noreg, rcx, Address::times_ptr)));
+ // __ movl(rcx, Address(noreg, rcx, Address::times_ptr, int(AbstractInterpreter::_tosca_to_stack)));
__ call(rcx); // call result converter
__ jmp(resume_interpreter);
@@ -1884,7 +1966,7 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ bind(return_with_exception);
// Exception present, empty stack
- __ movl(rsp, STATE(_stack_base));
+ __ movptr(rsp, STATE(_stack_base));
__ jmp(resume_interpreter);
// Return from interpreted method we return result appropriate to the caller (i.e. "recursive"
@@ -1895,17 +1977,17 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
Label return_to_initial_caller;
- __ movl(rbx, STATE(_method)); // get method just executed
- __ cmpl(STATE(_prev_link), (int)NULL); // returning from "recursive" interpreter call?
+ __ movptr(rbx, STATE(_method)); // get method just executed
+ __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
__ movl(rax, Address(rbx, methodOopDesc::result_index_offset())); // get result type index
__ jcc(Assembler::equal, return_to_initial_caller); // back to native code (call_stub/c1/c2)
// Copy result to callers java stack
ExternalAddress stack_to_stack((address)CppInterpreter::_stack_to_stack);
- // Address index(noreg, rax, Address::times_4);
+ // Address index(noreg, rax, Address::times_ptr);
- __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_4)));
- // __ movl(rax, Address(noreg, rax, Address::times_4, int(AbstractInterpreter::_stack_to_stack)));
+ __ movptr(rax, ArrayAddress(stack_to_stack, Address(noreg, rax, Address::times_ptr)));
+ // __ movl(rax, Address(noreg, rax, Address::times_ptr, int(AbstractInterpreter::_stack_to_stack)));
__ call(rax); // call result converter
Label unwind_recursive_activation;
@@ -1915,9 +1997,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// result converter left rax pointing to top of the java stack for method we are returning
// to. Now all we must do is unwind the state from the completed call
- __ movl(state, STATE(_prev_link)); // unwind state
+ __ movptr(state, STATE(_prev_link)); // unwind state
__ leave(); // pop the frame
- __ movl(rsp, rax); // unwind stack to remove args
+ __ mov(rsp, rax); // unwind stack to remove args
// Resume the interpreter. The current frame contains the current interpreter
// state object.
@@ -1928,10 +2010,10 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// state == interpreterState object for method we are resuming
__ movl(STATE(_msg), (int)BytecodeInterpreter::method_resume);
- __ leal(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
- __ movl(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
+ __ lea(rsp, Address(rsp, -wordSize)); // prepush stack (result if any already present)
+ __ movptr(STATE(_stack), rsp); // inform interpreter of new stack depth (parameters removed,
// result if any on stack already )
- __ movl(rsp, STATE(_stack_limit)); // restore expression stack to full depth
+ __ movptr(rsp, STATE(_stack_limit)); // restore expression stack to full depth
__ jmp(call_interpreter_2); // No need to bang
// interpreter returning to native code (call_stub/c1/c2)
@@ -1940,9 +2022,9 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ bind(return_to_initial_caller);
ExternalAddress stack_to_native((address)CppInterpreter::_stack_to_native_abi);
- // Address index(noreg, rax, Address::times_4);
+ // Address index(noreg, rax, Address::times_ptr);
- __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_4)));
+ __ movptr(rax, ArrayAddress(stack_to_native, Address(noreg, rax, Address::times_ptr)));
__ call(rax); // call result converter
Label unwind_initial_activation;
@@ -1964,11 +2046,11 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// return restoring the stack to the original sender_sp value
- __ movl(rcx, STATE(_sender_sp));
+ __ movptr(rcx, STATE(_sender_sp));
__ leave();
- __ popl(rdi); // get return address
+ __ pop(rdi); // get return address
// set stack to sender's sp
- __ movl(rsp, rcx);
+ __ mov(rsp, rcx);
__ jmp(rdi); // return to call_stub
// OSR request, adjust return address to make current frame into adapter frame
@@ -1982,17 +2064,16 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// it or is it callstub/compiled?
// Move buffer to the expected parameter location
- __ movl(rcx, STATE(_result._osr._osr_buf));
+ __ movptr(rcx, STATE(_result._osr._osr_buf));
- __ movl(rax, STATE(_result._osr._osr_entry));
+ __ movptr(rax, STATE(_result._osr._osr_entry));
- __ cmpl(STATE(_prev_link), (int)NULL); // returning from "recursive" interpreter call?
+ __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from "recursive" interpreter call?
__ jcc(Assembler::equal, remove_initial_frame); // back to native code (call_stub/c1/c2)
- // __ movl(state, STATE(_prev_link)); // unwind state
- __ movl(rsi, STATE(_sender_sp)); // get sender's sp in expected register
+ __ movptr(sender_sp_on_entry, STATE(_sender_sp)); // get sender's sp in expected register
__ leave(); // pop the frame
- __ movl(rsp, rsi); // trim any stack expansion
+ __ mov(rsp, sender_sp_on_entry); // trim any stack expansion
// We know we are calling compiled so push specialized return
@@ -2006,14 +2087,14 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ bind(remove_initial_frame);
- __ movl(rdx, STATE(_sender_sp));
+ __ movptr(rdx, STATE(_sender_sp));
__ leave();
// get real return
- __ popl(rsi);
+ __ pop(rsi);
// set stack to sender's sp
- __ movl(rsp, rdx);
+ __ mov(rsp, rdx);
// repush real return
- __ pushl(rsi);
+ __ push(rsi);
// Enter OSR nmethod
__ jmp(rax);
@@ -2028,10 +2109,10 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
// stack points to next free location and not top element on expression stack
// method expects sp to be pointing to topmost element
- __ movl(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top
- __ leal(rsp, Address(rsp, wordSize));
+ __ movptr(rsp, STATE(_stack)); // pop args to c++ interpreter, set sp to java stack top
+ __ lea(rsp, Address(rsp, wordSize));
- __ movl(rbx, STATE(_result._to_call._callee)); // get method to execute
+ __ movptr(rbx, STATE(_result._to_call._callee)); // get method to execute
// don't need a return address if reinvoking interpreter
@@ -2047,13 +2128,13 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ cmpptr(STATE(_result._to_call._callee_entry_point), entry.addr()); // returning to interpreter?
__ jcc(Assembler::equal, re_dispatch); // yes
- __ popl(rax); // pop dummy address
+ __ pop(rax); // pop dummy address
// get specialized entry
- __ movl(rax, STATE(_result._to_call._callee_entry_point));
+ __ movptr(rax, STATE(_result._to_call._callee_entry_point));
// set sender SP
- __ movl(rsi, rsp);
+ __ mov(sender_sp_on_entry, rsp);
// method uses specialized entry, push a return so we look like call stub setup
// this path will handle fact that result is returned in registers and not
@@ -2073,10 +2154,10 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
Label unwind_initial_with_pending_exception;
__ bind(throw_exception);
- __ cmpl(STATE(_prev_link), (int)NULL); // returning from recursive interpreter call?
+ __ cmpptr(STATE(_prev_link), (int32_t)NULL_WORD); // returning from recursive interpreter call?
__ jcc(Assembler::equal, unwind_initial_with_pending_exception); // no, back to native code (call_stub/c1/c2)
- __ movl(rax, STATE(_locals)); // pop parameters get new stack value
- __ addl(rax, wordSize); // account for prepush before we return
+ __ movptr(rax, STATE(_locals)); // pop parameters get new stack value
+ __ addptr(rax, wordSize); // account for prepush before we return
__ jmp(unwind_recursive_activation);
__ bind(unwind_initial_with_pending_exception);
diff --git a/src/cpu/x86/vm/dump_x86_32.cpp b/src/cpu/x86/vm/dump_x86_32.cpp
index db80c6367..8bb427e3b 100644
--- a/src/cpu/x86/vm/dump_x86_32.cpp
+++ b/src/cpu/x86/vm/dump_x86_32.cpp
@@ -98,24 +98,24 @@ void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
// table.
#ifdef WIN32
- __ pushl(rcx); // save "this"
+ __ push(rcx); // save "this"
#endif
- __ movl(rcx, rax);
- __ shrl(rcx, 8); // isolate vtable identifier.
- __ shll(rcx, LogBytesPerWord);
+ __ mov(rcx, rax);
+ __ shrptr(rcx, 8); // isolate vtable identifier.
+ __ shlptr(rcx, LogBytesPerWord);
Address index(noreg, rcx, Address::times_1);
ExternalAddress vtbl((address)vtbl_list);
__ movptr(rdx, ArrayAddress(vtbl, index)); // get correct vtable address.
#ifdef WIN32
- __ popl(rcx); // restore "this"
+ __ pop(rcx); // restore "this"
#else
- __ movl(rcx, Address(rsp, 4)); // fetch "this"
+ __ movptr(rcx, Address(rsp, BytesPerWord)); // fetch "this"
#endif
- __ movl(Address(rcx, 0), rdx); // update vtable pointer.
+ __ movptr(Address(rcx, 0), rdx); // update vtable pointer.
- __ andl(rax, 0x00ff); // isolate vtable method index
- __ shll(rax, LogBytesPerWord);
- __ addl(rax, rdx); // address of real method pointer.
+ __ andptr(rax, 0x00ff); // isolate vtable method index
+ __ shlptr(rax, LogBytesPerWord);
+ __ addptr(rax, rdx); // address of real method pointer.
__ jmp(Address(rax, 0)); // get real method pointer.
__ flush();
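The stub above treats rax as a packed token: everything above the low byte selects which vtable in vtbl_list to install, and the low byte selects the method slot inside it, both scaled to byte offsets. A small sketch of that decomposition, with kLogBytesPerWord standing in for HotSpot's LogBytesPerWord:

    #include <cstdint>
    #include <cstddef>

    static const unsigned kLogBytesPerWord = (sizeof(void*) == 8) ? 3 : 2;

    // High bits of the token: byte offset of the wanted vtable in vtbl_list.
    static size_t vtable_list_offset(uintptr_t token) {
      return (size_t)(token >> 8) << kLogBytesPerWord;      // shrptr(rcx, 8); shlptr(rcx, LogBytesPerWord)
    }

    // Low byte of the token: byte offset of the method slot inside that vtable.
    static size_t method_slot_offset(uintptr_t token) {
      return (size_t)(token & 0x00ff) << kLogBytesPerWord;  // andptr(rax, 0x00ff); shlptr(rax, LogBytesPerWord)
    }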
diff --git a/src/cpu/x86/vm/dump_x86_64.cpp b/src/cpu/x86/vm/dump_x86_64.cpp
index 7e80bbeac..3a8b02e64 100644
--- a/src/cpu/x86/vm/dump_x86_64.cpp
+++ b/src/cpu/x86/vm/dump_x86_64.cpp
@@ -90,22 +90,22 @@ void CompactingPermGenGen::generate_vtable_methods(void** vtbl_list,
// are on the stack and the "this" pointer is in c_rarg0. In addition, rax
// was set (above) to the offset of the method in the table.
- __ pushq(c_rarg1); // save & free register
- __ pushq(c_rarg0); // save "this"
- __ movq(c_rarg0, rax);
- __ shrq(c_rarg0, 8); // isolate vtable identifier.
- __ shlq(c_rarg0, LogBytesPerWord);
+ __ push(c_rarg1); // save & free register
+ __ push(c_rarg0); // save "this"
+ __ mov(c_rarg0, rax);
+ __ shrptr(c_rarg0, 8); // isolate vtable identifier.
+ __ shlptr(c_rarg0, LogBytesPerWord);
__ lea(c_rarg1, ExternalAddress((address)vtbl_list)); // ptr to correct vtable list.
- __ addq(c_rarg1, c_rarg0); // ptr to list entry.
- __ movq(c_rarg1, Address(c_rarg1, 0)); // get correct vtable address.
- __ popq(c_rarg0); // restore "this"
- __ movq(Address(c_rarg0, 0), c_rarg1); // update vtable pointer.
-
- __ andq(rax, 0x00ff); // isolate vtable method index
- __ shlq(rax, LogBytesPerWord);
- __ addq(rax, c_rarg1); // address of real method pointer.
- __ popq(c_rarg1); // restore register.
- __ movq(rax, Address(rax, 0)); // get real method pointer.
+ __ addptr(c_rarg1, c_rarg0); // ptr to list entry.
+ __ movptr(c_rarg1, Address(c_rarg1, 0)); // get correct vtable address.
+ __ pop(c_rarg0); // restore "this"
+ __ movptr(Address(c_rarg0, 0), c_rarg1); // update vtable pointer.
+
+ __ andptr(rax, 0x00ff); // isolate vtable method index
+ __ shlptr(rax, LogBytesPerWord);
+ __ addptr(rax, c_rarg1); // address of real method pointer.
+ __ pop(c_rarg1); // restore register.
+ __ movptr(rax, Address(rax, 0)); // get real method pointer.
__ jmp(rax); // jump to the real method.
__ flush();
diff --git a/src/cpu/x86/vm/frame_x86.cpp b/src/cpu/x86/vm/frame_x86.cpp
index 9fe370e85..4df2c039d 100644
--- a/src/cpu/x86/vm/frame_x86.cpp
+++ b/src/cpu/x86/vm/frame_x86.cpp
@@ -217,7 +217,8 @@ bool frame::safe_for_sender(JavaThread *thread) {
void frame::patch_pc(Thread* thread, address pc) {
if (TracePcPatching) {
- tty->print_cr("patch_pc at address 0x%x [0x%x -> 0x%x] ", &((address *)sp())[-1], ((address *)sp())[-1], pc);
+ tty->print_cr("patch_pc at address" INTPTR_FORMAT " [" INTPTR_FORMAT " -> " INTPTR_FORMAT "] ",
+ &((address *)sp())[-1], ((address *)sp())[-1], pc);
}
((address *)sp())[-1] = pc;
_cb = CodeCache::find_blob(pc);
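
The tracing fix above replaces the 32-bit-only "0x%x" conversions with INTPTR_FORMAT so the three pointers print in full on 64-bit builds. A portable stand-in for the same idea, using the C99 PRIxPTR macro rather than HotSpot's actual definition (MY_INTPTR_FORMAT below is hypothetical):

#include <cinttypes>
#include <cstdio>

// Hypothetical stand-in for INTPTR_FORMAT: always pointer-width, unlike "0x%x".
#define MY_INTPTR_FORMAT "0x%" PRIxPTR

void trace_patch_pc(void* slot, void* old_pc, void* new_pc) {
  std::printf("patch_pc at address " MY_INTPTR_FORMAT
              " [" MY_INTPTR_FORMAT " -> " MY_INTPTR_FORMAT "]\n",
              (uintptr_t) slot, (uintptr_t) old_pc, (uintptr_t) new_pc);
}
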
diff --git a/src/cpu/x86/vm/frame_x86.inline.hpp b/src/cpu/x86/vm/frame_x86.inline.hpp
index 85a1944bd..f06b40de3 100644
--- a/src/cpu/x86/vm/frame_x86.inline.hpp
+++ b/src/cpu/x86/vm/frame_x86.inline.hpp
@@ -159,7 +159,7 @@ inline intptr_t** frame::interpreter_frame_locals_addr() const {
inline intptr_t* frame::interpreter_frame_bcx_addr() const {
assert(is_interpreted_frame(), "must be interpreted");
- return (jint*) &(get_interpreterState()->_bcp);
+ return (intptr_t*) &(get_interpreterState()->_bcp);
}
@@ -179,7 +179,7 @@ inline methodOop* frame::interpreter_frame_method_addr() const {
inline intptr_t* frame::interpreter_frame_mdx_addr() const {
assert(is_interpreted_frame(), "must be interpreted");
- return (jint*) &(get_interpreterState()->_mdx);
+ return (intptr_t*) &(get_interpreterState()->_mdx);
}
// top of expression stack
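
The two cast changes above only matter on 64-bit: _bcp and _mdx occupy pointer-sized slots in the interpreter state, and a jint* view of such a slot covers half of it under LP64. A compile-time sketch of the distinction, with InterpreterStateDemo as a made-up stand-in for the real interpreter state:

#include <cstdint>

typedef int32_t jint;

struct InterpreterStateDemo { intptr_t _bcp; };   // pointer-sized slot, as in the real state

intptr_t* bcx_addr(InterpreterStateDemo* s) {
  // New form: the caller sees the whole pointer-sized slot.
  return &s->_bcp;
  // Old form, (jint*) &s->_bcp, would expose only the low 32 bits on LP64.
}
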
diff --git a/src/cpu/x86/vm/icache_x86.cpp b/src/cpu/x86/vm/icache_x86.cpp
index 6a031f074..f48926228 100644
--- a/src/cpu/x86/vm/icache_x86.cpp
+++ b/src/cpu/x86/vm/icache_x86.cpp
@@ -48,7 +48,7 @@ void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flu
__ bind(flush_line);
__ clflush(Address(addr, 0));
- __ addq(addr, ICache::line_size);
+ __ addptr(addr, ICache::line_size);
__ decrementl(lines);
__ jcc(Assembler::notZero, flush_line);
@@ -60,7 +60,7 @@ void ICacheStubGenerator::generate_icache_flush(ICache::flush_icache_stub_t* flu
const Address magic(rsp, 3*wordSize);
__ lock(); __ addl(Address(rsp, 0), 0);
#endif // AMD64
- __ movl(rax, magic); // Handshake with caller to make sure it happened!
+ __ movptr(rax, magic); // Handshake with caller to make sure it happened!
__ ret(0);
// Must be set here so StubCodeMark destructor can call the flush stub.
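
For reference, the loop this stub emits walks the range one cache line at a time and clflushes each line before the serializing handshake with the caller. A plain C++ sketch of that loop, assuming a 64-byte line as a stand-in for ICache::line_size:

#include <immintrin.h>

// Sketch of the emitted flush loop, not the stub itself.
void flush_icache_range(const char* addr, int lines) {
  while (lines-- > 0) {
    _mm_clflush(addr);   // flush one cache line
    addr += 64;          // stand-in for ICache::line_size
  }
}
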
diff --git a/src/cpu/x86/vm/interp_masm_x86_32.cpp b/src/cpu/x86/vm/interp_masm_x86_32.cpp
index 4b174357a..3e6e35eb9 100644
--- a/src/cpu/x86/vm/interp_masm_x86_32.cpp
+++ b/src/cpu/x86/vm/interp_masm_x86_32.cpp
@@ -29,8 +29,8 @@
// Implementation of InterpreterMacroAssembler
#ifdef CC_INTERP
void InterpreterMacroAssembler::get_method(Register reg) {
- movl(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
- movl(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
+ movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
+ movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
}
#endif // CC_INTERP
@@ -53,7 +53,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(
// when jvm built with ASSERTs.
#ifdef ASSERT
{ Label L;
- cmpl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base: last_sp != NULL");
bind(L);
@@ -79,7 +79,7 @@ void InterpreterMacroAssembler::call_VM_base(
) {
#ifdef ASSERT
{ Label L;
- cmpl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_base: last_sp != NULL");
bind(L);
@@ -132,10 +132,11 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
const Address val_addr1(rcx, JvmtiThreadState::earlyret_value_offset()
+ in_ByteSize(wordSize));
switch (state) {
- case atos: movl(rax, oop_addr);
- movl(oop_addr, NULL_WORD);
+ case atos: movptr(rax, oop_addr);
+ movptr(oop_addr, (int32_t)NULL_WORD);
verify_oop(rax, state); break;
- case ltos: movl(rdx, val_addr1); // fall through
+ case ltos:
+ movl(rdx, val_addr1); // fall through
case btos: // fall through
case ctos: // fall through
case stos: // fall through
@@ -146,9 +147,9 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
default : ShouldNotReachHere();
}
// Clean up tos value in the thread object
- movl(tos_addr, (int) ilgl);
- movl(val_addr, NULL_WORD);
- movl(val_addr1, NULL_WORD);
+ movl(tos_addr, (int32_t) ilgl);
+ movptr(val_addr, (int32_t)NULL_WORD);
+ NOT_LP64(movl(val_addr1, (int32_t)NULL_WORD));
}
@@ -156,8 +157,8 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
if (JvmtiExport::can_force_early_return()) {
Label L;
Register tmp = java_thread;
- movl(tmp, Address(tmp, JavaThread::jvmti_thread_state_offset()));
- testl(tmp, tmp);
+ movptr(tmp, Address(tmp, JavaThread::jvmti_thread_state_offset()));
+ testptr(tmp, tmp);
jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;
// Initiate earlyret handling only if it is not already being processed.
@@ -170,7 +171,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code.
get_thread(java_thread);
- movl(tmp, Address(java_thread, JavaThread::jvmti_thread_state_offset()));
+ movptr(tmp, Address(java_thread, JavaThread::jvmti_thread_state_offset()));
pushl(Address(tmp, JvmtiThreadState::earlyret_tos_offset()));
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), 1);
jmp(rax);
@@ -183,7 +184,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
void InterpreterMacroAssembler::get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset) {
assert(bcp_offset >= 0, "bcp is still pointing to start of bytecode");
movl(reg, Address(rsi, bcp_offset));
- bswap(reg);
+ bswapl(reg);
shrl(reg, 16);
}
@@ -192,9 +193,9 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache, Regis
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert(cache != index, "must use different registers");
load_unsigned_word(index, Address(rsi, bcp_offset));
- movl(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
+ movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4*wordSize, "adjust code below");
- shll(index, 2); // convert from field index to ConstantPoolCacheEntry index
+ shlptr(index, 2); // convert from field index to ConstantPoolCacheEntry index
}
@@ -206,10 +207,10 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache, R
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
shll(tmp, 2 + LogBytesPerWord);
- movl(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
+ movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
- addl(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
- addl(cache, tmp); // construct pointer to cache entry
+ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
+ addptr(cache, tmp); // construct pointer to cache entry
}
@@ -232,22 +233,22 @@ void InterpreterMacroAssembler::gen_subtype_check( Register Rsub_klass, Label &o
// if the super-klass is an interface or exceptionally deep in the Java
// hierarchy and we have to scan the secondary superclass list the hard way.
// See if we get an immediate positive hit
- cmpl( rax, Address(Rsub_klass,rcx,Address::times_1) );
+ cmpptr( rax, Address(Rsub_klass,rcx,Address::times_1) );
jcc( Assembler::equal,ok_is_subtype );
// Check for immediate negative hit
cmpl( rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes() );
jcc( Assembler::notEqual, not_subtype );
// Check for self
- cmpl( Rsub_klass, rax );
+ cmpptr( Rsub_klass, rax );
jcc( Assembler::equal, ok_is_subtype );
// Now do a linear scan of the secondary super-klass chain.
- movl( rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()) );
+ movptr( rdi, Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()) );
// EDI holds the objArrayOop of secondary supers.
movl( rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));// Load the array length
  // Skip to start of data; also clear Z flag in case ECX is zero
- addl( rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT) );
+ addptr( rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT) );
  // Scan ECX words at [EDI] for occurrence of EAX
// Set NZ/Z based on last compare
repne_scan();
@@ -255,7 +256,7 @@ void InterpreterMacroAssembler::gen_subtype_check( Register Rsub_klass, Label &o
// Not equal?
jcc( Assembler::notEqual, not_subtype );
// Must be equal but missed in cache. Update cache.
- movl( Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax );
+ movptr( Address(Rsub_klass, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()), rax );
jmp( ok_is_subtype );
bind(not_subtype);
@@ -276,7 +277,6 @@ void InterpreterMacroAssembler::d2ieee() {
fld_d(Address(rsp, 0));
}
}
-#endif // CC_INTERP
// Java Expression Stack
@@ -284,11 +284,11 @@ void InterpreterMacroAssembler::d2ieee() {
void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
if (TaggedStackInterpreter) {
Label okay;
- cmpl(Address(rsp, wordSize), (int)t);
+ cmpptr(Address(rsp, wordSize), (int32_t)t);
jcc(Assembler::equal, okay);
// Also compare if the stack value is zero, then the tag might
// not have been set coming from deopt.
- cmpl(Address(rsp, 0), 0);
+ cmpptr(Address(rsp, 0), 0);
jcc(Assembler::equal, okay);
stop("Java Expression stack tag value is bad");
bind(okay);
@@ -298,43 +298,43 @@ void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
void InterpreterMacroAssembler::pop_ptr(Register r) {
debug_only(verify_stack_tag(frame::TagReference));
- popl(r);
- if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+ pop(r);
+ if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
- popl(r);
+ pop(r);
// Tag may not be reference for jsr, can be returnAddress
- if (TaggedStackInterpreter) popl(tag);
+ if (TaggedStackInterpreter) pop(tag);
}
void InterpreterMacroAssembler::pop_i(Register r) {
debug_only(verify_stack_tag(frame::TagValue));
- popl(r);
- if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+ pop(r);
+ if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_l(Register lo, Register hi) {
debug_only(verify_stack_tag(frame::TagValue));
- popl(lo);
- if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+ pop(lo);
+ if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
debug_only(verify_stack_tag(frame::TagValue));
- popl(hi);
- if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+ pop(hi);
+ if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_f() {
debug_only(verify_stack_tag(frame::TagValue));
fld_s(Address(rsp, 0));
- addl(rsp, 1 * wordSize);
- if (TaggedStackInterpreter) addl(rsp, 1 * wordSize);
+ addptr(rsp, 1 * wordSize);
+ if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_d() {
// Write double to stack contiguously and load into ST0
pop_dtos_to_rsp();
fld_d(Address(rsp, 0));
- addl(rsp, 2 * wordSize);
+ addptr(rsp, 2 * wordSize);
}
@@ -344,22 +344,22 @@ void InterpreterMacroAssembler::pop_dtos_to_rsp() {
if (TaggedStackInterpreter) {
// Pop double value into scratch registers
debug_only(verify_stack_tag(frame::TagValue));
- popl(rax);
- addl(rsp, 1* wordSize);
+ pop(rax);
+ addptr(rsp, 1* wordSize);
debug_only(verify_stack_tag(frame::TagValue));
- popl(rdx);
- addl(rsp, 1* wordSize);
- pushl(rdx);
- pushl(rax);
+ pop(rdx);
+ addptr(rsp, 1* wordSize);
+ push(rdx);
+ push(rax);
}
}
void InterpreterMacroAssembler::pop_ftos_to_rsp() {
if (TaggedStackInterpreter) {
debug_only(verify_stack_tag(frame::TagValue));
- popl(rax);
- addl(rsp, 1 * wordSize);
- pushl(rax); // ftos is at rsp
+ pop(rax);
+ addptr(rsp, 1 * wordSize);
+ push(rax); // ftos is at rsp
}
}
@@ -380,31 +380,31 @@ void InterpreterMacroAssembler::pop(TosState state) {
}
void InterpreterMacroAssembler::push_ptr(Register r) {
- if (TaggedStackInterpreter) pushl(frame::TagReference);
- pushl(r);
+ if (TaggedStackInterpreter) push(frame::TagReference);
+ push(r);
}
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
- if (TaggedStackInterpreter) pushl(tag); // tag first
- pushl(r);
+ if (TaggedStackInterpreter) push(tag); // tag first
+ push(r);
}
void InterpreterMacroAssembler::push_i(Register r) {
- if (TaggedStackInterpreter) pushl(frame::TagValue);
- pushl(r);
+ if (TaggedStackInterpreter) push(frame::TagValue);
+ push(r);
}
void InterpreterMacroAssembler::push_l(Register lo, Register hi) {
- if (TaggedStackInterpreter) pushl(frame::TagValue);
- pushl(hi);
- if (TaggedStackInterpreter) pushl(frame::TagValue);
- pushl(lo);
+ if (TaggedStackInterpreter) push(frame::TagValue);
+ push(hi);
+ if (TaggedStackInterpreter) push(frame::TagValue);
+ push(lo);
}
void InterpreterMacroAssembler::push_f() {
- if (TaggedStackInterpreter) pushl(frame::TagValue);
+ if (TaggedStackInterpreter) push(frame::TagValue);
// Do not schedule for no AGI! Never write beyond rsp!
- subl(rsp, 1 * wordSize);
+ subptr(rsp, 1 * wordSize);
fstp_s(Address(rsp, 0));
}
@@ -415,8 +415,8 @@ void InterpreterMacroAssembler::push_d(Register r) {
// high
// tag
// low
- pushl(frame::TagValue);
- subl(rsp, 3 * wordSize);
+ push(frame::TagValue);
+ subptr(rsp, 3 * wordSize);
fstp_d(Address(rsp, 0));
// move high word up to slot n-1
movl(r, Address(rsp, 1*wordSize));
@@ -425,7 +425,7 @@ void InterpreterMacroAssembler::push_d(Register r) {
movl(Address(rsp, 1*wordSize), frame::TagValue);
} else {
// Do not schedule for no AGI! Never write beyond rsp!
- subl(rsp, 2 * wordSize);
+ subptr(rsp, 2 * wordSize);
fstp_d(Address(rsp, 0));
}
}
@@ -447,22 +447,21 @@ void InterpreterMacroAssembler::push(TosState state) {
}
}
-#ifndef CC_INTERP
// Tagged stack helpers for swap and dup
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
Register tag) {
- movl(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
+ movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
if (TaggedStackInterpreter) {
- movl(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
+ movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
}
}
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
Register tag) {
- movl(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
+ movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
if (TaggedStackInterpreter) {
- movl(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
+ movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
}
}
@@ -471,10 +470,10 @@ void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
- movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int)frame::TagValue);
- movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)frame::TagValue);
+ movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)frame::TagValue);
+ movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)frame::TagValue);
} else {
- movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)tag);
+ movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
}
}
}
@@ -482,13 +481,13 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
- movl(Address(rdi, idx, Interpreter::stackElementScale(),
- Interpreter::local_tag_offset_in_bytes(1)), (int)frame::TagValue);
- movl(Address(rdi, idx, Interpreter::stackElementScale(),
- Interpreter::local_tag_offset_in_bytes(0)), (int)frame::TagValue);
+ movptr(Address(rdi, idx, Interpreter::stackElementScale(),
+ Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
+ movptr(Address(rdi, idx, Interpreter::stackElementScale(),
+ Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
} else {
- movl(Address(rdi, idx, Interpreter::stackElementScale(),
- Interpreter::local_tag_offset_in_bytes(0)), (int)tag);
+ movptr(Address(rdi, idx, Interpreter::stackElementScale(),
+ Interpreter::local_tag_offset_in_bytes(0)), (int32_t)tag);
}
}
}
@@ -496,7 +495,7 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
- movl(Address(rdi, idx, Interpreter::stackElementScale(),
+ movptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_tag_offset_in_bytes(0)), tag);
}
}
@@ -505,7 +504,7 @@ void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
void InterpreterMacroAssembler::tag_local(Register tag, int n) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
- movl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag);
+ movptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), tag);
}
}
@@ -516,17 +515,17 @@ void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
- cmpl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int)t);
+ cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
- cmpl(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int)t);
+ cmpptr(Address(rdi, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
- cmpl(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0);
+ cmpptr(Address(rdi, Interpreter::local_offset_in_bytes(n)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
@@ -539,19 +538,19 @@ void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
- cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
- Interpreter::local_tag_offset_in_bytes(1)), (int)t);
+ cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
+ Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
- Interpreter::local_tag_offset_in_bytes(0)), (int)t);
+ Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
- cmpl(Address(rdi, idx, Interpreter::stackElementScale(),
+ cmpptr(Address(rdi, idx, Interpreter::stackElementScale(),
Interpreter::local_offset_in_bytes(0)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
@@ -567,22 +566,22 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1) {
- pushl(arg_1);
+ push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 1);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2) {
- pushl(arg_2);
- pushl(arg_1);
+ push(arg_2);
+ push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register arg_1, Register arg_2, Register arg_3) {
- pushl(arg_3);
- pushl(arg_2);
- pushl(arg_1);
+ push(arg_3);
+ push(arg_2);
+ push(arg_1);
MacroAssembler::call_VM_leaf_base(entry_point, 3);
}
@@ -591,9 +590,9 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point, Register
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
// set sender sp
- leal(rsi, Address(rsp, wordSize));
+ lea(rsi, Address(rsp, wordSize));
// record last_sp
- movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi);
+ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), rsi);
if (JvmtiExport::can_post_interpreter_events()) {
Label run_compiled_code;
@@ -629,16 +628,16 @@ void InterpreterMacroAssembler::dispatch_base(TosState state, address* table,
verify_FPU(1, state);
if (VerifyActivationFrameSize) {
Label L;
- movl(rcx, rbp);
- subl(rcx, rsp);
+ mov(rcx, rbp);
+ subptr(rcx, rsp);
int min_frame_size = (frame::link_offset - frame::interpreter_frame_initial_sp_offset) * wordSize;
- cmpl(rcx, min_frame_size);
+ cmpptr(rcx, min_frame_size);
jcc(Assembler::greaterEqual, L);
stop("broken stack frame");
bind(L);
}
if (verifyoop) verify_oop(rax, state);
- Address index(noreg, rbx, Address::times_4);
+ Address index(noreg, rbx, Address::times_ptr);
ExternalAddress tbl((address)table);
ArrayAddress dispatch(tbl, index);
jump(dispatch);
@@ -700,10 +699,10 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
in_bytes(JavaThread::do_not_unlock_if_synchronized_offset()));
movbool(rbx, do_not_unlock_if_synchronized);
- movl(rdi,rbx);
+ mov(rdi,rbx);
movbool(do_not_unlock_if_synchronized, false); // reset the flag
- movl(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags
+ movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); // get method access flags
movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
testl(rcx, JVM_ACC_SYNCHRONIZED);
@@ -711,7 +710,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
// Don't unlock anything if the _do_not_unlock_if_synchronized flag
// is set.
- movl(rcx,rdi);
+ mov(rcx,rdi);
testbool(rcx);
jcc(Assembler::notZero, no_unlock);
@@ -721,11 +720,11 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
// BasicObjectLock will be first in list, since this is a synchronized method. However, need
// to check that the object has not been unlocked by an explicit monitorexit bytecode.
const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
- leal (rdx, monitor); // address of first monitor
+ lea (rdx, monitor); // address of first monitor
- movl (rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
- testl (rax, rax);
- jcc (Assembler::notZero, unlock);
+ movptr (rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
+ testptr(rax, rax);
+ jcc (Assembler::notZero, unlock);
pop(state);
if (throw_monitor_exception) {
@@ -762,8 +761,8 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
const Address monitor_block_bot(rbp, frame::interpreter_frame_initial_sp_offset * wordSize);
bind(restart);
- movl(rcx, monitor_block_top); // points to current entry, starting with top-most entry
- leal(rbx, monitor_block_bot); // points to word before bottom of monitor block
+ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
+ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
jmp(entry);
// Entry already locked, need to throw exception
@@ -780,7 +779,7 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
// Unlock does not block, so don't have to worry about the frame
push(state);
- movl(rdx, rcx);
+ mov(rdx, rcx);
unlock_object(rdx);
pop(state);
@@ -793,12 +792,12 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
}
bind(loop);
- cmpl(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); // check if current entry is used
+ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
jcc(Assembler::notEqual, exception);
- addl(rcx, entry_size); // otherwise advance to next entry
+ addptr(rcx, entry_size); // otherwise advance to next entry
bind(entry);
- cmpl(rcx, rbx); // check if bottom reached
+ cmpptr(rcx, rbx); // check if bottom reached
jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
}
@@ -812,22 +811,22 @@ void InterpreterMacroAssembler::remove_activation(TosState state, Register ret_a
}
// remove activation
- movl(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
+ movptr(rbx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
leave(); // remove frame anchor
- popl(ret_addr); // get return address
- movl(rsp, rbx); // set sp to sender sp
+ pop(ret_addr); // get return address
+ mov(rsp, rbx); // set sp to sender sp
if (UseSSE) {
// float and double are returned in xmm register in SSE-mode
if (state == ftos && UseSSE >= 1) {
- subl(rsp, wordSize);
+ subptr(rsp, wordSize);
fstp_s(Address(rsp, 0));
movflt(xmm0, Address(rsp, 0));
- addl(rsp, wordSize);
+ addptr(rsp, wordSize);
} else if (state == dtos && UseSSE >= 2) {
- subl(rsp, 2*wordSize);
+ subptr(rsp, 2*wordSize);
fstp_d(Address(rsp, 0));
movdbl(xmm0, Address(rsp, 0));
- addl(rsp, 2*wordSize);
+ addptr(rsp, 2*wordSize);
}
}
}
@@ -858,7 +857,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
Label slow_case;
// Load object pointer into obj_reg %rcx
- movl(obj_reg, Address(lock_reg, obj_offset));
+ movptr(obj_reg, Address(lock_reg, obj_offset));
if (UseBiasedLocking) {
// Note: we use noreg for the temporary register since it's hard
@@ -867,19 +866,19 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
}
// Load immediate 1 into swap_reg %rax,
- movl(swap_reg, 1);
+ movptr(swap_reg, (int32_t)1);
// Load (object->mark() | 1) into swap_reg %rax,
- orl(swap_reg, Address(obj_reg, 0));
+ orptr(swap_reg, Address(obj_reg, 0));
// Save (object->mark() | 1) into BasicLock's displaced header
- movl(Address(lock_reg, mark_offset), swap_reg);
+ movptr(Address(lock_reg, mark_offset), swap_reg);
  assert(lock_offset == 0, "displaced header must be first word in BasicObjectLock");
if (os::is_MP()) {
lock();
}
- cmpxchg(lock_reg, Address(obj_reg, 0));
+ cmpxchgptr(lock_reg, Address(obj_reg, 0));
if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero,
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
@@ -895,11 +894,11 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// assuming both stack pointer and pagesize have their
// least significant 2 bits clear.
// NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
- subl(swap_reg, rsp);
- andl(swap_reg, 3 - os::vm_page_size());
+ subptr(swap_reg, rsp);
+ andptr(swap_reg, 3 - os::vm_page_size());
// Save the test result, for recursive case, the result is zero
- movl(Address(lock_reg, mark_offset), swap_reg);
+ movptr(Address(lock_reg, mark_offset), swap_reg);
if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero,
@@ -939,36 +938,36 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Convert from BasicObjectLock structure to object and BasicLock structure
// Store the BasicLock address into %rax,
- leal(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
+ lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
// Load oop into obj_reg(%rcx)
- movl(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ()));
+ movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes ()));
// Free entry
- movl(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
+ movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
if (UseBiasedLocking) {
biased_locking_exit(obj_reg, header_reg, done);
}
// Load the old header from BasicLock structure
- movl(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes()));
+ movptr(header_reg, Address(swap_reg, BasicLock::displaced_header_offset_in_bytes()));
// Test for recursion
- testl(header_reg, header_reg);
+ testptr(header_reg, header_reg);
// zero for recursive case
jcc(Assembler::zero, done);
// Atomic swap back the old header
if (os::is_MP()) lock();
- cmpxchg(header_reg, Address(obj_reg, 0));
+ cmpxchgptr(header_reg, Address(obj_reg, 0));
// zero for recursive case
jcc(Assembler::zero, done);
// Call the runtime routine for slow case.
- movl(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
+ movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), obj_reg); // restore obj
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit), lock_reg);
bind(done);
@@ -983,8 +982,8 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Test ImethodDataPtr. If it is null, continue at the specified label
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& zero_continue) {
assert(ProfileInterpreter, "must be profiling interpreter");
- movl(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
- testl(mdp, mdp);
+ movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
+ testptr(mdp, mdp);
jcc(Assembler::zero, zero_continue);
}
@@ -993,13 +992,13 @@ void InterpreterMacroAssembler::test_method_data_pointer(Register mdp, Label& ze
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
assert(ProfileInterpreter, "must be profiling interpreter");
Label zero_continue;
- pushl(rax);
- pushl(rbx);
+ push(rax);
+ push(rbx);
get_method(rbx);
// Test MDO to avoid the call if it is NULL.
- movl(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- testl(rax, rax);
+ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ testptr(rax, rax);
jcc(Assembler::zero, zero_continue);
// rbx,: method
@@ -1007,53 +1006,55 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, rsi);
// rax,: mdi
- movl(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- testl(rbx, rbx);
+ movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ testptr(rbx, rbx);
jcc(Assembler::zero, zero_continue);
- addl(rbx, in_bytes(methodDataOopDesc::data_offset()));
- addl(rbx, rax);
- movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
+ addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
+ addptr(rbx, rax);
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
bind(zero_continue);
- popl(rbx);
- popl(rax);
+ pop(rbx);
+ pop(rax);
}
void InterpreterMacroAssembler::verify_method_data_pointer() {
assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
Label verify_continue;
- pushl(rax);
- pushl(rbx);
- pushl(rcx);
- pushl(rdx);
+ push(rax);
+ push(rbx);
+ push(rcx);
+ push(rdx);
test_method_data_pointer(rcx, verify_continue); // If mdp is zero, continue
get_method(rbx);
// If the mdp is valid, it will point to a DataLayout header which is
// consistent with the bcp. The converse is highly probable also.
load_unsigned_word(rdx, Address(rcx, in_bytes(DataLayout::bci_offset())));
- addl(rdx, Address(rbx, methodOopDesc::const_offset()));
- leal(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
- cmpl(rdx, rsi);
+ addptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+ lea(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
+ cmpptr(rdx, rsi);
jcc(Assembler::equal, verify_continue);
// rbx,: method
// rsi: bcp
// rcx: mdp
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp), rbx, rsi, rcx);
bind(verify_continue);
- popl(rdx);
- popl(rcx);
- popl(rbx);
- popl(rax);
+ pop(rdx);
+ pop(rcx);
+ pop(rbx);
+ pop(rax);
#endif // ASSERT
}
void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in, int constant, Register value) {
+ // %%% this seems to be used to store counter data which is surely 32bits
+ // however 64bit side stores 64 bits which seems wrong
assert(ProfileInterpreter, "must be profiling interpreter");
Address data(mdp_in, constant);
- movl(data, value);
+ movptr(data, value);
}
@@ -1073,6 +1074,7 @@ void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" );
assert(ProfileInterpreter, "must be profiling interpreter");
+ // %%% 64bit treats this as 64 bit which seems unlikely
if (decrement) {
// Decrement the register. Set condition codes.
addl(data, -DataLayout::counter_increment);
@@ -1119,11 +1121,11 @@ void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
Label& not_equal_continue) {
assert(ProfileInterpreter, "must be profiling interpreter");
if (test_value_out == noreg) {
- cmpl(value, Address(mdp_in, offset));
+ cmpptr(value, Address(mdp_in, offset));
} else {
// Put the test value into a register, so caller can use it:
- movl(test_value_out, Address(mdp_in, offset));
- cmpl(test_value_out, value);
+ movptr(test_value_out, Address(mdp_in, offset));
+ cmpptr(test_value_out, value);
}
jcc(Assembler::notEqual, not_equal_continue);
}
@@ -1132,31 +1134,31 @@ void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, int offset_of_disp) {
assert(ProfileInterpreter, "must be profiling interpreter");
Address disp_address(mdp_in, offset_of_disp);
- addl(mdp_in,disp_address);
- movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
+ addptr(mdp_in,disp_address);
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}
void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in, Register reg, int offset_of_disp) {
assert(ProfileInterpreter, "must be profiling interpreter");
Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
- addl(mdp_in, disp_address);
- movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
+ addptr(mdp_in, disp_address);
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}
void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in, int constant) {
assert(ProfileInterpreter, "must be profiling interpreter");
- addl(mdp_in, constant);
- movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
+ addptr(mdp_in, constant);
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
assert(ProfileInterpreter, "must be profiling interpreter");
- pushl(return_bci); // save/restore across call_VM
+ push(return_bci); // save/restore across call_VM
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret), return_bci);
- popl(return_bci);
+ pop(return_bci);
}
@@ -1172,6 +1174,8 @@ void InterpreterMacroAssembler::profile_taken_branch(Register mdp, Register bump
// We inline increment_mdp_data_at to return bumped_count in a register
//increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
Address data(mdp, in_bytes(JumpData::taken_offset()));
+
+ // %%% 64bit treats these cells as 64 bit but they seem to be 32 bit
movl(bumped_count,data);
assert( DataLayout::counter_increment==1, "flow-free idiom only works with 1" );
addl(bumped_count, DataLayout::counter_increment);
@@ -1289,7 +1293,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
if (row == start_row) {
// Failed the equality check on receiver[n]... Test for null.
- testl(reg2, reg2);
+ testptr(reg2, reg2);
if (start_row == last_row) {
// The only thing left to do is handle the null case.
jcc(Assembler::notZero, done);
@@ -1315,7 +1319,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
int recvr_offset = in_bytes(VirtualCallData::receiver_offset(start_row));
set_mdp_data_at(mdp, recvr_offset, receiver);
int count_offset = in_bytes(VirtualCallData::receiver_count_offset(start_row));
- movl(reg2, DataLayout::counter_increment);
+ movptr(reg2, (int32_t)DataLayout::counter_increment);
set_mdp_data_at(mdp, count_offset, reg2);
jmp(done);
}
@@ -1454,9 +1458,11 @@ void InterpreterMacroAssembler::profile_switch_case(Register index, Register mdp
test_method_data_pointer(mdp, profile_continue);
// Build the base (index * per_case_size_in_bytes()) + case_array_offset_in_bytes()
- movl(reg2, in_bytes(MultiBranchData::per_case_size()));
- imull(index, reg2);
- addl(index, in_bytes(MultiBranchData::case_array_offset()));
+ movptr(reg2, (int32_t)in_bytes(MultiBranchData::per_case_size()));
+ // index is positive and so should have correct value if this code were
+ // used on 64bits
+ imulptr(index, reg2);
+ addptr(index, in_bytes(MultiBranchData::case_array_offset()));
// Update the case count
increment_mdp_data_at(mdp, index, in_bytes(MultiBranchData::relative_count_offset()));
@@ -1535,12 +1541,12 @@ void InterpreterMacroAssembler::notify_method_exit(
{
SkipIfEqual skip_if(this, &DTraceMethodProbes, 0);
- push(state);
+ NOT_CC_INTERP(push(state));
get_thread(rbx);
get_method(rcx);
call_VM_leaf(
CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
rbx, rcx);
- pop(state);
+ NOT_CC_INTERP(pop(state));
}
}
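
The NOT_CC_INTERP wrappers added in the last hunk keep the TOS save/restore out of the C++-interpreter build. The usual shape of such a macro, sketched here rather than quoted from macros.hpp:

#ifdef CC_INTERP
# define NOT_CC_INTERP(code)          /* C++ interpreter build: statement dropped */
#else
# define NOT_CC_INTERP(code) code     /* template interpreter build: statement kept */
#endif

// Effect at the DTrace method-exit probe:
//   template interpreter:  push(state); ... call_VM_leaf(...); pop(state);
//   CC_INTERP build:       call_VM_leaf(...);   // no TOS state to preserve
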
diff --git a/src/cpu/x86/vm/interp_masm_x86_32.hpp b/src/cpu/x86/vm/interp_masm_x86_32.hpp
index 10cf8140a..be3dee662 100644
--- a/src/cpu/x86/vm/interp_masm_x86_32.hpp
+++ b/src/cpu/x86/vm/interp_masm_x86_32.hpp
@@ -65,15 +65,15 @@ class InterpreterMacroAssembler: public MacroAssembler {
#else
- void save_bcp() { movl(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), rsi); }
- void restore_bcp() { movl(rsi, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); }
- void restore_locals() { movl(rdi, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); }
+ void save_bcp() { movptr(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), rsi); }
+ void restore_bcp() { movptr(rsi, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize)); }
+ void restore_locals() { movptr(rdi, Address(rbp, frame::interpreter_frame_locals_offset * wordSize)); }
// Helpers for runtime call arguments/results
- void get_method(Register reg) { movl(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); }
- void get_constant_pool(Register reg) { get_method(reg); movl(reg, Address(reg, methodOopDesc::constants_offset())); }
- void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movl(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); }
- void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movl(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
+ void get_method(Register reg) { movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize)); }
+ void get_constant_pool(Register reg) { get_method(reg); movptr(reg, Address(reg, methodOopDesc::constants_offset())); }
+ void get_constant_pool_cache(Register reg) { get_constant_pool(reg); movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes())); }
+ void get_cpool_and_tags(Register cpool, Register tags) { get_constant_pool(cpool); movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
void get_cache_and_index_at_bcp(Register cache, Register index, int bcp_offset);
@@ -82,8 +82,6 @@ class InterpreterMacroAssembler: public MacroAssembler {
// Expression stack
void f2ieee(); // truncate ftos to 32bits
void d2ieee(); // truncate dtos to 64bits
-#endif // CC_INTERP
-
void pop_ptr(Register r = rax);
void pop_ptr(Register r, Register tag);
@@ -104,14 +102,25 @@ class InterpreterMacroAssembler: public MacroAssembler {
void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
+ void pop(Register r ) { ((MacroAssembler*)this)->pop(r); }
+
+ void push(Register r ) { ((MacroAssembler*)this)->push(r); }
+ void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); }
+
+ // These are dummies to prevent surprise implicit conversions to Register
+ void pop(void* v ); // Add unimplemented ambiguous method
+ void push(void* v ); // Add unimplemented ambiguous method
+
DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
+#endif // CC_INTERP
+
#ifndef CC_INTERP
void empty_expression_stack() {
- movl(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
+ movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
- movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
}
// Tagged stack helpers for swap and dup
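
The pop(void*)/push(void*) declarations added above are deliberately never defined: an argument that would otherwise reach Register or int32_t through a surprising implicit conversion resolves to the dummy overload and fails at link time instead. A self-contained illustration of the idiom with made-up types:

#include <cstdio>
#include <cstdint>

struct RegisterImpl { int encoding; };
typedef RegisterImpl* Register;       // hypothetical stand-in for the real Register

struct Masm {
  void push(Register r)  { std::printf("push reg %d\n", r->encoding); }
  void push(int32_t imm) { std::printf("push imm %d\n", imm); }
  void push(void* v);                 // declared, never defined: the tripwire
};

int main() {
  RegisterImpl rax_impl = { 0 };
  Masm m;
  m.push(&rax_impl);                  // Register overload
  m.push((int32_t)16);                // immediate overload
  // m.push((void*)&m);               // would select the dummy and fail to link
  return 0;
}
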
diff --git a/src/cpu/x86/vm/interp_masm_x86_64.cpp b/src/cpu/x86/vm/interp_masm_x86_64.cpp
index 4756a2587..75ca463ac 100644
--- a/src/cpu/x86/vm/interp_masm_x86_64.cpp
+++ b/src/cpu/x86/vm/interp_masm_x86_64.cpp
@@ -28,6 +28,15 @@
// Implementation of InterpreterMacroAssembler
+#ifdef CC_INTERP
+void InterpreterMacroAssembler::get_method(Register reg) {
+ movptr(reg, Address(rbp, -(sizeof(BytecodeInterpreter) + 2 * wordSize)));
+ movptr(reg, Address(reg, byte_offset_of(BytecodeInterpreter, _method)));
+}
+#endif // CC_INTERP
+
+#ifndef CC_INTERP
+
void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
int number_of_arguments) {
// interpreter specific
@@ -39,7 +48,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
save_bcp();
{
Label L;
- cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD);
+ cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL");
@@ -52,7 +61,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
#ifdef ASSERT
{
Label L;
- cmpq(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
+ cmpptr(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" r13 not callee saved?");
@@ -60,7 +69,7 @@ void InterpreterMacroAssembler::call_VM_leaf_base(address entry_point,
}
{
Label L;
- cmpq(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
+ cmpptr(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" r14 not callee saved?");
@@ -86,7 +95,7 @@ void InterpreterMacroAssembler::call_VM_base(Register oop_result,
#ifdef ASSERT
{
Label L;
- cmpq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD);
+ cmpptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
jcc(Assembler::equal, L);
stop("InterpreterMacroAssembler::call_VM_leaf_base:"
" last_sp != NULL");
@@ -127,15 +136,15 @@ void InterpreterMacroAssembler::check_and_handle_popframe(Register java_thread)
void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
- movq(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
+ movptr(rcx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
const Address tos_addr(rcx, JvmtiThreadState::earlyret_tos_offset());
const Address oop_addr(rcx, JvmtiThreadState::earlyret_oop_offset());
const Address val_addr(rcx, JvmtiThreadState::earlyret_value_offset());
switch (state) {
- case atos: movq(rax, oop_addr);
- movptr(oop_addr, NULL_WORD);
+ case atos: movptr(rax, oop_addr);
+ movptr(oop_addr, (int32_t)NULL_WORD);
verify_oop(rax, state); break;
- case ltos: movq(rax, val_addr); break;
+ case ltos: movptr(rax, val_addr); break;
case btos: // fall through
case ctos: // fall through
case stos: // fall through
@@ -147,15 +156,15 @@ void InterpreterMacroAssembler::load_earlyret_value(TosState state) {
}
// Clean up tos value in the thread object
movl(tos_addr, (int) ilgl);
- movl(val_addr, (int) NULL_WORD);
+ movl(val_addr, (int32_t) NULL_WORD);
}
void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread) {
if (JvmtiExport::can_force_early_return()) {
Label L;
- movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
- testq(c_rarg0, c_rarg0);
+ movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
+ testptr(c_rarg0, c_rarg0);
jcc(Assembler::zero, L); // if (thread->jvmti_thread_state() == NULL) exit;
// Initiate earlyret handling only if it is not already being processed.
@@ -167,7 +176,7 @@ void InterpreterMacroAssembler::check_and_handle_earlyret(Register java_thread)
// Call Interpreter::remove_activation_early_entry() to get the address of the
// same-named entrypoint in the generated interpreter code.
- movq(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
+ movptr(c_rarg0, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
movl(c_rarg0, Address(c_rarg0, JvmtiThreadState::earlyret_tos_offset()));
call_VM_leaf(CAST_FROM_FN_PTR(address, Interpreter::remove_activation_early_entry), c_rarg0);
jmp(rax);
@@ -192,7 +201,7 @@ void InterpreterMacroAssembler::get_cache_and_index_at_bcp(Register cache,
assert(bcp_offset > 0, "bcp is still pointing to start of bytecode");
assert(cache != index, "must use different registers");
load_unsigned_word(index, Address(r13, bcp_offset));
- movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
+ movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
assert(sizeof(ConstantPoolCacheEntry) == 4 * wordSize, "adjust code below");
// convert from field index to ConstantPoolCacheEntry index
shll(index, 2);
@@ -209,10 +218,10 @@ void InterpreterMacroAssembler::get_cache_entry_pointer_at_bcp(Register cache,
// convert from field index to ConstantPoolCacheEntry index
// and from word offset to byte offset
shll(tmp, 2 + LogBytesPerWord);
- movq(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
+ movptr(cache, Address(rbp, frame::interpreter_frame_cache_offset * wordSize));
// skip past the header
- addq(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
- addq(cache, tmp); // construct pointer to cache entry
+ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
+ addptr(cache, tmp); // construct pointer to cache entry
}
@@ -247,24 +256,24 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
// exceptionally deep in the Java hierarchy and we have to scan the
// secondary superclass list the hard way. See if we get an
// immediate positive hit
- cmpq(rax, Address(Rsub_klass, rcx, Address::times_1));
+ cmpptr(rax, Address(Rsub_klass, rcx, Address::times_1));
jcc(Assembler::equal,ok_is_subtype);
// Check for immediate negative hit
cmpl(rcx, sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes());
jcc( Assembler::notEqual, not_subtype );
// Check for self
- cmpq(Rsub_klass, rax);
+ cmpptr(Rsub_klass, rax);
jcc(Assembler::equal, ok_is_subtype);
// Now do a linear scan of the secondary super-klass chain.
- movq(rdi, Address(Rsub_klass, sizeof(oopDesc) +
- Klass::secondary_supers_offset_in_bytes()));
+ movptr(rdi, Address(Rsub_klass, sizeof(oopDesc) +
+ Klass::secondary_supers_offset_in_bytes()));
// rdi holds the objArrayOop of secondary supers.
// Load the array length
movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
  // Skip to start of data; also clear Z flag in case rcx is zero
- addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
  // Scan rcx words at [rdi] for occurrence of rax
// Set NZ/Z based on last compare
@@ -272,30 +281,31 @@ void InterpreterMacroAssembler::gen_subtype_check(Register Rsub_klass,
// and we store values in objArrays always encoded, thus we need to encode value
// before repne
if (UseCompressedOops) {
- pushq(rax);
+ push(rax);
encode_heap_oop(rax);
repne_scanl();
// Not equal?
jcc(Assembler::notEqual, not_subtype_pop);
// restore heap oop here for movq
- popq(rax);
+ pop(rax);
} else {
- repne_scanq();
+ repne_scan();
jcc(Assembler::notEqual, not_subtype);
}
// Must be equal but missed in cache. Update cache.
- movq(Address(Rsub_klass, sizeof(oopDesc) +
+ movptr(Address(Rsub_klass, sizeof(oopDesc) +
Klass::secondary_super_cache_offset_in_bytes()), rax);
jmp(ok_is_subtype);
bind(not_subtype_pop);
// restore heap oop here for miss
- if (UseCompressedOops) popq(rax);
+ if (UseCompressedOops) pop(rax);
bind(not_subtype);
profile_typecheck_failed(rcx); // blows rcx
}
+
// Java Expression Stack
#ifdef ASSERT
@@ -307,17 +317,17 @@ void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
if (t == frame::TagCategory2) {
tag = frame::TagValue;
Label hokay;
- cmpq(Address(rsp, 3*wordSize), (int)tag);
+ cmpptr(Address(rsp, 3*wordSize), (int32_t)tag);
jcc(Assembler::equal, hokay);
stop("Java Expression stack tag high value is bad");
bind(hokay);
}
Label okay;
- cmpq(Address(rsp, wordSize), (int)tag);
+ cmpptr(Address(rsp, wordSize), (int32_t)tag);
jcc(Assembler::equal, okay);
// Also compare if the stack value is zero, then the tag might
// not have been set coming from deopt.
- cmpq(Address(rsp, 0), 0);
+ cmpptr(Address(rsp, 0), 0);
jcc(Assembler::equal, okay);
stop("Java Expression stack tag value is bad");
bind(okay);
@@ -327,83 +337,83 @@ void InterpreterMacroAssembler::verify_stack_tag(frame::Tag t) {
void InterpreterMacroAssembler::pop_ptr(Register r) {
debug_only(verify_stack_tag(frame::TagReference));
- popq(r);
- if (TaggedStackInterpreter) addq(rsp, 1 * wordSize);
+ pop(r);
+ if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_ptr(Register r, Register tag) {
- popq(r);
- if (TaggedStackInterpreter) popq(tag);
+ pop(r);
+ if (TaggedStackInterpreter) pop(tag);
}
void InterpreterMacroAssembler::pop_i(Register r) {
- // XXX can't use popq currently, upper half non clean
+ // XXX can't use pop currently, upper half non clean
debug_only(verify_stack_tag(frame::TagValue));
movl(r, Address(rsp, 0));
- addq(rsp, wordSize);
- if (TaggedStackInterpreter) addq(rsp, 1 * wordSize);
+ addptr(rsp, wordSize);
+ if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_l(Register r) {
debug_only(verify_stack_tag(frame::TagCategory2));
movq(r, Address(rsp, 0));
- addq(rsp, 2 * Interpreter::stackElementSize());
+ addptr(rsp, 2 * Interpreter::stackElementSize());
}
void InterpreterMacroAssembler::pop_f(XMMRegister r) {
debug_only(verify_stack_tag(frame::TagValue));
movflt(r, Address(rsp, 0));
- addq(rsp, wordSize);
- if (TaggedStackInterpreter) addq(rsp, 1 * wordSize);
+ addptr(rsp, wordSize);
+ if (TaggedStackInterpreter) addptr(rsp, 1 * wordSize);
}
void InterpreterMacroAssembler::pop_d(XMMRegister r) {
debug_only(verify_stack_tag(frame::TagCategory2));
movdbl(r, Address(rsp, 0));
- addq(rsp, 2 * Interpreter::stackElementSize());
+ addptr(rsp, 2 * Interpreter::stackElementSize());
}
void InterpreterMacroAssembler::push_ptr(Register r) {
- if (TaggedStackInterpreter) pushq(frame::TagReference);
- pushq(r);
+ if (TaggedStackInterpreter) push(frame::TagReference);
+ push(r);
}
void InterpreterMacroAssembler::push_ptr(Register r, Register tag) {
- if (TaggedStackInterpreter) pushq(tag);
- pushq(r);
+ if (TaggedStackInterpreter) push(tag);
+ push(r);
}
void InterpreterMacroAssembler::push_i(Register r) {
- if (TaggedStackInterpreter) pushq(frame::TagValue);
- pushq(r);
+ if (TaggedStackInterpreter) push(frame::TagValue);
+ push(r);
}
void InterpreterMacroAssembler::push_l(Register r) {
if (TaggedStackInterpreter) {
- pushq(frame::TagValue);
- subq(rsp, 1 * wordSize);
- pushq(frame::TagValue);
- subq(rsp, 1 * wordSize);
+ push(frame::TagValue);
+ subptr(rsp, 1 * wordSize);
+ push(frame::TagValue);
+ subptr(rsp, 1 * wordSize);
} else {
- subq(rsp, 2 * wordSize);
+ subptr(rsp, 2 * wordSize);
}
movq(Address(rsp, 0), r);
}
void InterpreterMacroAssembler::push_f(XMMRegister r) {
- if (TaggedStackInterpreter) pushq(frame::TagValue);
- subq(rsp, wordSize);
+ if (TaggedStackInterpreter) push(frame::TagValue);
+ subptr(rsp, wordSize);
movflt(Address(rsp, 0), r);
}
void InterpreterMacroAssembler::push_d(XMMRegister r) {
if (TaggedStackInterpreter) {
- pushq(frame::TagValue);
- subq(rsp, 1 * wordSize);
- pushq(frame::TagValue);
- subq(rsp, 1 * wordSize);
+ push(frame::TagValue);
+ subptr(rsp, 1 * wordSize);
+ push(frame::TagValue);
+ subptr(rsp, 1 * wordSize);
} else {
- subq(rsp, 2 * wordSize);
+ subptr(rsp, 2 * wordSize);
}
movdbl(Address(rsp, 0), r);
}
@@ -441,20 +451,22 @@ void InterpreterMacroAssembler::push(TosState state) {
}
+
+
// Tagged stack helpers for swap and dup
void InterpreterMacroAssembler::load_ptr_and_tag(int n, Register val,
Register tag) {
- movq(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
+ movptr(val, Address(rsp, Interpreter::expr_offset_in_bytes(n)));
if (TaggedStackInterpreter) {
- movq(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
+ movptr(tag, Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)));
}
}
void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
Register tag) {
- movq(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
+ movptr(Address(rsp, Interpreter::expr_offset_in_bytes(n)), val);
if (TaggedStackInterpreter) {
- movq(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
+ movptr(Address(rsp, Interpreter::expr_tag_offset_in_bytes(n)), tag);
}
}
@@ -463,12 +475,12 @@ void InterpreterMacroAssembler::store_ptr_and_tag(int n, Register val,
void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
- mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)),
- (intptr_t)frame::TagValue);
- mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)),
- (intptr_t)frame::TagValue);
+ movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)),
+ (int32_t)frame::TagValue);
+ movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)),
+ (int32_t)frame::TagValue);
} else {
- mov64(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (intptr_t)tag);
+ movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)tag);
}
}
}
@@ -476,13 +488,13 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, int n) {
void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
if (TaggedStackInterpreter) {
if (tag == frame::TagCategory2) {
- mov64(Address(r14, idx, Address::times_8,
- Interpreter::local_tag_offset_in_bytes(1)), (intptr_t)frame::TagValue);
- mov64(Address(r14, idx, Address::times_8,
- Interpreter::local_tag_offset_in_bytes(0)), (intptr_t)frame::TagValue);
+ movptr(Address(r14, idx, Address::times_8,
+ Interpreter::local_tag_offset_in_bytes(1)), (int32_t)frame::TagValue);
+ movptr(Address(r14, idx, Address::times_8,
+ Interpreter::local_tag_offset_in_bytes(0)), (int32_t)frame::TagValue);
} else {
- mov64(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
- (intptr_t)tag);
+ movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)),
+ (int32_t)tag);
}
}
}
@@ -490,7 +502,7 @@ void InterpreterMacroAssembler::tag_local(frame::Tag tag, Register idx) {
void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
- movq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
+ movptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), tag);
}
}
@@ -498,7 +510,7 @@ void InterpreterMacroAssembler::tag_local(Register tag, Register idx) {
void InterpreterMacroAssembler::tag_local(Register tag, int n) {
if (TaggedStackInterpreter) {
// can only be TagValue or TagReference
- movq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
+ movptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), tag);
}
}
@@ -509,17 +521,17 @@ void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, int n) {
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
- cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int)t);
+ cmpptr(Address(r14, Interpreter::local_tag_offset_in_bytes(n+1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
- cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int)t);
+ cmpq(Address(r14, Interpreter::local_tag_offset_in_bytes(n)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
- cmpq(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
+ cmpptr(Address(r14, Interpreter::local_offset_in_bytes(n)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
@@ -532,17 +544,17 @@ void InterpreterMacroAssembler::verify_local_tag(frame::Tag tag, Register idx) {
if (tag == frame::TagCategory2) {
Label nbl;
t = frame::TagValue; // change to what is stored in locals
- cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int)t);
+ cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(1)), (int32_t)t);
jcc(Assembler::equal, nbl);
stop("Local tag is bad for long/double");
bind(nbl);
}
Label notBad;
- cmpq(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int)t);
+ cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_tag_offset_in_bytes(0)), (int32_t)t);
jcc(Assembler::equal, notBad);
// Also compare if the local value is zero, then the tag might
// not have been set coming from deopt.
- cmpq(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
+ cmpptr(Address(r14, idx, Address::times_8, Interpreter::local_offset_in_bytes(0)), 0);
jcc(Assembler::equal, notBad);
stop("Local tag is bad");
bind(notBad);
@@ -559,7 +571,7 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point) {
void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
Register arg_1) {
if (c_rarg0 != arg_1) {
- movq(c_rarg0, arg_1);
+ mov(c_rarg0, arg_1);
}
MacroAssembler::call_VM_leaf_base(entry_point, 1);
}
@@ -571,10 +583,10 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
assert(c_rarg0 != arg_2, "smashed argument");
assert(c_rarg1 != arg_1, "smashed argument");
if (c_rarg0 != arg_1) {
- movq(c_rarg0, arg_1);
+ mov(c_rarg0, arg_1);
}
if (c_rarg1 != arg_2) {
- movq(c_rarg1, arg_2);
+ mov(c_rarg1, arg_2);
}
MacroAssembler::call_VM_leaf_base(entry_point, 2);
}
@@ -590,13 +602,13 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
assert(c_rarg2 != arg_1, "smashed argument");
assert(c_rarg2 != arg_2, "smashed argument");
if (c_rarg0 != arg_1) {
- movq(c_rarg0, arg_1);
+ mov(c_rarg0, arg_1);
}
if (c_rarg1 != arg_2) {
- movq(c_rarg1, arg_2);
+ mov(c_rarg1, arg_2);
}
if (c_rarg2 != arg_3) {
- movq(c_rarg2, arg_3);
+ mov(c_rarg2, arg_3);
}
MacroAssembler::call_VM_leaf_base(entry_point, 3);
}
@@ -605,9 +617,9 @@ void InterpreterMacroAssembler::super_call_VM_leaf(address entry_point,
// in this thread in which case we must call the i2i entry
void InterpreterMacroAssembler::jump_from_interpreted(Register method, Register temp) {
// set sender sp
- leaq(r13, Address(rsp, wordSize));
+ lea(r13, Address(rsp, wordSize));
// record last_sp
- movq(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13);
+ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), r13);
if (JvmtiExport::can_post_interpreter_events()) {
Label run_compiled_code;
@@ -644,12 +656,12 @@ void InterpreterMacroAssembler::dispatch_base(TosState state,
verify_FPU(1, state);
if (VerifyActivationFrameSize) {
Label L;
- movq(rcx, rbp);
- subq(rcx, rsp);
- int min_frame_size =
+ mov(rcx, rbp);
+ subptr(rcx, rsp);
+ int32_t min_frame_size =
(frame::link_offset - frame::interpreter_frame_initial_sp_offset) *
wordSize;
- cmpq(rcx, min_frame_size);
+ cmpptr(rcx, (int32_t)min_frame_size);
jcc(Assembler::greaterEqual, L);
stop("broken stack frame");
bind(L);
@@ -678,7 +690,7 @@ void InterpreterMacroAssembler::dispatch_next(TosState state, int step) {
// load next bytecode (load before advancing r13 to prevent AGI)
load_unsigned_byte(rbx, Address(r13, step));
// advance r13
- incrementq(r13, step);
+ increment(r13, step);
dispatch_base(state, Interpreter::dispatch_table(state));
}
@@ -718,7 +730,7 @@ void InterpreterMacroAssembler::remove_activation(
movbool(do_not_unlock_if_synchronized, false); // reset the flag
// get method access flags
- movq(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
+ movptr(rbx, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
movl(rcx, Address(rbx, methodOopDesc::access_flags_offset()));
testl(rcx, JVM_ACC_SYNCHRONIZED);
jcc(Assembler::zero, unlocked);
@@ -738,10 +750,10 @@ void InterpreterMacroAssembler::remove_activation(
wordSize - (int) sizeof(BasicObjectLock));
// We use c_rarg1 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly
- leaq(c_rarg1, monitor); // address of first monitor
+ lea(c_rarg1, monitor); // address of first monitor
- movq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
- testq(rax, rax);
+ movptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+ testptr(rax, rax);
jcc(Assembler::notZero, unlock);
pop(state);
@@ -783,9 +795,9 @@ void InterpreterMacroAssembler::remove_activation(
bind(restart);
// We use c_rarg1 so that if we go slow path it will be the correct
// register for unlock_object to pass to VM directly
- movq(c_rarg1, monitor_block_top); // points to current entry, starting
+ movptr(c_rarg1, monitor_block_top); // points to current entry, starting
// with top-most entry
- leaq(rbx, monitor_block_bot); // points to word before bottom of
+ lea(rbx, monitor_block_bot); // points to word before bottom of
// monitor block
jmp(entry);
@@ -818,12 +830,12 @@ void InterpreterMacroAssembler::remove_activation(
bind(loop);
// check if current entry is used
- cmpq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int) NULL);
+ cmpptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL);
jcc(Assembler::notEqual, exception);
- addq(c_rarg1, entry_size); // otherwise advance to next entry
+ addptr(c_rarg1, entry_size); // otherwise advance to next entry
bind(entry);
- cmpq(c_rarg1, rbx); // check if bottom reached
+ cmpptr(c_rarg1, rbx); // check if bottom reached
jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
}
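
The loop rewritten just above walks the interpreter frame's monitor block at method exit and takes the exception path if any entry still holds an object. A conceptual, standalone sketch of that scan; the struct layout and walk direction here are simplified assumptions, not the exact frame layout.

#include <cstdint>
#include <cstdio>

// Conceptual sketch (not HotSpot code): scan the frame's BasicObjectLock entries and
// report whether any entry still refers to a locked object.
struct BasicObjectLock {
  uintptr_t displaced_header;  // the saved mark word
  uintptr_t obj;               // non-zero while the monitor is held
};

static bool has_locked_monitor(const BasicObjectLock* top, const BasicObjectLock* bottom) {
  for (const BasicObjectLock* cur = top; cur != bottom; ++cur) {  // addptr(c_rarg1, entry_size); cmpptr(c_rarg1, rbx)
    if (cur->obj != 0) {                                          // cmpptr(obj slot, NULL); notEqual -> exception path
      return true;
    }
  }
  return false;
}

int main() {
  BasicObjectLock monitors[2] = { {0, 0}, {0, 0x1000} };
  std::printf("%d\n", has_locked_monitor(&monitors[0], &monitors[2]));  // 1: second entry is still locked
  return 0;
}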
@@ -838,13 +850,15 @@ void InterpreterMacroAssembler::remove_activation(
// remove activation
// get sender sp
- movq(rbx,
- Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
+ movptr(rbx,
+ Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize));
leave(); // remove frame anchor
- popq(ret_addr); // get return address
- movq(rsp, rbx); // set sp to sender sp
+ pop(ret_addr); // get return address
+ mov(rsp, rbx); // set sp to sender sp
}
+#endif // C_INTERP
+
// Lock object
//
// Args:
@@ -875,7 +889,7 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
Label slow_case;
// Load object pointer into obj_reg %c_rarg3
- movq(obj_reg, Address(lock_reg, obj_offset));
+ movptr(obj_reg, Address(lock_reg, obj_offset));
if (UseBiasedLocking) {
biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, done, &slow_case);
@@ -885,16 +899,16 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
movl(swap_reg, 1);
// Load (object->mark() | 1) into swap_reg %rax
- orq(swap_reg, Address(obj_reg, 0));
+ orptr(swap_reg, Address(obj_reg, 0));
// Save (object->mark() | 1) into BasicLock's displaced header
- movq(Address(lock_reg, mark_offset), swap_reg);
+ movptr(Address(lock_reg, mark_offset), swap_reg);
assert(lock_offset == 0,
"displached header must be first word in BasicObjectLock");
if (os::is_MP()) lock();
- cmpxchgq(lock_reg, Address(obj_reg, 0));
+ cmpxchgptr(lock_reg, Address(obj_reg, 0));
if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero,
ExternalAddress((address) BiasedLocking::fast_path_entry_count_addr()));
@@ -910,11 +924,11 @@ void InterpreterMacroAssembler::lock_object(Register lock_reg) {
// assuming both stack pointer and pagesize have their
// least significant 3 bits clear.
// NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
- subq(swap_reg, rsp);
- andq(swap_reg, 7 - os::vm_page_size());
+ subptr(swap_reg, rsp);
+ andptr(swap_reg, 7 - os::vm_page_size());
// Save the test result, for recursive case, the result is zero
- movq(Address(lock_reg, mark_offset), swap_reg);
+ movptr(Address(lock_reg, mark_offset), swap_reg);
if (PrintBiasedLockingStatistics) {
cond_inc32(Assembler::zero,
@@ -963,37 +977,37 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
// Convert from BasicObjectLock structure to object and BasicLock
// structure Store the BasicLock address into %rax
- leaq(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
+ lea(swap_reg, Address(lock_reg, BasicObjectLock::lock_offset_in_bytes()));
// Load oop into obj_reg(%c_rarg3)
- movq(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
+ movptr(obj_reg, Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()));
// Free entry
- movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD);
+ movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD);
if (UseBiasedLocking) {
biased_locking_exit(obj_reg, header_reg, done);
}
// Load the old header from BasicLock structure
- movq(header_reg, Address(swap_reg,
- BasicLock::displaced_header_offset_in_bytes()));
+ movptr(header_reg, Address(swap_reg,
+ BasicLock::displaced_header_offset_in_bytes()));
// Test for recursion
- testq(header_reg, header_reg);
+ testptr(header_reg, header_reg);
// zero for recursive case
jcc(Assembler::zero, done);
// Atomic swap back the old header
if (os::is_MP()) lock();
- cmpxchgq(header_reg, Address(obj_reg, 0));
+ cmpxchgptr(header_reg, Address(obj_reg, 0));
// zero for recursive case
jcc(Assembler::zero, done);
// Call the runtime routine for slow case.
- movq(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()),
+ movptr(Address(lock_reg, BasicObjectLock::obj_offset_in_bytes()),
obj_reg); // restore obj
call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::monitorexit),
@@ -1005,12 +1019,13 @@ void InterpreterMacroAssembler::unlock_object(Register lock_reg) {
}
}
+#ifndef CC_INTERP
void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
Label& zero_continue) {
assert(ProfileInterpreter, "must be profiling interpreter");
- movq(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
- testq(mdp, mdp);
+ movptr(mdp, Address(rbp, frame::interpreter_frame_mdx_offset * wordSize));
+ testptr(mdp, mdp);
jcc(Assembler::zero, zero_continue);
}
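
The lock_object/unlock_object fast paths converted above implement displaced-header (stack) locking: the object's current mark word is saved in the on-stack BasicLock, then a compare-and-swap installs the BasicLock's address into the object header. A conceptual sketch using std::atomic, not the HotSpot implementation; biased locking and the recursive-lock test are omitted.

#include <atomic>
#include <cstdint>
#include <cstdio>

struct BasicLock    { uintptr_t displaced_header; };
struct ObjectHeader { std::atomic<uintptr_t> mark; };

static bool fast_lock(ObjectHeader* obj, BasicLock* lock) {
  uintptr_t expected = obj->mark.load(std::memory_order_relaxed) | 1;  // orptr(swap_reg, Address(obj_reg, 0))
  lock->displaced_header = expected;                                   // movptr(Address(lock_reg, mark_offset), swap_reg)
  // cmpxchgptr(lock_reg, Address(obj_reg, 0)): succeed only if the mark is still the unlocked value
  return obj->mark.compare_exchange_strong(expected, reinterpret_cast<uintptr_t>(lock));
}

int main() {
  ObjectHeader obj;
  obj.mark.store(0x1);                                  // an "unlocked" mark word
  BasicLock lock;
  std::printf("locked: %d\n", fast_lock(&obj, &lock));  // 1 when the CAS succeeds
  return 0;
}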
@@ -1019,13 +1034,13 @@ void InterpreterMacroAssembler::test_method_data_pointer(Register mdp,
void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
assert(ProfileInterpreter, "must be profiling interpreter");
Label zero_continue;
- pushq(rax);
- pushq(rbx);
+ push(rax);
+ push(rbx);
get_method(rbx);
// Test MDO to avoid the call if it is NULL.
- movq(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- testq(rax, rax);
+ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ testptr(rax, rax);
jcc(Assembler::zero, zero_continue);
// rbx: method
@@ -1033,26 +1048,26 @@ void InterpreterMacroAssembler::set_method_data_pointer_for_bcp() {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::bcp_to_di), rbx, r13);
// rax: mdi
- movq(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- testq(rbx, rbx);
+ movptr(rbx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ testptr(rbx, rbx);
jcc(Assembler::zero, zero_continue);
- addq(rbx, in_bytes(methodDataOopDesc::data_offset()));
- addq(rbx, rax);
- movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
+ addptr(rbx, in_bytes(methodDataOopDesc::data_offset()));
+ addptr(rbx, rax);
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rbx);
bind(zero_continue);
- popq(rbx);
- popq(rax);
+ pop(rbx);
+ pop(rax);
}
void InterpreterMacroAssembler::verify_method_data_pointer() {
assert(ProfileInterpreter, "must be profiling interpreter");
#ifdef ASSERT
Label verify_continue;
- pushq(rax);
- pushq(rbx);
- pushq(c_rarg3);
- pushq(c_rarg2);
+ push(rax);
+ push(rbx);
+ push(c_rarg3);
+ push(c_rarg2);
test_method_data_pointer(c_rarg3, verify_continue); // If mdp is zero, continue
get_method(rbx);
@@ -1060,9 +1075,9 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
// consistent with the bcp. The converse is highly probable also.
load_unsigned_word(c_rarg2,
Address(c_rarg3, in_bytes(DataLayout::bci_offset())));
- addq(c_rarg2, Address(rbx, methodOopDesc::const_offset()));
- leaq(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset()));
- cmpq(c_rarg2, r13);
+ addptr(c_rarg2, Address(rbx, methodOopDesc::const_offset()));
+ lea(c_rarg2, Address(c_rarg2, constMethodOopDesc::codes_offset()));
+ cmpptr(c_rarg2, r13);
jcc(Assembler::equal, verify_continue);
// rbx: method
// r13: bcp
@@ -1070,10 +1085,10 @@ void InterpreterMacroAssembler::verify_method_data_pointer() {
call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::verify_mdp),
rbx, r13, c_rarg3);
bind(verify_continue);
- popq(c_rarg2);
- popq(c_rarg3);
- popq(rbx);
- popq(rax);
+ pop(c_rarg2);
+ pop(c_rarg3);
+ pop(rbx);
+ pop(rax);
#endif // ASSERT
}
@@ -1083,7 +1098,7 @@ void InterpreterMacroAssembler::set_mdp_data_at(Register mdp_in,
Register value) {
assert(ProfileInterpreter, "must be profiling interpreter");
Address data(mdp_in, constant);
- movq(data, value);
+ movptr(data, value);
}
@@ -1099,22 +1114,24 @@ void InterpreterMacroAssembler::increment_mdp_data_at(Register mdp_in,
void InterpreterMacroAssembler::increment_mdp_data_at(Address data,
bool decrement) {
assert(ProfileInterpreter, "must be profiling interpreter");
+ // %%% this does 64-bit counters; at best it is wasting space,
+ // at worst it is a rare bug when counters overflow
if (decrement) {
// Decrement the register. Set condition codes.
- addq(data, -DataLayout::counter_increment);
+ addptr(data, (int32_t) -DataLayout::counter_increment);
// If the decrement causes the counter to overflow, stay negative
Label L;
jcc(Assembler::negative, L);
- addq(data, DataLayout::counter_increment);
+ addptr(data, (int32_t) DataLayout::counter_increment);
bind(L);
} else {
assert(DataLayout::counter_increment == 1,
"flow-free idiom only works with 1");
// Increment the register. Set carry flag.
- addq(data, DataLayout::counter_increment);
+ addptr(data, DataLayout::counter_increment);
// If the increment causes the counter to overflow, pull back by 1.
- sbbq(data, 0);
+ sbbptr(data, (int32_t)0);
}
}
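
increment_mdp_data_at relies on the branch-light saturating update noted in the "flow-free idiom" assert: add the increment, then subtract the carry (sbb with 0), so a counter that would wrap around stays pinned at its maximum. A standalone sketch of that idiom, assuming 64-bit counter slots.

#include <cstdint>
#include <cstdio>

// Standalone sketch (not HotSpot code) of the add/sbb saturating counter bump.
static uint64_t increment_saturating(uint64_t counter) {
  uint64_t bumped = counter + 1;             // addptr(data, DataLayout::counter_increment)
  uint64_t carry  = (bumped == 0) ? 1 : 0;   // the add sets the carry flag exactly when it wraps
  return bumped - carry;                     // sbbptr(data, 0): pull back by 1 on overflow
}

int main() {
  std::printf("%llu\n", (unsigned long long)increment_saturating(41));          // 42
  std::printf("%llu\n", (unsigned long long)increment_saturating(UINT64_MAX));  // stays at the maximum
  return 0;
}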
@@ -1146,11 +1163,11 @@ void InterpreterMacroAssembler::test_mdp_data_at(Register mdp_in,
Label& not_equal_continue) {
assert(ProfileInterpreter, "must be profiling interpreter");
if (test_value_out == noreg) {
- cmpq(value, Address(mdp_in, offset));
+ cmpptr(value, Address(mdp_in, offset));
} else {
// Put the test value into a register, so caller can use it:
- movq(test_value_out, Address(mdp_in, offset));
- cmpq(test_value_out, value);
+ movptr(test_value_out, Address(mdp_in, offset));
+ cmpptr(test_value_out, value);
}
jcc(Assembler::notEqual, not_equal_continue);
}
@@ -1160,8 +1177,8 @@ void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
int offset_of_disp) {
assert(ProfileInterpreter, "must be profiling interpreter");
Address disp_address(mdp_in, offset_of_disp);
- addq(mdp_in, disp_address);
- movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
+ addptr(mdp_in, disp_address);
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}
@@ -1170,26 +1187,26 @@ void InterpreterMacroAssembler::update_mdp_by_offset(Register mdp_in,
int offset_of_disp) {
assert(ProfileInterpreter, "must be profiling interpreter");
Address disp_address(mdp_in, reg, Address::times_1, offset_of_disp);
- addq(mdp_in, disp_address);
- movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
+ addptr(mdp_in, disp_address);
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}
void InterpreterMacroAssembler::update_mdp_by_constant(Register mdp_in,
int constant) {
assert(ProfileInterpreter, "must be profiling interpreter");
- addq(mdp_in, constant);
- movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
+ addptr(mdp_in, constant);
+ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), mdp_in);
}
void InterpreterMacroAssembler::update_mdp_for_ret(Register return_bci) {
assert(ProfileInterpreter, "must be profiling interpreter");
- pushq(return_bci); // save/restore across call_VM
+ push(return_bci); // save/restore across call_VM
call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::update_mdp_for_ret),
return_bci);
- popq(return_bci);
+ pop(return_bci);
}
@@ -1206,12 +1223,12 @@ void InterpreterMacroAssembler::profile_taken_branch(Register mdp,
// We inline increment_mdp_data_at to return bumped_count in a register
//increment_mdp_data_at(mdp, in_bytes(JumpData::taken_offset()));
Address data(mdp, in_bytes(JumpData::taken_offset()));
- movq(bumped_count, data);
+ movptr(bumped_count, data);
assert(DataLayout::counter_increment == 1,
"flow-free idiom only works with 1");
- addq(bumped_count, DataLayout::counter_increment);
- sbbq(bumped_count, 0);
- movq(data, bumped_count); // Store back out
+ addptr(bumped_count, DataLayout::counter_increment);
+ sbbptr(bumped_count, 0);
+ movptr(data, bumped_count); // Store back out
// The method data pointer needs to be updated to reflect the new target.
update_mdp_by_offset(mdp, in_bytes(JumpData::displacement_offset()));
@@ -1339,7 +1356,7 @@ void InterpreterMacroAssembler::record_klass_in_profile_helper(
if (test_for_null_also) {
// Failed the equality check on receiver[n]... Test for null.
- testq(reg2, reg2);
+ testptr(reg2, reg2);
if (start_row == last_row) {
// The only thing left to do is handle the null case.
jcc(Assembler::notZero, done);
@@ -1535,8 +1552,8 @@ void InterpreterMacroAssembler::profile_switch_case(Register index,
// Build the base (index * per_case_size_in_bytes()) +
// case_array_offset_in_bytes()
movl(reg2, in_bytes(MultiBranchData::per_case_size()));
- imulq(index, reg2); // XXX l ?
- addq(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
+ imulptr(index, reg2); // XXX l ?
+ addptr(index, in_bytes(MultiBranchData::case_array_offset())); // XXX l ?
// Update the case count
increment_mdp_data_at(mdp,
@@ -1554,6 +1571,7 @@ void InterpreterMacroAssembler::profile_switch_case(Register index,
}
+
void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
if (state == atos) {
MacroAssembler::verify_oop(reg);
@@ -1562,6 +1580,7 @@ void InterpreterMacroAssembler::verify_oop(Register reg, TosState state) {
void InterpreterMacroAssembler::verify_FPU(int stack_depth, TosState state) {
}
+#endif // !CC_INTERP
void InterpreterMacroAssembler::notify_method_entry() {
@@ -1598,22 +1617,25 @@ void InterpreterMacroAssembler::notify_method_exit(
// method result is saved across the call to post_method_exit. If this
// is changed then the interpreter_frame_result implementation will
// need to be updated too.
- push(state);
+
+ // For the c++ interpreter the result is always stored at a known location in the frame;
+ // the template interpreter will leave it on the top of the stack.
+ NOT_CC_INTERP(push(state);)
movl(rdx, Address(r15_thread, JavaThread::interp_only_mode_offset()));
testl(rdx, rdx);
jcc(Assembler::zero, L);
call_VM(noreg,
CAST_FROM_FN_PTR(address, InterpreterRuntime::post_method_exit));
bind(L);
- pop(state);
+ NOT_CC_INTERP(pop(state));
}
{
SkipIfEqual skip(this, &DTraceMethodProbes, false);
- push(state);
+ NOT_CC_INTERP(push(state));
get_method(c_rarg1);
call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::dtrace_method_exit),
r15_thread, c_rarg1);
- pop(state);
+ NOT_CC_INTERP(pop(state));
}
}
diff --git a/src/cpu/x86/vm/interp_masm_x86_64.hpp b/src/cpu/x86/vm/interp_masm_x86_64.hpp
index 3b1baa258..3267b4829 100644
--- a/src/cpu/x86/vm/interp_masm_x86_64.hpp
+++ b/src/cpu/x86/vm/interp_masm_x86_64.hpp
@@ -25,8 +25,8 @@
// This file specializes the assembler with interpreter-specific macros
-class InterpreterMacroAssembler
- : public MacroAssembler {
+class InterpreterMacroAssembler: public MacroAssembler {
+#ifndef CC_INTERP
protected:
// Interpreter specific version of call_VM_base
virtual void call_VM_leaf_base(address entry_point,
@@ -44,52 +44,53 @@ class InterpreterMacroAssembler
// base routine for all dispatches
void dispatch_base(TosState state, address* table, bool verifyoop = true);
+#endif // CC_INTERP
public:
- InterpreterMacroAssembler(CodeBuffer* code)
- : MacroAssembler(code)
- {}
+ InterpreterMacroAssembler(CodeBuffer* code) : MacroAssembler(code) {}
void load_earlyret_value(TosState state);
+#ifdef CC_INTERP
+ void save_bcp() { /* not needed in c++ interpreter and harmless */ }
+ void restore_bcp() { /* not needed in c++ interpreter and harmless */ }
+
+ // Helpers for runtime call arguments/results
+ void get_method(Register reg);
+
+#else
+
// Interpreter-specific registers
- void save_bcp()
- {
- movq(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), r13);
+ void save_bcp() {
+ movptr(Address(rbp, frame::interpreter_frame_bcx_offset * wordSize), r13);
}
- void restore_bcp()
- {
- movq(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
+ void restore_bcp() {
+ movptr(r13, Address(rbp, frame::interpreter_frame_bcx_offset * wordSize));
}
- void restore_locals()
- {
- movq(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
+ void restore_locals() {
+ movptr(r14, Address(rbp, frame::interpreter_frame_locals_offset * wordSize));
}
// Helpers for runtime call arguments/results
- void get_method(Register reg)
- {
- movq(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
+ void get_method(Register reg) {
+ movptr(reg, Address(rbp, frame::interpreter_frame_method_offset * wordSize));
}
- void get_constant_pool(Register reg)
- {
+ void get_constant_pool(Register reg) {
get_method(reg);
- movq(reg, Address(reg, methodOopDesc::constants_offset()));
+ movptr(reg, Address(reg, methodOopDesc::constants_offset()));
}
- void get_constant_pool_cache(Register reg)
- {
+ void get_constant_pool_cache(Register reg) {
get_constant_pool(reg);
- movq(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes()));
+ movptr(reg, Address(reg, constantPoolOopDesc::cache_offset_in_bytes()));
}
- void get_cpool_and_tags(Register cpool, Register tags)
- {
+ void get_cpool_and_tags(Register cpool, Register tags) {
get_constant_pool(cpool);
- movq(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
+ movptr(tags, Address(cpool, constantPoolOopDesc::tags_offset_in_bytes()));
}
void get_unsigned_2_byte_index_at_bcp(Register reg, int bcp_offset);
@@ -98,6 +99,7 @@ class InterpreterMacroAssembler
void get_cache_entry_pointer_at_bcp(Register cache, Register tmp,
int bcp_offset);
+
void pop_ptr(Register r = rax);
void pop_i(Register r = rax);
void pop_l(Register r = rax);
@@ -109,15 +111,23 @@ class InterpreterMacroAssembler
void push_f(XMMRegister r = xmm0);
void push_d(XMMRegister r = xmm0);
+ void pop(Register r ) { ((MacroAssembler*)this)->pop(r); }
+
+ void push(Register r ) { ((MacroAssembler*)this)->push(r); }
+ void push(int32_t imm ) { ((MacroAssembler*)this)->push(imm); }
+
void pop(TosState state); // transition vtos -> state
void push(TosState state); // transition state -> vtos
// Tagged stack support, pop and push both tag and value.
void pop_ptr(Register r, Register tag);
void push_ptr(Register r, Register tag);
+#endif // CC_INTERP
DEBUG_ONLY(void verify_stack_tag(frame::Tag t);)
+#ifndef CC_INTERP
+
// Tagged stack helpers for swap and dup
void load_ptr_and_tag(int n, Register val, Register tag);
void store_ptr_and_tag(int n, Register val, Register tag);
@@ -133,12 +143,12 @@ class InterpreterMacroAssembler
void verify_local_tag(frame::Tag tag, Register idx);
#endif // ASSERT
+
void empty_expression_stack()
{
- movq(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset *
- wordSize));
+ movptr(rsp, Address(rbp, frame::interpreter_frame_monitor_block_top_offset * wordSize));
// NULL last_sp until next java call
- movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
}
// Super call_VM calls - correspond to MacroAssembler::call_VM(_leaf) calls
@@ -185,11 +195,14 @@ class InterpreterMacroAssembler
bool throw_monitor_exception = true,
bool install_monitor_exception = true,
bool notify_jvmdi = true);
+#endif // CC_INTERP
// Object locking
void lock_object (Register lock_reg);
void unlock_object(Register lock_reg);
+#ifndef CC_INTERP
+
// Interpreter profiling operations
void set_method_data_pointer_for_bcp();
void test_method_data_pointer(Register mdp, Label& zero_continue);
@@ -237,6 +250,8 @@ class InterpreterMacroAssembler
// only if +VerifyFPU && (state == ftos || state == dtos)
void verify_FPU(int stack_depth, TosState state = ftos);
+#endif // !CC_INTERP
+
typedef enum { NotifyJVMTI, SkipNotifyJVMTI } NotifyMethodExitMode;
// support for jvmti/dtrace
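
The header now brackets template-interpreter-only members with #ifndef CC_INTERP, and the .cpp file wraps single statements such as push(state)/pop(state) in NOT_CC_INTERP(...). NOT_CC_INTERP is a real HotSpot macro; the definition below is a simplified stand-in showing how the guarded code compiles away in a C++ interpreter build.

#include <cstdio>

#ifdef CC_INTERP
  #define NOT_CC_INTERP(code)
#else
  #define NOT_CC_INTERP(code) code
#endif

int main() {
  NOT_CC_INTERP(std::printf("compiled only for the template interpreter\n");)
  return 0;
}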
diff --git a/src/cpu/x86/vm/interpreterRT_x86_32.cpp b/src/cpu/x86/vm/interpreterRT_x86_32.cpp
index 73f11d64c..34bf83502 100644
--- a/src/cpu/x86/vm/interpreterRT_x86_32.cpp
+++ b/src/cpu/x86/vm/interpreterRT_x86_32.cpp
@@ -50,13 +50,13 @@ void InterpreterRuntime::SignatureHandlerGenerator::move(int from_offset, int to
void InterpreterRuntime::SignatureHandlerGenerator::box(int from_offset, int to_offset) {
- __ leal(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset)));
- __ cmpl(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), 0); // do not use temp() to avoid AGI
+ __ lea(temp(), Address(from(), Interpreter::local_offset_in_bytes(from_offset)));
+ __ cmpptr(Address(from(), Interpreter::local_offset_in_bytes(from_offset)), (int32_t)NULL_WORD); // do not use temp() to avoid AGI
Label L;
__ jcc(Assembler::notZero, L);
- __ movl(temp(), 0);
+ __ movptr(temp(), ((int32_t)NULL_WORD));
__ bind(L);
- __ movl(Address(to(), to_offset * wordSize), temp());
+ __ movptr(Address(to(), to_offset * wordSize), temp());
}
diff --git a/src/cpu/x86/vm/interpreterRT_x86_64.cpp b/src/cpu/x86/vm/interpreterRT_x86_64.cpp
index 90d0ad9d8..99381900b 100644
--- a/src/cpu/x86/vm/interpreterRT_x86_64.cpp
+++ b/src/cpu/x86/vm/interpreterRT_x86_64.cpp
@@ -93,49 +93,49 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_long() {
#ifdef _WIN64
switch (_num_args) {
case 0:
- __ movq(c_rarg1, src);
+ __ movptr(c_rarg1, src);
_num_args++;
break;
case 1:
- __ movq(c_rarg2, src);
+ __ movptr(c_rarg2, src);
_num_args++;
break;
case 2:
- __ movq(c_rarg3, src);
+ __ movptr(c_rarg3, src);
_num_args++;
break;
case 3:
default:
- __ movq(rax, src);
- __ movq(Address(to(), _stack_offset), rax);
+ __ movptr(rax, src);
+ __ movptr(Address(to(), _stack_offset), rax);
_stack_offset += wordSize;
break;
}
#else
switch (_num_int_args) {
case 0:
- __ movq(c_rarg1, src);
+ __ movptr(c_rarg1, src);
_num_int_args++;
break;
case 1:
- __ movq(c_rarg2, src);
+ __ movptr(c_rarg2, src);
_num_int_args++;
break;
case 2:
- __ movq(c_rarg3, src);
+ __ movptr(c_rarg3, src);
_num_int_args++;
break;
case 3:
- __ movq(c_rarg4, src);
+ __ movptr(c_rarg4, src);
_num_int_args++;
break;
case 4:
- __ movq(c_rarg5, src);
+ __ movptr(c_rarg5, src);
_num_int_args++;
break;
default:
- __ movq(rax, src);
- __ movq(Address(to(), _stack_offset), rax);
+ __ movptr(rax, src);
+ __ movptr(Address(to(), _stack_offset), rax);
_stack_offset += wordSize;
break;
}
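
The _WIN64 and non-Windows branches above count arguments differently because the two C calling conventions differ: Windows x64 has four argument slots shared positionally between integer and XMM registers, while the System V AMD64 ABI tracks six integer and eight floating-point registers independently. An illustrative standalone sketch, not HotSpot code.

#include <cstdio>

static const char* win64_int_slot(int num_args) {
  static const char* regs[] = { "rcx", "rdx", "r8", "r9" };
  return num_args < 4 ? regs[num_args] : "stack";
}

static const char* sysv_int_slot(int num_int_args) {
  static const char* regs[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
  return num_int_args < 6 ? regs[num_int_args] : "stack";
}

int main() {
  // The 5th integer argument still fits a register on SysV but spills to the stack on Win64.
  std::printf("win64: %s, sysv: %s\n", win64_int_slot(4), sysv_int_slot(4));
  return 0;
}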
@@ -171,16 +171,16 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_double() {
if (_num_args < Argument::n_float_register_parameters_c-1) {
__ movdbl(as_XMMRegister(++_num_args), src);
} else {
- __ movq(rax, src);
- __ movq(Address(to(), _stack_offset), rax);
+ __ movptr(rax, src);
+ __ movptr(Address(to(), _stack_offset), rax);
_stack_offset += wordSize;
}
#else
if (_num_fp_args < Argument::n_float_register_parameters_c) {
__ movdbl(as_XMMRegister(_num_fp_args++), src);
} else {
- __ movq(rax, src);
- __ movq(Address(to(), _stack_offset), rax);
+ __ movptr(rax, src);
+ __ movptr(Address(to(), _stack_offset), rax);
_stack_offset += wordSize;
}
#endif
@@ -193,29 +193,29 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
switch (_num_args) {
case 0:
assert(offset() == 0, "argument register 1 can only be (non-null) receiver");
- __ leaq(c_rarg1, src);
+ __ lea(c_rarg1, src);
_num_args++;
break;
case 1:
- __ leaq(rax, src);
+ __ lea(rax, src);
__ xorl(c_rarg2, c_rarg2);
- __ cmpq(src, 0);
- __ cmovq(Assembler::notEqual, c_rarg2, rax);
+ __ cmpptr(src, 0);
+ __ cmov(Assembler::notEqual, c_rarg2, rax);
_num_args++;
break;
case 2:
- __ leaq(rax, src);
+ __ lea(rax, src);
__ xorl(c_rarg3, c_rarg3);
- __ cmpq(src, 0);
- __ cmovq(Assembler::notEqual, c_rarg3, rax);
+ __ cmpptr(src, 0);
+ __ cmov(Assembler::notEqual, c_rarg3, rax);
_num_args++;
break;
default:
- __ leaq(rax, src);
+ __ lea(rax, src);
__ xorl(temp(), temp());
- __ cmpq(src, 0);
- __ cmovq(Assembler::notEqual, temp(), rax);
- __ movq(Address(to(), _stack_offset), temp());
+ __ cmpptr(src, 0);
+ __ cmov(Assembler::notEqual, temp(), rax);
+ __ movptr(Address(to(), _stack_offset), temp());
_stack_offset += wordSize;
break;
}
@@ -223,43 +223,43 @@ void InterpreterRuntime::SignatureHandlerGenerator::pass_object() {
switch (_num_int_args) {
case 0:
assert(offset() == 0, "argument register 1 can only be (non-null) receiver");
- __ leaq(c_rarg1, src);
+ __ lea(c_rarg1, src);
_num_int_args++;
break;
case 1:
- __ leaq(rax, src);
+ __ lea(rax, src);
__ xorl(c_rarg2, c_rarg2);
- __ cmpq(src, 0);
- __ cmovq(Assembler::notEqual, c_rarg2, rax);
+ __ cmpptr(src, 0);
+ __ cmov(Assembler::notEqual, c_rarg2, rax);
_num_int_args++;
break;
case 2:
- __ leaq(rax, src);
+ __ lea(rax, src);
__ xorl(c_rarg3, c_rarg3);
- __ cmpq(src, 0);
- __ cmovq(Assembler::notEqual, c_rarg3, rax);
+ __ cmpptr(src, 0);
+ __ cmov(Assembler::notEqual, c_rarg3, rax);
_num_int_args++;
break;
case 3:
- __ leaq(rax, src);
+ __ lea(rax, src);
__ xorl(c_rarg4, c_rarg4);
- __ cmpq(src, 0);
- __ cmovq(Assembler::notEqual, c_rarg4, rax);
+ __ cmpptr(src, 0);
+ __ cmov(Assembler::notEqual, c_rarg4, rax);
_num_int_args++;
break;
case 4:
- __ leaq(rax, src);
+ __ lea(rax, src);
__ xorl(c_rarg5, c_rarg5);
- __ cmpq(src, 0);
- __ cmovq(Assembler::notEqual, c_rarg5, rax);
+ __ cmpptr(src, 0);
+ __ cmov(Assembler::notEqual, c_rarg5, rax);
_num_int_args++;
break;
default:
- __ leaq(rax, src);
+ __ lea(rax, src);
__ xorl(temp(), temp());
- __ cmpq(src, 0);
- __ cmovq(Assembler::notEqual, temp(), rax);
- __ movq(Address(to(), _stack_offset), temp());
+ __ cmpptr(src, 0);
+ __ cmov(Assembler::notEqual, temp(), rax);
+ __ movptr(Address(to(), _stack_offset), temp());
_stack_offset += wordSize;
break;
}
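
The lea/xorl/cmpptr/cmov sequences in pass_object() compute the C argument for a JNI object parameter: the address of the interpreter local slot holding the oop, or a plain NULL when the oop itself is null. A standalone sketch of that computation, not HotSpot code.

#include <cstdint>
#include <cstdio>

static void* jni_object_arg(uintptr_t* local_slot) {
  void* handle = local_slot;                 // lea(reg, src): address of the local slot
  void* result = nullptr;                    // xorl(reg, reg): default to NULL
  if (*local_slot != 0) {                    // cmpptr(src, 0)
    result = handle;                         // cmov(notEqual, reg, rax)
  }
  return result;
}

int main() {
  uintptr_t oop = 0x1234;
  uintptr_t null_oop = 0;
  std::printf("%p %p\n", jni_object_arg(&oop), jni_object_arg(&null_oop));
  return 0;
}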
diff --git a/src/cpu/x86/vm/interpreter_x86_32.cpp b/src/cpu/x86/vm/interpreter_x86_32.cpp
index fbdf398bc..cb24f12ed 100644
--- a/src/cpu/x86/vm/interpreter_x86_32.cpp
+++ b/src/cpu/x86/vm/interpreter_x86_32.cpp
@@ -38,7 +38,7 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// rcx: temporary
// rdi: pointer to locals
// rsp: end of copied parameters area
- __ movl(rcx, rsp);
+ __ mov(rcx, rsp);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::slow_signature_handler), rbx, rdi, rcx);
__ ret(0);
return entry;
@@ -75,8 +75,8 @@ address InterpreterGenerator::generate_empty_entry(void) {
// Code: _return
// _return
// return w/o popping parameters
- __ popl(rax);
- __ movl(rsp, rsi);
+ __ pop(rax);
+ __ mov(rsp, rsi);
__ jmp(rax);
__ bind(slow_path);
@@ -135,7 +135,7 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
__ pushl(Address(rsp, 3*wordSize)); // push hi (and note rsp -= wordSize)
__ pushl(Address(rsp, 2*wordSize)); // push lo
__ fld_d(Address(rsp, 0)); // get double in ST0
- __ addl(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
} else {
__ fld_d(Address(rsp, 1*wordSize));
}
@@ -173,15 +173,15 @@ address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKin
// return double result in xmm0 for interpreter and compilers.
if (UseSSE >= 2) {
- __ subl(rsp, 2*wordSize);
+ __ subptr(rsp, 2*wordSize);
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
- __ addl(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
}
// done, result in FPU ST(0) or XMM0
- __ popl(rdi); // get return address
- __ movl(rsp, rsi); // set sp to sender sp
+ __ pop(rdi); // get return address
+ __ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
return entry_point;
@@ -202,10 +202,10 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry
// remove return address. Not really needed, since exception handling throws away expression stack
- __ popl(rbx);
+ __ pop(rbx);
// adjust stack to what a normal return would do
- __ movl(rsp, rsi);
+ __ mov(rsp, rsi);
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
// the call_VM checks for exception, so we should never return here.
diff --git a/src/cpu/x86/vm/interpreter_x86_64.cpp b/src/cpu/x86/vm/interpreter_x86_64.cpp
index 9636c5739..34e5b56b8 100644
--- a/src/cpu/x86/vm/interpreter_x86_64.cpp
+++ b/src/cpu/x86/vm/interpreter_x86_64.cpp
@@ -35,9 +35,9 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// rbx: method
// r14: pointer to locals
// c_rarg3: first stack arg - wordSize
- __ movq(c_rarg3, rsp);
+ __ mov(c_rarg3, rsp);
// adjust rsp
- __ subq(rsp, 4 * wordSize);
+ __ subptr(rsp, 4 * wordSize);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler),
@@ -70,13 +70,13 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
case 0:
__ movl(rscratch1, Address(rbx, methodOopDesc::access_flags_offset()));
__ testl(rscratch1, JVM_ACC_STATIC);
- __ cmovq(Assembler::zero, c_rarg1, Address(rsp, 0));
+ __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
break;
case 1:
- __ movq(c_rarg2, Address(rsp, wordSize));
+ __ movptr(c_rarg2, Address(rsp, wordSize));
break;
case 2:
- __ movq(c_rarg3, Address(rsp, 2 * wordSize));
+ __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
break;
default:
break;
@@ -101,7 +101,7 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// restore rsp
- __ addq(rsp, 4 * wordSize);
+ __ addptr(rsp, 4 * wordSize);
__ ret(0);
@@ -114,9 +114,9 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// rbx: method
// r14: pointer to locals
// c_rarg3: first stack arg - wordSize
- __ movq(c_rarg3, rsp);
+ __ mov(c_rarg3, rsp);
// adjust rsp
- __ subq(rsp, 14 * wordSize);
+ __ subptr(rsp, 14 * wordSize);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::slow_signature_handler),
@@ -155,15 +155,15 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// Now handle integrals. Only do c_rarg1 if not static.
__ movl(c_rarg3, Address(rbx, methodOopDesc::access_flags_offset()));
__ testl(c_rarg3, JVM_ACC_STATIC);
- __ cmovq(Assembler::zero, c_rarg1, Address(rsp, 0));
+ __ cmovptr(Assembler::zero, c_rarg1, Address(rsp, 0));
- __ movq(c_rarg2, Address(rsp, wordSize));
- __ movq(c_rarg3, Address(rsp, 2 * wordSize));
- __ movq(c_rarg4, Address(rsp, 3 * wordSize));
- __ movq(c_rarg5, Address(rsp, 4 * wordSize));
+ __ movptr(c_rarg2, Address(rsp, wordSize));
+ __ movptr(c_rarg3, Address(rsp, 2 * wordSize));
+ __ movptr(c_rarg4, Address(rsp, 3 * wordSize));
+ __ movptr(c_rarg5, Address(rsp, 4 * wordSize));
// restore rsp
- __ addq(rsp, 14 * wordSize);
+ __ addptr(rsp, 14 * wordSize);
__ ret(0);
@@ -176,14 +176,13 @@ address AbstractInterpreterGenerator::generate_slow_signature_handler() {
// Various method entries
//
-address InterpreterGenerator::generate_math_entry(
- AbstractInterpreter::MethodKind kind) {
- // rbx: methodOop
+address InterpreterGenerator::generate_math_entry(AbstractInterpreter::MethodKind kind) {
- if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
+ // rbx: methodOop
+ // rcx: scratch
+ // r13: sender sp
- assert(kind == Interpreter::java_lang_math_sqrt,
- "Other intrinsics are not special");
+ if (!InlineIntrinsics) return NULL; // Generate a vanilla entry
address entry_point = __ pc();
@@ -197,6 +196,11 @@ address InterpreterGenerator::generate_math_entry(
// in order to avoid monotonicity bugs when switching
// from interpreter to compiler in the middle of some
// computation)
+ //
+ // stack: [ ret adr ] <-- rsp
+ // [ lo(arg) ]
+ // [ hi(arg) ]
+ //
// Note: For JDK 1.2 StrictMath doesn't exist and Math.sin/cos/sqrt are
// native methods. Interpreter::method_kind(...) does a check for
@@ -218,10 +222,46 @@ address InterpreterGenerator::generate_math_entry(
// Note: For JDK 1.3 StrictMath exists and Math.sin/cos/sqrt are
// java methods. Interpreter::method_kind(...) will select
// this entry point for the corresponding methods in JDK 1.3.
- __ sqrtsd(xmm0, Address(rsp, wordSize));
+ // get argument
- __ popq(rax);
- __ movq(rsp, r13);
+ if (kind == Interpreter::java_lang_math_sqrt) {
+ __ sqrtsd(xmm0, Address(rsp, wordSize));
+ } else {
+ __ fld_d(Address(rsp, wordSize));
+ switch (kind) {
+ case Interpreter::java_lang_math_sin :
+ __ trigfunc('s');
+ break;
+ case Interpreter::java_lang_math_cos :
+ __ trigfunc('c');
+ break;
+ case Interpreter::java_lang_math_tan :
+ __ trigfunc('t');
+ break;
+ case Interpreter::java_lang_math_abs:
+ __ fabs();
+ break;
+ case Interpreter::java_lang_math_log:
+ __ flog();
+ break;
+ case Interpreter::java_lang_math_log10:
+ __ flog10();
+ break;
+ default :
+ ShouldNotReachHere();
+ }
+
+ // return double result in xmm0 for interpreter and compilers.
+ __ subptr(rsp, 2*wordSize);
+ // Round to 64bit precision
+ __ fstp_d(Address(rsp, 0));
+ __ movdbl(xmm0, Address(rsp, 0));
+ __ addptr(rsp, 2*wordSize);
+ }
+
+
+ __ pop(rax);
+ __ mov(rsp, r13);
__ jmp(rax);
return entry_point;
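
The non-sqrt branch added above computes on the x87 stack and then stores the result to memory and reloads it into xmm0. A standalone sketch (not HotSpot code) of why that round trip matters: the x87 registers hold 80-bit extended precision, and the fstp_d/movdbl pair rounds the value to a 64-bit double before it is returned. Assumes an x86 target where long double is the 80-bit extended type.

#include <cstdio>

int main() {
  long double extended = 1.0L / 3.0L;   // extended-precision value, like ST(0)
  double rounded = (double)extended;    // fstp_d(Address(rsp, 0)); movdbl(xmm0, Address(rsp, 0))
  std::printf("%.21Lf\n%.21f\n", extended, rounded);
  return 0;
}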
@@ -239,10 +279,10 @@ address InterpreterGenerator::generate_abstract_entry(void) {
// abstract method entry
// remove return address. Not really needed, since exception
// handling throws away expression stack
- __ popq(rbx);
+ __ pop(rbx);
// adjust stack to what a normal return would do
- __ movq(rsp, r13);
+ __ mov(rsp, r13);
// throw exception
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
@@ -276,8 +316,8 @@ address InterpreterGenerator::generate_empty_entry(void) {
// Code: _return
// _return
// return w/o popping parameters
- __ popq(rax);
- __ movq(rsp, r13);
+ __ pop(rax);
+ __ mov(rsp, r13);
__ jmp(rax);
__ bind(slow_path);
@@ -286,148 +326,6 @@ address InterpreterGenerator::generate_empty_entry(void) {
}
-// Call an accessor method (assuming it is resolved, otherwise drop
-// into vanilla (slow path) entry
-address InterpreterGenerator::generate_accessor_entry(void) {
- // rbx: methodOop
-
- // r13: senderSP must preserver for slow path, set SP to it on fast path
-
- address entry_point = __ pc();
- Label xreturn_path;
-
- // do fastpath for resolved accessor methods
- if (UseFastAccessorMethods) {
- // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
- // thereof; parameter size = 1
- // Note: We can only use this code if the getfield has been resolved
- // and if we don't have a null-pointer exception => check for
- // these conditions first and use slow path if necessary.
- Label slow_path;
- // If we need a safepoint check, generate full interpreter entry.
- __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
- SafepointSynchronize::_not_synchronized);
-
- __ jcc(Assembler::notEqual, slow_path);
- // rbx: method
- __ movq(rax, Address(rsp, wordSize));
-
- // check if local 0 != NULL and read field
- __ testq(rax, rax);
- __ jcc(Assembler::zero, slow_path);
-
- __ movq(rdi, Address(rbx, methodOopDesc::constants_offset()));
- // read first instruction word and extract bytecode @ 1 and index @ 2
- __ movq(rdx, Address(rbx, methodOopDesc::const_offset()));
- __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
- // Shift codes right to get the index on the right.
- // The bytecode fetched looks like <index><0xb4><0x2a>
- __ shrl(rdx, 2 * BitsPerByte);
- __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movq(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
-
- // rax: local 0
- // rbx: method
- // rdx: constant pool cache index
- // rdi: constant pool cache
-
- // check if getfield has been resolved and read constant pool cache entry
- // check the validity of the cache entry by testing whether _indices field
- // contains Bytecode::_getfield in b1 byte.
- assert(in_words(ConstantPoolCacheEntry::size()) == 4,
- "adjust shift below");
- __ movl(rcx,
- Address(rdi,
- rdx,
- Address::times_8,
- constantPoolCacheOopDesc::base_offset() +
- ConstantPoolCacheEntry::indices_offset()));
- __ shrl(rcx, 2 * BitsPerByte);
- __ andl(rcx, 0xFF);
- __ cmpl(rcx, Bytecodes::_getfield);
- __ jcc(Assembler::notEqual, slow_path);
-
- // Note: constant pool entry is not valid before bytecode is resolved
- __ movq(rcx,
- Address(rdi,
- rdx,
- Address::times_8,
- constantPoolCacheOopDesc::base_offset() +
- ConstantPoolCacheEntry::f2_offset()));
- // edx: flags
- __ movl(rdx,
- Address(rdi,
- rdx,
- Address::times_8,
- constantPoolCacheOopDesc::base_offset() +
- ConstantPoolCacheEntry::flags_offset()));
-
- Label notObj, notInt, notByte, notShort;
- const Address field_address(rax, rcx, Address::times_1);
-
- // Need to differentiate between igetfield, agetfield, bgetfield etc.
- // because they are different sizes.
- // Use the type from the constant pool cache
- __ shrl(rdx, ConstantPoolCacheEntry::tosBits);
- // Make sure we don't need to mask edx for tosBits after the above shift
- ConstantPoolCacheEntry::verify_tosBits();
-
- __ cmpl(rdx, atos);
- __ jcc(Assembler::notEqual, notObj);
- // atos
- __ load_heap_oop(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notObj);
- __ cmpl(rdx, itos);
- __ jcc(Assembler::notEqual, notInt);
- // itos
- __ movl(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notInt);
- __ cmpl(rdx, btos);
- __ jcc(Assembler::notEqual, notByte);
- // btos
- __ load_signed_byte(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notByte);
- __ cmpl(rdx, stos);
- __ jcc(Assembler::notEqual, notShort);
- // stos
- __ load_signed_word(rax, field_address);
- __ jmp(xreturn_path);
-
- __ bind(notShort);
-#ifdef ASSERT
- Label okay;
- __ cmpl(rdx, ctos);
- __ jcc(Assembler::equal, okay);
- __ stop("what type is this?");
- __ bind(okay);
-#endif
- // ctos
- __ load_unsigned_word(rax, field_address);
-
- __ bind(xreturn_path);
-
- // _ireturn/_areturn
- __ popq(rdi);
- __ movq(rsp, r13);
- __ jmp(rdi);
- __ ret(0);
-
- // generate a vanilla interpreter entry as the slow path
- __ bind(slow_path);
- (void) generate_normal_entry(false);
- } else {
- (void) generate_normal_entry(false);
- }
-
- return entry_point;
-}
-
// This method tells the deoptimizer how big an interpreted frame must be:
int AbstractInterpreter::size_activation(methodOop method,
int tempcount,
diff --git a/src/cpu/x86/vm/jniFastGetField_x86_32.cpp b/src/cpu/x86/vm/jniFastGetField_x86_32.cpp
index 9643c843d..d362b2827 100644
--- a/src/cpu/x86/vm/jniFastGetField_x86_32.cpp
+++ b/src/cpu/x86/vm/jniFastGetField_x86_32.cpp
@@ -72,25 +72,25 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
__ testb (rcx, 1);
__ jcc (Assembler::notZero, slow);
if (os::is_MP()) {
- __ movl (rax, rcx);
- __ andl (rax, 1); // rax, must end up 0
- __ movl (rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
+ __ mov(rax, rcx);
+ __ andptr(rax, 1); // rax, must end up 0
+ __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
// obj, notice rax, is 0.
// rdx is data dependent on rcx.
} else {
- __ movl (rdx, Address(rsp, 2*wordSize)); // obj
+ __ movptr (rdx, Address(rsp, 2*wordSize)); // obj
}
- __ movl (rax, Address(rsp, 3*wordSize)); // jfieldID
- __ movl (rdx, Address(rdx, 0)); // *obj
- __ shrl (rax, 2); // offset
+ __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID
+ __ movptr(rdx, Address(rdx, 0)); // *obj
+ __ shrptr (rax, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
switch (type) {
- case T_BOOLEAN: __ movzxb (rax, Address(rdx, rax, Address::times_1)); break;
- case T_BYTE: __ movsxb (rax, Address(rdx, rax, Address::times_1)); break;
- case T_CHAR: __ movzxw (rax, Address(rdx, rax, Address::times_1)); break;
- case T_SHORT: __ movsxw (rax, Address(rdx, rax, Address::times_1)); break;
+ case T_BOOLEAN: __ movzbl (rax, Address(rdx, rax, Address::times_1)); break;
+ case T_BYTE: __ movsbl (rax, Address(rdx, rax, Address::times_1)); break;
+ case T_CHAR: __ movzwl (rax, Address(rdx, rax, Address::times_1)); break;
+ case T_SHORT: __ movswl (rax, Address(rdx, rax, Address::times_1)); break;
case T_INT: __ movl (rax, Address(rdx, rax, Address::times_1)); break;
default: ShouldNotReachHere();
}
@@ -98,8 +98,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
Address ca1;
if (os::is_MP()) {
__ lea(rdx, counter);
- __ xorl(rdx, rax);
- __ xorl(rdx, rax);
+ __ xorptr(rdx, rax);
+ __ xorptr(rdx, rax);
__ cmp32(rcx, Address(rdx, 0));
// ca1 is the same as ca because
// rax, ^ counter_addr ^ rax, = address
@@ -184,35 +184,37 @@ address JNI_FastGetField::generate_fast_get_long_field() {
ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
- __ pushl (rsi);
+ __ push (rsi);
__ mov32 (rcx, counter);
__ testb (rcx, 1);
__ jcc (Assembler::notZero, slow);
if (os::is_MP()) {
- __ movl (rax, rcx);
- __ andl (rax, 1); // rax, must end up 0
- __ movl (rdx, Address(rsp, rax, Address::times_1, 3*wordSize));
+ __ mov(rax, rcx);
+ __ andptr(rax, 1); // rax, must end up 0
+ __ movptr(rdx, Address(rsp, rax, Address::times_1, 3*wordSize));
// obj, notice rax, is 0.
// rdx is data dependent on rcx.
} else {
- __ movl (rdx, Address(rsp, 3*wordSize)); // obj
+ __ movptr(rdx, Address(rsp, 3*wordSize)); // obj
}
- __ movl (rsi, Address(rsp, 4*wordSize)); // jfieldID
- __ movl (rdx, Address(rdx, 0)); // *obj
- __ shrl (rsi, 2); // offset
+ __ movptr(rsi, Address(rsp, 4*wordSize)); // jfieldID
+ __ movptr(rdx, Address(rdx, 0)); // *obj
+ __ shrptr(rsi, 2); // offset
assert(count < LIST_CAPACITY-1, "LIST_CAPACITY too small");
speculative_load_pclist[count++] = __ pc();
- __ movl (rax, Address(rdx, rsi, Address::times_1));
+ __ movptr(rax, Address(rdx, rsi, Address::times_1));
+#ifndef _LP64
speculative_load_pclist[count] = __ pc();
- __ movl (rdx, Address(rdx, rsi, Address::times_1, 4));
+ __ movl(rdx, Address(rdx, rsi, Address::times_1, 4));
+#endif // _LP64
if (os::is_MP()) {
- __ lea (rsi, counter);
- __ xorl (rsi, rdx);
- __ xorl (rsi, rax);
- __ xorl (rsi, rdx);
- __ xorl (rsi, rax);
+ __ lea(rsi, counter);
+ __ xorptr(rsi, rdx);
+ __ xorptr(rsi, rax);
+ __ xorptr(rsi, rdx);
+ __ xorptr(rsi, rax);
__ cmp32(rcx, Address(rsi, 0));
// ca1 is the same as ca because
// rax, ^ rdx ^ counter_addr ^ rax, ^ rdx = address
@@ -222,7 +224,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
}
__ jcc (Assembler::notEqual, slow);
- __ popl (rsi);
+ __ pop (rsi);
#ifndef _WINDOWS
__ ret (0);
@@ -234,7 +236,7 @@ address JNI_FastGetField::generate_fast_get_long_field() {
slowcase_entry_pclist[count-1] = __ pc();
slowcase_entry_pclist[count++] = __ pc();
__ bind (slow);
- __ popl (rsi);
+ __ pop (rsi);
address slow_case_addr = jni_GetLongField_addr();
// tail call
__ jump (ExternalAddress(slow_case_addr));
@@ -276,23 +278,28 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
__ testb (rcx, 1);
__ jcc (Assembler::notZero, slow);
if (os::is_MP()) {
- __ movl (rax, rcx);
- __ andl (rax, 1); // rax, must end up 0
- __ movl (rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
+ __ mov(rax, rcx);
+ __ andptr(rax, 1); // rax, must end up 0
+ __ movptr(rdx, Address(rsp, rax, Address::times_1, 2*wordSize));
// obj, notice rax, is 0.
// rdx is data dependent on rcx.
} else {
- __ movl (rdx, Address(rsp, 2*wordSize)); // obj
+ __ movptr(rdx, Address(rsp, 2*wordSize)); // obj
}
- __ movl (rax, Address(rsp, 3*wordSize)); // jfieldID
- __ movl (rdx, Address(rdx, 0)); // *obj
- __ shrl (rax, 2); // offset
+ __ movptr(rax, Address(rsp, 3*wordSize)); // jfieldID
+ __ movptr(rdx, Address(rdx, 0)); // *obj
+ __ shrptr(rax, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
switch (type) {
+#ifndef _LP64
case T_FLOAT: __ fld_s (Address(rdx, rax, Address::times_1)); break;
case T_DOUBLE: __ fld_d (Address(rdx, rax, Address::times_1)); break;
+#else
+ case T_FLOAT: __ movflt (xmm0, Address(robj, roffset, Address::times_1)); break;
+ case T_DOUBLE: __ movdbl (xmm0, Address(robj, roffset, Address::times_1)); break;
+#endif // _LP64
default: ShouldNotReachHere();
}
@@ -301,8 +308,9 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
__ fst_s (Address(rsp, -4));
__ lea(rdx, counter);
__ movl (rax, Address(rsp, -4));
- __ xorl(rdx, rax);
- __ xorl(rdx, rax);
+ // garbage hi-order bits on 64bit are harmless.
+ __ xorptr(rdx, rax);
+ __ xorptr(rdx, rax);
__ cmp32(rcx, Address(rdx, 0));
// rax, ^ counter_addr ^ rax, = address
// ca1 is data dependent on the field
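
The paired xorptr calls in these fast-path getters rely on a simple identity: xoring the counter address with the just-loaded value twice leaves the address unchanged, but makes the second safepoint-counter load data-dependent on the field load, so the processor cannot reorder the two loads on MP systems. A standalone sketch of the identity, not HotSpot code.

#include <cstdint>
#include <cstdio>

int main() {
  uintptr_t counter_addr = 0x7fff0000cafeULL;
  uintptr_t loaded_value = 42;
  uintptr_t derived = (counter_addr ^ loaded_value) ^ loaded_value;
  std::printf("address preserved: %d\n", derived == counter_addr);  // prints 1
  return 0;
}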
diff --git a/src/cpu/x86/vm/jniFastGetField_x86_64.cpp b/src/cpu/x86/vm/jniFastGetField_x86_64.cpp
index 31160b66f..53204e70e 100644
--- a/src/cpu/x86/vm/jniFastGetField_x86_64.cpp
+++ b/src/cpu/x86/vm/jniFastGetField_x86_64.cpp
@@ -67,18 +67,18 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
__ mov32 (rcounter, counter);
- __ movq (robj, c_rarg1);
+ __ mov (robj, c_rarg1);
__ testb (rcounter, 1);
__ jcc (Assembler::notZero, slow);
if (os::is_MP()) {
- __ xorq (robj, rcounter);
- __ xorq (robj, rcounter); // obj, since
+ __ xorptr(robj, rcounter);
+ __ xorptr(robj, rcounter); // obj, since
// robj ^ rcounter ^ rcounter == robj
// robj is data dependent on rcounter.
}
- __ movq (robj, Address(robj, 0)); // *obj
- __ movq (roffset, c_rarg2);
- __ shrq (roffset, 2); // offset
+ __ movptr(robj, Address(robj, 0)); // *obj
+ __ mov (roffset, c_rarg2);
+ __ shrptr(roffset, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
@@ -95,8 +95,8 @@ address JNI_FastGetField::generate_fast_get_int_field0(BasicType type) {
if (os::is_MP()) {
__ lea(rcounter_addr, counter);
// ca is data dependent on rax.
- __ xorq (rcounter_addr, rax);
- __ xorq (rcounter_addr, rax);
+ __ xorptr(rcounter_addr, rax);
+ __ xorptr(rcounter_addr, rax);
__ cmpl (rcounter, Address(rcounter_addr, 0));
} else {
__ cmp32 (rcounter, counter);
@@ -165,18 +165,18 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
ExternalAddress counter(SafepointSynchronize::safepoint_counter_addr());
__ mov32 (rcounter, counter);
- __ movq (robj, c_rarg1);
+ __ mov (robj, c_rarg1);
__ testb (rcounter, 1);
__ jcc (Assembler::notZero, slow);
if (os::is_MP()) {
- __ xorq (robj, rcounter);
- __ xorq (robj, rcounter); // obj, since
+ __ xorptr(robj, rcounter);
+ __ xorptr(robj, rcounter); // obj, since
// robj ^ rcounter ^ rcounter == robj
// robj is data dependent on rcounter.
}
- __ movq (robj, Address(robj, 0)); // *obj
- __ movq (roffset, c_rarg2);
- __ shrq (roffset, 2); // offset
+ __ movptr(robj, Address(robj, 0)); // *obj
+ __ mov (roffset, c_rarg2);
+ __ shrptr(roffset, 2); // offset
assert(count < LIST_CAPACITY, "LIST_CAPACITY too small");
speculative_load_pclist[count] = __ pc();
@@ -190,8 +190,8 @@ address JNI_FastGetField::generate_fast_get_float_field0(BasicType type) {
__ lea(rcounter_addr, counter);
__ movdq (rax, xmm0);
// counter address is data dependent on xmm0.
- __ xorq (rcounter_addr, rax);
- __ xorq (rcounter_addr, rax);
+ __ xorptr(rcounter_addr, rax);
+ __ xorptr(rcounter_addr, rax);
__ cmpl (rcounter, Address(rcounter_addr, 0));
} else {
__ cmp32 (rcounter, counter);
diff --git a/src/cpu/x86/vm/nativeInst_x86.cpp b/src/cpu/x86/vm/nativeInst_x86.cpp
index f8147a4cc..b4a3ccea5 100644
--- a/src/cpu/x86/vm/nativeInst_x86.cpp
+++ b/src/cpu/x86/vm/nativeInst_x86.cpp
@@ -223,49 +223,150 @@ void NativeMovConstReg::print() {
//-------------------------------------------------------------------
-#ifndef AMD64
+int NativeMovRegMem::instruction_start() const {
+ int off = 0;
+ u_char instr_0 = ubyte_at(off);
+
+ // First check to see if we have a (prefixed or not) xor
+ if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
+ instr_0 <= instruction_prefix_wide_hi) { // 0x4f
+ off++;
+ instr_0 = ubyte_at(off);
+ }
+
+ if (instr_0 == instruction_code_xor) {
+ off += 2;
+ instr_0 = ubyte_at(off);
+ }
+
+ // Now look for the real instruction and the many prefix/size specifiers.
-void NativeMovRegMem::copy_instruction_to(address new_instruction_address) {
- int inst_size = instruction_size;
+ if (instr_0 == instruction_operandsize_prefix ) { // 0x66
+ off++; // Not SSE instructions
+ instr_0 = ubyte_at(off);
+ }
+
+ if ( instr_0 == instruction_code_xmm_ss_prefix || // 0xf3
+ instr_0 == instruction_code_xmm_sd_prefix) { // 0xf2
+ off++;
+ instr_0 = ubyte_at(off);
+ }
- // See if there's an instruction size prefix override.
- if ( *(address(this)) == instruction_operandsize_prefix &&
- *(address(this)+1) != instruction_code_xmm_code ) { // Not SSE instr
- inst_size += 1;
+ if ( instr_0 >= instruction_prefix_wide_lo && // 0x40
+ instr_0 <= instruction_prefix_wide_hi) { // 0x4f
+ off++;
+ instr_0 = ubyte_at(off);
}
- if ( *(address(this)) == instruction_extended_prefix ) inst_size += 1;
- for (int i = 0; i < instruction_size; i++) {
- *(new_instruction_address + i) = *(address(this) + i);
+
+ if (instr_0 == instruction_extended_prefix ) { // 0x0f
+ off++;
}
+
+ return off;
+}
+
+address NativeMovRegMem::instruction_address() const {
+ return addr_at(instruction_start());
+}
+
+address NativeMovRegMem::next_instruction_address() const {
+ address ret = instruction_address() + instruction_size;
+ u_char instr_0 = *(u_char*) instruction_address();
+ switch (instr_0) {
+ case instruction_operandsize_prefix:
+
+ fatal("should have skipped instruction_operandsize_prefix");
+ break;
+
+ case instruction_extended_prefix:
+ fatal("should have skipped instruction_extended_prefix");
+ break;
+
+ case instruction_code_mem2reg_movslq: // 0x63
+ case instruction_code_mem2reg_movzxb: // 0xB6
+ case instruction_code_mem2reg_movsxb: // 0xBE
+ case instruction_code_mem2reg_movzxw: // 0xB7
+ case instruction_code_mem2reg_movsxw: // 0xBF
+ case instruction_code_reg2mem: // 0x89 (q/l)
+ case instruction_code_mem2reg: // 0x8B (q/l)
+ case instruction_code_reg2memb: // 0x88
+ case instruction_code_mem2regb: // 0x8a
+
+ case instruction_code_float_s: // 0xd9 fld_s a
+ case instruction_code_float_d: // 0xdd fld_d a
+
+ case instruction_code_xmm_load: // 0x10
+ case instruction_code_xmm_store: // 0x11
+ case instruction_code_xmm_lpd: // 0x12
+ {
+ // If there is an SIB then instruction is longer than expected
+ u_char mod_rm = *(u_char*)(instruction_address() + 1);
+ if ((mod_rm & 7) == 0x4) {
+ ret++;
+ }
+ }
+ case instruction_code_xor:
+ fatal("should have skipped xor lead in");
+ break;
+
+ default:
+ fatal("not a NativeMovRegMem");
+ }
+ return ret;
+
+}
+
+int NativeMovRegMem::offset() const {
+ int off = data_offset + instruction_start();
+ u_char mod_rm = *(u_char*)(instruction_address() + 1);
+  // nnnn(r12|rsp) isn't coded as a simple mod/rm, since that encoding
+  // requires an SIB byte, which pushes the nnnn field out by one byte.
+ if ((mod_rm & 7) == 0x4) {
+ off++;
+ }
+ return int_at(off);
+}
+
+void NativeMovRegMem::set_offset(int x) {
+ int off = data_offset + instruction_start();
+ u_char mod_rm = *(u_char*)(instruction_address() + 1);
+  // nnnn(r12|rsp) isn't coded as a simple mod/rm, since that encoding
+  // requires an SIB byte, which pushes the nnnn field out by one byte.
+ if ((mod_rm & 7) == 0x4) {
+ off++;
+ }
+ set_int_at(off, x);
}
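
Both accessors rely on the x86 rule that an r/m field of 100b in the ModRM byte selects an SIB byte, which pushes the displacement one byte further into the instruction. A small sketch of reading such a disp32 (it assumes, as the code above does, that the mod field already selects a 32-bit displacement):

  #include <cstdint>
  #include <cstring>

  // 'op' points at the opcode byte; the ModRM byte follows it.  If the
  // r/m field is 100b an SIB byte sits between ModRM and the disp32.
  static int32_t read_disp32(const unsigned char* op) {
    int disp_off = 2;                      // opcode + ModRM
    if ((op[1] & 7) == 0x4) disp_off++;    // SIB byte present
    int32_t disp;
    std::memcpy(&disp, op + disp_off, sizeof(disp));
    return disp;
  }
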
void NativeMovRegMem::verify() {
// make sure code pattern is actually a mov [reg+offset], reg instruction
u_char test_byte = *(u_char*)instruction_address();
- if ( ! ( (test_byte == instruction_code_reg2memb)
- || (test_byte == instruction_code_mem2regb)
- || (test_byte == instruction_code_mem2regl)
- || (test_byte == instruction_code_reg2meml)
- || (test_byte == instruction_code_mem2reg_movzxb )
- || (test_byte == instruction_code_mem2reg_movzxw )
- || (test_byte == instruction_code_mem2reg_movsxb )
- || (test_byte == instruction_code_mem2reg_movsxw )
- || (test_byte == instruction_code_float_s)
- || (test_byte == instruction_code_float_d)
- || (test_byte == instruction_code_long_volatile) ) )
- {
- u_char byte1 = ((u_char*)instruction_address())[1];
- u_char byte2 = ((u_char*)instruction_address())[2];
- if ((test_byte != instruction_code_xmm_ss_prefix &&
- test_byte != instruction_code_xmm_sd_prefix &&
- test_byte != instruction_operandsize_prefix) ||
- byte1 != instruction_code_xmm_code ||
- (byte2 != instruction_code_xmm_load &&
- byte2 != instruction_code_xmm_lpd &&
- byte2 != instruction_code_xmm_store)) {
+ switch (test_byte) {
+ case instruction_code_reg2memb: // 0x88 movb a, r
+ case instruction_code_reg2mem: // 0x89 movl a, r (can be movq in 64bit)
+ case instruction_code_mem2regb: // 0x8a movb r, a
+ case instruction_code_mem2reg: // 0x8b movl r, a (can be movq in 64bit)
+ break;
+
+ case instruction_code_mem2reg_movslq: // 0x63 movsql r, a
+ case instruction_code_mem2reg_movzxb: // 0xb6 movzbl r, a (movzxb)
+ case instruction_code_mem2reg_movzxw: // 0xb7 movzwl r, a (movzxw)
+ case instruction_code_mem2reg_movsxb: // 0xbe movsbl r, a (movsxb)
+ case instruction_code_mem2reg_movsxw: // 0xbf movswl r, a (movsxw)
+ break;
+
+ case instruction_code_float_s: // 0xd9 fld_s a
+ case instruction_code_float_d: // 0xdd fld_d a
+ case instruction_code_xmm_load: // 0x10 movsd xmm, a
+ case instruction_code_xmm_store: // 0x11 movsd a, xmm
+ case instruction_code_xmm_lpd: // 0x12 movlpd xmm, a
+ break;
+
+ default:
fatal ("not a mov [reg+offs], reg instruction");
- }
}
}
@@ -279,7 +380,14 @@ void NativeMovRegMem::print() {
void NativeLoadAddress::verify() {
  // make sure code pattern is actually a lea reg, [reg+offset] (or 64-bit mov) instruction
u_char test_byte = *(u_char*)instruction_address();
- if ( ! (test_byte == instruction_code) ) {
+#ifdef _LP64
+ if ( (test_byte == instruction_prefix_wide ||
+ test_byte == instruction_prefix_wide_extended) ) {
+ test_byte = *(u_char*)(instruction_address() + 1);
+ }
+#endif // _LP64
+ if ( ! ((test_byte == lea_instruction_code)
+ LP64_ONLY(|| (test_byte == mov64_instruction_code) ))) {
fatal ("not a lea reg, [reg+offs] instruction");
}
}
@@ -289,8 +397,6 @@ void NativeLoadAddress::print() {
tty->print_cr("0x%x: lea [reg + %x], reg", instruction_address(), offset());
}
-#endif // !AMD64
-
//--------------------------------------------------------------------------------
void NativeJump::verify() {
diff --git a/src/cpu/x86/vm/nativeInst_x86.hpp b/src/cpu/x86/vm/nativeInst_x86.hpp
index 255e8210f..bd15cbb7b 100644
--- a/src/cpu/x86/vm/nativeInst_x86.hpp
+++ b/src/cpu/x86/vm/nativeInst_x86.hpp
@@ -235,16 +235,15 @@ class NativeMovConstRegPatching: public NativeMovConstReg {
}
};
-#ifndef AMD64
-
// An interface for accessing/manipulating native moves of the form:
-// mov[b/w/l] [reg + offset], reg (instruction_code_reg2mem)
-// mov[b/w/l] reg, [reg+offset] (instruction_code_mem2reg
-// mov[s/z]x[w/b] [reg + offset], reg
+// mov[b/w/l/q] [reg + offset], reg (instruction_code_reg2mem)
+// mov[b/w/l/q] reg, [reg+offset]   (instruction_code_mem2reg)
+// mov[s/z]x[w/b/q] [reg + offset], reg
// fld_s [reg+offset]
// fld_d [reg+offset]
// fstp_s [reg + offset]
// fstp_d [reg + offset]
+// mov_literal64 scratch,<pointer> ; mov[b/w/l/q] 0(scratch),reg | mov[b/w/l/q] reg,0(scratch)
//
// Warning: These routines must be able to handle any instruction sequences
// that are generated as a result of the load/store byte,word,long
@@ -255,15 +254,18 @@ class NativeMovConstRegPatching: public NativeMovConstReg {
class NativeMovRegMem: public NativeInstruction {
public:
enum Intel_specific_constants {
+ instruction_prefix_wide_lo = Assembler::REX,
+ instruction_prefix_wide_hi = Assembler::REX_WRXB,
instruction_code_xor = 0x33,
instruction_extended_prefix = 0x0F,
+ instruction_code_mem2reg_movslq = 0x63,
instruction_code_mem2reg_movzxb = 0xB6,
instruction_code_mem2reg_movsxb = 0xBE,
instruction_code_mem2reg_movzxw = 0xB7,
instruction_code_mem2reg_movsxw = 0xBF,
instruction_operandsize_prefix = 0x66,
- instruction_code_reg2meml = 0x89,
- instruction_code_mem2regl = 0x8b,
+ instruction_code_reg2mem = 0x89,
+ instruction_code_mem2reg = 0x8b,
instruction_code_reg2memb = 0x88,
instruction_code_mem2regb = 0x8a,
instruction_code_float_s = 0xd9,
@@ -282,73 +284,18 @@ class NativeMovRegMem: public NativeInstruction {
next_instruction_offset = 4
};
- address instruction_address() const {
- if (*addr_at(instruction_offset) == instruction_operandsize_prefix &&
- *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
- return addr_at(instruction_offset+1); // Not SSE instructions
- }
- else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
- return addr_at(instruction_offset+1);
- }
- else if (*addr_at(instruction_offset) == instruction_code_xor) {
- return addr_at(instruction_offset+2);
- }
- else return addr_at(instruction_offset);
- }
+ // helper
+ int instruction_start() const;
- address next_instruction_address() const {
- switch (*addr_at(instruction_offset)) {
- case instruction_operandsize_prefix:
- if (*addr_at(instruction_offset+1) == instruction_code_xmm_code)
- return instruction_address() + instruction_size; // SSE instructions
- case instruction_extended_prefix:
- return instruction_address() + instruction_size + 1;
- case instruction_code_reg2meml:
- case instruction_code_mem2regl:
- case instruction_code_reg2memb:
- case instruction_code_mem2regb:
- case instruction_code_xor:
- return instruction_address() + instruction_size + 2;
- default:
- return instruction_address() + instruction_size;
- }
- }
- int offset() const{
- if (*addr_at(instruction_offset) == instruction_operandsize_prefix &&
- *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
- return int_at(data_offset+1); // Not SSE instructions
- }
- else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
- return int_at(data_offset+1);
- }
- else if (*addr_at(instruction_offset) == instruction_code_xor ||
- *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
- *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
- *addr_at(instruction_offset) == instruction_operandsize_prefix) {
- return int_at(data_offset+2);
- }
- else return int_at(data_offset);
- }
+ address instruction_address() const;
- void set_offset(int x) {
- if (*addr_at(instruction_offset) == instruction_operandsize_prefix &&
- *addr_at(instruction_offset+1) != instruction_code_xmm_code) {
- set_int_at(data_offset+1, x); // Not SSE instructions
- }
- else if (*addr_at(instruction_offset) == instruction_extended_prefix) {
- set_int_at(data_offset+1, x);
- }
- else if (*addr_at(instruction_offset) == instruction_code_xor ||
- *addr_at(instruction_offset) == instruction_code_xmm_ss_prefix ||
- *addr_at(instruction_offset) == instruction_code_xmm_sd_prefix ||
- *addr_at(instruction_offset) == instruction_operandsize_prefix) {
- set_int_at(data_offset+2, x);
- }
- else set_int_at(data_offset, x);
- }
+ address next_instruction_address() const;
+
+ int offset() const;
+
+ void set_offset(int x);
void add_offset_in_bytes(int add_offset) { set_offset ( ( offset() + add_offset ) ); }
- void copy_instruction_to(address new_instruction_address);
void verify();
void print ();
@@ -385,9 +332,19 @@ class NativeMovRegMemPatching: public NativeMovRegMem {
// leal reg, [reg + offset]
class NativeLoadAddress: public NativeMovRegMem {
+#ifdef AMD64
+ static const bool has_rex = true;
+ static const int rex_size = 1;
+#else
+ static const bool has_rex = false;
+ static const int rex_size = 0;
+#endif // AMD64
public:
enum Intel_specific_constants {
- instruction_code = 0x8D
+ instruction_prefix_wide = Assembler::REX_W,
+ instruction_prefix_wide_extended = Assembler::REX_WB,
+ lea_instruction_code = 0x8D,
+ mov64_instruction_code = 0xB8
};
void verify();
@@ -406,8 +363,6 @@ class NativeLoadAddress: public NativeMovRegMem {
}
};
-#endif // AMD64
-
// jump rel32off
class NativeJump: public NativeInstruction {
@@ -424,22 +379,23 @@ class NativeJump: public NativeInstruction {
address next_instruction_address() const { return addr_at(next_instruction_offset); }
address jump_destination() const {
address dest = (int_at(data_offset)+next_instruction_address());
-#ifdef AMD64 // What is this about?
+    // 32-bit used to encode an unresolved jmp as jmp -1, but 64-bit
+    // can't produce that address, so it used a jump to self instead.
+    // Now both 32-bit and 64-bit use jump-to-self as the unresolved
+    // address, which the inline cache code (and relocs) knows about.
+
// return -1 if jump to self
dest = (dest == (address) this) ? (address) -1 : dest;
-#endif // AMD64
return dest;
}
void set_jump_destination(address dest) {
intptr_t val = dest - next_instruction_address();
-#ifdef AMD64
- if (dest == (address) -1) { // can't encode jump to -1
+ if (dest == (address) -1) {
val = -5; // jump to self
- } else {
- assert((labs(val) & 0xFFFFFFFF00000000) == 0,
- "must be 32bit offset");
}
+#ifdef AMD64
+ assert((labs(val) & 0xFFFFFFFF00000000) == 0 || dest == (address)-1, "must be 32bit offset or -1");
#endif // AMD64
set_int_at(data_offset, (jint)val);
}
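
The jump-to-self convention can be sketched independently of the assembler. The layout assumed below is the 5-byte rel32 jmp (one opcode byte followed by a signed 32-bit offset), with (address)-1 used purely as the "unresolved" marker reported to callers:

  #include <cstdint>
  #include <cstring>

  typedef unsigned char* address;

  // Destination of a rel32 jmp at 'insn': next instruction plus offset.
  // A jump to itself is reported as (address)-1, i.e. "unresolved".
  static address jump_destination(address insn) {
    int32_t off;
    std::memcpy(&off, insn + 1, sizeof(off));
    address dest = insn + 5 + off;
    return (dest == insn) ? (address)-1 : dest;
  }

  // Setting (address)-1 re-encodes the jump-to-self (offset -5).
  static void set_jump_destination(address insn, address dest) {
    int32_t off = (dest == (address)-1) ? -5
                                        : (int32_t)(dest - (insn + 5));
    std::memcpy(insn + 1, &off, sizeof(off));
  }
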
@@ -568,11 +524,15 @@ inline bool NativeInstruction::is_cond_jump() { return (int_at(0) & 0xF0FF) =
(ubyte_at(0) & 0xF0) == 0x70; /* short jump */ }
inline bool NativeInstruction::is_safepoint_poll() {
#ifdef AMD64
- return ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
- ubyte_at(1) == 0x05 && // 00 rax 101
- ((intptr_t) addr_at(6)) + int_at(2) == (intptr_t) os::get_polling_page();
+ if ( ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl &&
+ ubyte_at(1) == 0x05 ) { // 00 rax 101
+ address fault = addr_at(6) + int_at(2);
+ return os::is_poll_address(fault);
+ } else {
+ return false;
+ }
#else
- return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2regl ||
+ return ( ubyte_at(0) == NativeMovRegMem::instruction_code_mem2reg ||
ubyte_at(0) == NativeTstRegMem::instruction_code_memXregl ) &&
(ubyte_at(1)&0xC7) == 0x05 && /* Mod R/M == disp32 */
(os::is_poll_address((address)int_at(2)));
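
On 64-bit the poll test above matches 'test eax, dword ptr [rip+disp32]' (opcode 0x85, ModRM 0x05) and then asks the runtime whether the touched address is a polling page. A sketch of decoding the target address with the same byte values (the polling-page comparison itself is left to the caller):

  #include <cstdint>
  #include <cstring>

  typedef unsigned char* address;

  // Returns the address a RIP-relative safepoint poll would touch, or
  // nullptr if 'insn' is not such an instruction.  The instruction is
  // 6 bytes: 0x85 0x05 followed by the 32-bit displacement.
  static address safepoint_poll_target(address insn) {
    if (insn[0] != 0x85 || insn[1] != 0x05) return nullptr;
    int32_t disp;
    std::memcpy(&disp, insn + 2, sizeof(disp));
    return insn + 6 + disp;
  }
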
diff --git a/src/cpu/x86/vm/relocInfo_x86.cpp b/src/cpu/x86/vm/relocInfo_x86.cpp
index 055c23714..4f49d14bb 100644
--- a/src/cpu/x86/vm/relocInfo_x86.cpp
+++ b/src/cpu/x86/vm/relocInfo_x86.cpp
@@ -30,11 +30,11 @@ void Relocation::pd_set_data_value(address x, intptr_t o) {
#ifdef AMD64
x += o;
typedef Assembler::WhichOperand WhichOperand;
- WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64, call32, narrow oop
+ WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm, call32, narrow oop
assert(which == Assembler::disp32_operand ||
which == Assembler::narrow_oop_operand ||
- which == Assembler::imm64_operand, "format unpacks ok");
- if (which == Assembler::imm64_operand) {
+ which == Assembler::imm_operand, "format unpacks ok");
+ if (which == Assembler::imm_operand) {
*pd_address_in_code() = x;
} else if (which == Assembler::narrow_oop_operand) {
address disp = Assembler::locate_operand(addr(), which);
@@ -81,11 +81,16 @@ void Relocation::pd_set_call_destination(address x) {
nativeCall_at(addr())->set_destination(x);
} else if (ni->is_jump()) {
NativeJump* nj = nativeJump_at(addr());
-#ifdef AMD64
+
+    // Unresolved jumps are recognized by a destination of -1.
+    // However, 64-bit can't actually produce such an address and
+    // encodes a jump to self instead; jump_destination() still
+    // returns -1 as the signal. We must not relocate this jmp,
+    // or the inline cache code will not see it as unresolved.
+
if (nj->jump_destination() == (address) -1) {
- x = (address) -1; // retain jump to self
+ x = addr(); // jump to self
}
-#endif // AMD64
nj->set_jump_destination(x);
} else if (ni->is_cond_jump()) {
// %%%% kludge this, for now, until we get a jump_destination method
@@ -106,19 +111,19 @@ address* Relocation::pd_address_in_code() {
// we must parse the instruction a bit to find the embedded word.
assert(is_data(), "must be a DataRelocation");
typedef Assembler::WhichOperand WhichOperand;
- WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64/imm32
+ WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32
#ifdef AMD64
assert(which == Assembler::disp32_operand ||
which == Assembler::call32_operand ||
- which == Assembler::imm64_operand, "format unpacks ok");
- if (which != Assembler::imm64_operand) {
+ which == Assembler::imm_operand, "format unpacks ok");
+ if (which != Assembler::imm_operand) {
    // The "address" in the code is a displacement; we can't return it as
    // an address* since it is really a jint*
ShouldNotReachHere();
return NULL;
}
#else
- assert(which == Assembler::disp32_operand || which == Assembler::imm32_operand, "format unpacks ok");
+ assert(which == Assembler::disp32_operand || which == Assembler::imm_operand, "format unpacks ok");
#endif // AMD64
return (address*) Assembler::locate_operand(addr(), which);
}
@@ -131,11 +136,11 @@ address Relocation::pd_get_address_from_code() {
// we must parse the instruction a bit to find the embedded word.
assert(is_data(), "must be a DataRelocation");
typedef Assembler::WhichOperand WhichOperand;
- WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm64/imm32
+ WhichOperand which = (WhichOperand) format(); // that is, disp32 or imm/imm32
assert(which == Assembler::disp32_operand ||
which == Assembler::call32_operand ||
- which == Assembler::imm64_operand, "format unpacks ok");
- if (which != Assembler::imm64_operand) {
+ which == Assembler::imm_operand, "format unpacks ok");
+ if (which != Assembler::imm_operand) {
address ip = addr();
address disp = Assembler::locate_operand(ip, which);
address next_ip = Assembler::locate_next_instruction(ip);
@@ -169,3 +174,44 @@ void Relocation::pd_swap_out_breakpoint(address x, short* instrs, int instrlen)
NativeInstruction* ni = nativeInstruction_at(x);
*(short*)ni->addr_at(0) = instrs[0];
}
+
+void poll_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+#ifdef _LP64
+ typedef Assembler::WhichOperand WhichOperand;
+ WhichOperand which = (WhichOperand) format();
+ // This format is imm but it is really disp32
+ which = Assembler::disp32_operand;
+ address orig_addr = old_addr_for(addr(), src, dest);
+ NativeInstruction* oni = nativeInstruction_at(orig_addr);
+ int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
+  // This poll_addr is off by the size of the instruction, but that is irrelevant here
+ intptr_t poll_addr = (intptr_t)oni + *orig_disp;
+
+ NativeInstruction* ni = nativeInstruction_at(addr());
+ intptr_t new_disp = poll_addr - (intptr_t) ni;
+
+ int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
+  *disp = (int32_t)new_disp;
+
+#endif // _LP64
+}
+
+void poll_return_Relocation::fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest) {
+#ifdef _LP64
+ typedef Assembler::WhichOperand WhichOperand;
+ WhichOperand which = (WhichOperand) format();
+ // This format is imm but it is really disp32
+ which = Assembler::disp32_operand;
+ address orig_addr = old_addr_for(addr(), src, dest);
+ NativeInstruction* oni = nativeInstruction_at(orig_addr);
+ int32_t* orig_disp = (int32_t*) Assembler::locate_operand(orig_addr, which);
+  // This poll_addr is off by the size of the instruction, but that is irrelevant here
+ intptr_t poll_addr = (intptr_t)oni + *orig_disp;
+
+ NativeInstruction* ni = nativeInstruction_at(addr());
+ intptr_t new_disp = poll_addr - (intptr_t) ni;
+
+ int32_t* disp = (int32_t*) Assembler::locate_operand(addr(), which);
+  *disp = (int32_t)new_disp;
+#endif // _LP64
+}
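
Both fixups recompute a RIP-relative disp32 so that, after the code buffer has been copied, the instruction still points at the same absolute polling address; being biased by the instruction length does not matter because the same bias appears when decoding and when re-encoding. The arithmetic, as a sketch:

  #include <cstdint>

  // Recover the (biased) absolute target from the old instruction address
  // and displacement, then re-express it relative to the new address.
  static int32_t relocate_disp32(intptr_t old_insn, int32_t old_disp,
                                 intptr_t new_insn) {
    intptr_t target = old_insn + old_disp;
    return (int32_t)(target - new_insn);
  }
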
diff --git a/src/cpu/x86/vm/runtime_x86_32.cpp b/src/cpu/x86/vm/runtime_x86_32.cpp
index d8d190936..108bbee13 100644
--- a/src/cpu/x86/vm/runtime_x86_32.cpp
+++ b/src/cpu/x86/vm/runtime_x86_32.cpp
@@ -78,18 +78,18 @@ void OptoRuntime::generate_exception_blob() {
address start = __ pc();
- __ pushl(rdx);
- __ subl(rsp, return_off * wordSize); // Prolog!
+ __ push(rdx);
+ __ subptr(rsp, return_off * wordSize); // Prolog!
// rbp, location is implicitly known
- __ movl(Address(rsp,rbp_off *wordSize),rbp);
+ __ movptr(Address(rsp,rbp_off *wordSize), rbp);
// Store exception in Thread object. We cannot pass any arguments to the
// handle_exception call, since we do not want to make any assumption
// about the size of the frame where the exception happened in.
__ get_thread(rcx);
- __ movl(Address(rcx, JavaThread::exception_oop_offset()), rax);
- __ movl(Address(rcx, JavaThread::exception_pc_offset()), rdx);
+ __ movptr(Address(rcx, JavaThread::exception_oop_offset()), rax);
+ __ movptr(Address(rcx, JavaThread::exception_pc_offset()), rdx);
// This call does all the hard work. It checks if an exception handler
// exists in the method.
@@ -97,7 +97,7 @@ void OptoRuntime::generate_exception_blob() {
// If not, it prepares for stack-unwinding, restoring the callee-save
// registers of the frame being removed.
//
- __ movl(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
+ __ movptr(Address(rsp, thread_off * wordSize), rcx); // Thread is first argument
__ set_last_Java_frame(rcx, noreg, noreg, NULL);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
@@ -108,10 +108,10 @@ void OptoRuntime::generate_exception_blob() {
__ reset_last_Java_frame(rcx, false, false);
// Restore callee-saved registers
- __ movl(rbp, Address(rsp, rbp_off * wordSize));
+ __ movptr(rbp, Address(rsp, rbp_off * wordSize));
- __ addl(rsp, return_off * wordSize); // Epilog!
- __ popl(rdx); // Exception pc
+ __ addptr(rsp, return_off * wordSize); // Epilog!
+ __ pop(rdx); // Exception pc
// rax,: exception handler for given <exception oop/exception pc>
@@ -119,23 +119,23 @@ void OptoRuntime::generate_exception_blob() {
// We have a handler in rax, (could be deopt blob)
// rdx - throwing pc, deopt blob will need it.
- __ pushl(rax);
+ __ push(rax);
// rcx contains handler address
__ get_thread(rcx); // TLS
// Get the exception
- __ movl(rax, Address(rcx, JavaThread::exception_oop_offset()));
+ __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
// Get the exception pc in case we are deoptimized
- __ movl(rdx, Address(rcx, JavaThread::exception_pc_offset()));
+ __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
#ifdef ASSERT
- __ movl(Address(rcx, JavaThread::exception_handler_pc_offset()), 0);
- __ movl(Address(rcx, JavaThread::exception_pc_offset()), 0);
+ __ movptr(Address(rcx, JavaThread::exception_handler_pc_offset()), (int32_t)NULL_WORD);
+ __ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
#endif
// Clear the exception oop so GC no longer processes it as a root.
- __ movl(Address(rcx, JavaThread::exception_oop_offset()), 0);
+ __ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
- __ popl(rcx);
+ __ pop(rcx);
// rax,: exception oop
// rcx: exception handler
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
index 85befcf4e..7c0aa6bf5 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_32.cpp
@@ -118,12 +118,12 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
// save registers, fpu state, and flags
  // We assume the caller already has the return address slot on the stack
  // We push ebp twice in this sequence because we want the real rbp,
- // to be under the return like a normal enter and we want to use pushad
+ // to be under the return like a normal enter and we want to use pusha
  // We push by hand instead of using push
__ enter();
- __ pushad();
- __ pushfd();
- __ subl(rsp,FPU_regs_live*sizeof(jdouble)); // Push FPU registers space
+ __ pusha();
+ __ pushf();
+ __ subptr(rsp,FPU_regs_live*sizeof(jdouble)); // Push FPU registers space
__ push_FPU_state(); // Save FPU state & init
if (verify_fpu) {
@@ -270,12 +270,12 @@ void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
__ movdbl(xmm7,Address(rsp,xmm7_off*wordSize));
}
__ pop_FPU_state();
- __ addl(rsp,FPU_regs_live*sizeof(jdouble)); // Pop FPU registers
+ __ addptr(rsp, FPU_regs_live*sizeof(jdouble)); // Pop FPU registers
- __ popfd();
- __ popad();
+ __ popf();
+ __ popa();
// Get the rbp, described implicitly by the frame sender code (no oopMap)
- __ popl(rbp);
+ __ pop(rbp);
}
@@ -296,10 +296,10 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
} else if( UseSSE >= 2 ) {
__ movdbl(xmm0, Address(rsp, xmm0_off*wordSize));
}
- __ movl(rax, Address(rsp, rax_off*wordSize));
- __ movl(rdx, Address(rsp, rdx_off*wordSize));
+ __ movptr(rax, Address(rsp, rax_off*wordSize));
+ __ movptr(rdx, Address(rsp, rdx_off*wordSize));
  // Pop all of the register save area off the stack except the return address
- __ addl(rsp, return_off * wordSize);
+ __ addptr(rsp, return_off * wordSize);
}
// The java_calling_convention describes stack locations as ideal slots on
@@ -448,22 +448,22 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
static void patch_callers_callsite(MacroAssembler *masm) {
Label L;
__ verify_oop(rbx);
- __ cmpl(Address(rbx, in_bytes(methodOopDesc::code_offset())), NULL_WORD);
+ __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
// Schedule the branch target address early.
// Call into the VM to patch the caller, then jump to compiled callee
// rax, isn't live so capture return address while we easily can
- __ movl(rax, Address(rsp, 0));
- __ pushad();
- __ pushfd();
+ __ movptr(rax, Address(rsp, 0));
+ __ pusha();
+ __ pushf();
if (UseSSE == 1) {
- __ subl(rsp, 2*wordSize);
+ __ subptr(rsp, 2*wordSize);
__ movflt(Address(rsp, 0), xmm0);
__ movflt(Address(rsp, wordSize), xmm1);
}
if (UseSSE >= 2) {
- __ subl(rsp, 4*wordSize);
+ __ subptr(rsp, 4*wordSize);
__ movdbl(Address(rsp, 0), xmm0);
__ movdbl(Address(rsp, 2*wordSize), xmm1);
}
@@ -477,26 +477,26 @@ static void patch_callers_callsite(MacroAssembler *masm) {
#endif /* COMPILER2 */
// VM needs caller's callsite
- __ pushl(rax);
+ __ push(rax);
// VM needs target method
- __ pushl(rbx);
+ __ push(rbx);
__ verify_oop(rbx);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
- __ addl(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
if (UseSSE == 1) {
__ movflt(xmm0, Address(rsp, 0));
__ movflt(xmm1, Address(rsp, wordSize));
- __ addl(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
}
if (UseSSE >= 2) {
__ movdbl(xmm0, Address(rsp, 0));
__ movdbl(xmm1, Address(rsp, 2*wordSize));
- __ addl(rsp, 4*wordSize);
+ __ addptr(rsp, 4*wordSize);
}
- __ popfd();
- __ popad();
+ __ popf();
+ __ popa();
__ bind(L);
}
@@ -506,13 +506,13 @@ static void tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
if (TaggedStackInterpreter) {
int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
if (sig == T_OBJECT || sig == T_ARRAY) {
- __ movl(Address(rsp, tag_offset), frame::TagReference);
+ __ movptr(Address(rsp, tag_offset), frame::TagReference);
} else if (sig == T_LONG || sig == T_DOUBLE) {
int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
- __ movl(Address(rsp, next_tag_offset), frame::TagValue);
- __ movl(Address(rsp, tag_offset), frame::TagValue);
+ __ movptr(Address(rsp, next_tag_offset), frame::TagValue);
+ __ movptr(Address(rsp, tag_offset), frame::TagValue);
} else {
- __ movl(Address(rsp, tag_offset), frame::TagValue);
+ __ movptr(Address(rsp, tag_offset), frame::TagValue);
}
}
}
@@ -561,12 +561,12 @@ static void gen_c2i_adapter(MacroAssembler *masm,
int extraspace = total_args_passed * Interpreter::stackElementSize();
// Get return address
- __ popl(rax);
+ __ pop(rax);
// set senderSP value
- __ movl(rsi, rsp);
+ __ movptr(rsi, rsp);
- __ subl(rsp, extraspace);
+ __ subptr(rsp, extraspace);
// Now write the args into the outgoing interpreter space
for (int i = 0; i < total_args_passed; i++) {
@@ -577,6 +577,8 @@ static void gen_c2i_adapter(MacroAssembler *masm,
// st_off points to lowest address on stack.
int st_off = ((total_args_passed - 1) - i) * Interpreter::stackElementSize();
+ int next_off = st_off - Interpreter::stackElementSize();
+
// Say 4 args:
// i st_off
// 0 12 T_LONG
@@ -596,18 +598,25 @@ static void gen_c2i_adapter(MacroAssembler *masm,
if (!r_2->is_valid()) {
__ movl(rdi, Address(rsp, ld_off));
- __ movl(Address(rsp, st_off), rdi);
+ __ movptr(Address(rsp, st_off), rdi);
tag_stack(masm, sig_bt[i], st_off);
} else {
// ld_off == LSW, ld_off+VMRegImpl::stack_slot_size == MSW
// st_off == MSW, st_off-wordSize == LSW
- int next_off = st_off - Interpreter::stackElementSize();
- __ movl(rdi, Address(rsp, ld_off));
- __ movl(Address(rsp, next_off), rdi);
- __ movl(rdi, Address(rsp, ld_off + wordSize));
- __ movl(Address(rsp, st_off), rdi);
+ __ movptr(rdi, Address(rsp, ld_off));
+ __ movptr(Address(rsp, next_off), rdi);
+#ifndef _LP64
+ __ movptr(rdi, Address(rsp, ld_off + wordSize));
+ __ movptr(Address(rsp, st_off), rdi);
+#else
+#ifdef ASSERT
+ // Overwrite the unused slot with known junk
+ __ mov64(rax, CONST64(0xdeadffffdeadaaaa));
+ __ movptr(Address(rsp, st_off), rax);
+#endif /* ASSERT */
+#endif // _LP64
tag_stack(masm, sig_bt[i], next_off);
}
} else if (r_1->is_Register()) {
@@ -617,7 +626,22 @@ static void gen_c2i_adapter(MacroAssembler *masm,
tag_stack(masm, sig_bt[i], st_off);
} else {
// long/double in gpr
- ShouldNotReachHere();
+ NOT_LP64(ShouldNotReachHere());
+ // Two VMRegs can be T_OBJECT, T_ADDRESS, T_DOUBLE, T_LONG
+ // T_DOUBLE and T_LONG use two slots in the interpreter
+ if ( sig_bt[i] == T_LONG || sig_bt[i] == T_DOUBLE) {
+ // long/double in gpr
+#ifdef ASSERT
+ // Overwrite the unused slot with known junk
+ LP64_ONLY(__ mov64(rax, CONST64(0xdeadffffdeadaaab)));
+ __ movptr(Address(rsp, st_off), rax);
+#endif /* ASSERT */
+ __ movptr(Address(rsp, next_off), r);
+ tag_stack(masm, sig_bt[i], next_off);
+ } else {
+ __ movptr(Address(rsp, st_off), r);
+ tag_stack(masm, sig_bt[i], st_off);
+ }
}
} else {
assert(r_1->is_XMMRegister(), "");
@@ -632,9 +656,9 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}
// Schedule the branch target address early.
- __ movl(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
+ __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
// And repush original return address
- __ pushl(rax);
+ __ push(rax);
__ jmp(rcx);
}
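
On the 64-bit paths above a long or double occupies only the lower of the interpreter's two stack slots, and the unused upper slot is filled with a recognizable junk value in debug builds. A sketch of that layout (the junk constant is the one used above; NDEBUG stands in for HotSpot's ASSERT):

  #include <cstdint>
  #include <cstring>

  // Store a 64-bit argument the way the interpreter expects: the value in
  // the lower slot (next_off), junk in the unused upper slot (st_off).
  static void store_two_slot_arg(unsigned char* frame, int st_off,
                                 int next_off, uint64_t value) {
    std::memcpy(frame + next_off, &value, sizeof(value));
  #ifndef NDEBUG
    const uint64_t junk = 0xdeadffffdeadaaaaULL;
    std::memcpy(frame + st_off, &junk, sizeof(junk));
  #endif
  }
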
@@ -645,11 +669,11 @@ static void move_i2c_double(MacroAssembler *masm, XMMRegister r, Register saved_
int next_val_off = ld_off - Interpreter::stackElementSize();
if (TaggedStackInterpreter) {
// use tag slot temporarily for MSW
- __ movl(rsi, Address(saved_sp, ld_off));
- __ movl(Address(saved_sp, next_val_off+wordSize), rsi);
+ __ movptr(rsi, Address(saved_sp, ld_off));
+ __ movptr(Address(saved_sp, next_val_off+wordSize), rsi);
__ movdbl(r, Address(saved_sp, next_val_off));
// restore tag
- __ movl(Address(saved_sp, next_val_off+wordSize), frame::TagValue);
+ __ movptr(Address(saved_sp, next_val_off+wordSize), frame::TagValue);
} else {
__ movdbl(r, Address(saved_sp, next_val_off));
}
@@ -685,7 +709,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// code goes non-entrant while we get args ready.
// Pick up the return address
- __ movl(rax, Address(rsp, 0));
+ __ movptr(rax, Address(rsp, 0));
// If UseSSE >= 2 then no cleanup is needed on the return to the
// interpreter so skip fixing up the return entry point unless
@@ -696,10 +720,10 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// cleanup than if the interpreter returned to the call stub.
ExternalAddress stub_return_address(StubRoutines::_call_stub_return_address);
- __ cmp32(rax, stub_return_address.addr());
+ __ cmpptr(rax, stub_return_address.addr());
__ jcc(Assembler::notEqual, chk_int);
- assert(StubRoutines::i486::get_call_stub_compiled_return() != NULL, "must be set");
- __ lea(rax, ExternalAddress(StubRoutines::i486::get_call_stub_compiled_return()));
+ assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
+ __ lea(rax, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
__ jmp(skip);
// It must be the interpreter since we never get here via a c2i (unlike Azul)
@@ -708,13 +732,13 @@ static void gen_i2c_adapter(MacroAssembler *masm,
#ifdef ASSERT
{
Label ok;
- __ cmpl(Address(rax, -8), Interpreter::return_sentinel);
+ __ cmpl(Address(rax, -2*wordSize), Interpreter::return_sentinel);
__ jcc(Assembler::equal, ok);
__ int3();
__ bind(ok);
}
#endif // ASSERT
- __ movl(rax, Address(rax, -4));
+ __ movptr(rax, Address(rax, -wordSize));
__ bind(skip);
}
@@ -723,7 +747,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// Must preserve original SP for loading incoming arguments because
// we need to align the outgoing SP for compiled code.
- __ movl(rdi, rsp);
+ __ movptr(rdi, rsp);
// Cut-out for having no stack args. Since up to 2 int/oop args are passed
// in registers, we will occasionally have no stack args.
@@ -737,24 +761,24 @@ static void gen_i2c_adapter(MacroAssembler *masm,
comp_words_on_stack = round_to(comp_args_on_stack*4, wordSize)>>LogBytesPerWord;
  // Round up to minimum stack alignment, in wordSize
comp_words_on_stack = round_to(comp_words_on_stack, 2);
- __ subl(rsp, comp_words_on_stack * wordSize);
+ __ subptr(rsp, comp_words_on_stack * wordSize);
}
// Align the outgoing SP
- __ andl(rsp, -(StackAlignmentInBytes));
+ __ andptr(rsp, -(StackAlignmentInBytes));
// push the return address on the stack (note that pushing, rather
// than storing it, yields the correct frame alignment for the callee)
- __ pushl(rax);
+ __ push(rax);
// Put saved SP in another register
const Register saved_sp = rax;
- __ movl(saved_sp, rdi);
+ __ movptr(saved_sp, rdi);
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
- __ movl(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
+ __ movptr(rdi, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
// Now generate the shuffle code. Pick up all register args and move the
// rest through the floating point stack top.
@@ -794,7 +818,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// __ fld_s(Address(saved_sp, ld_off));
// __ fstp_s(Address(rsp, st_off));
__ movl(rsi, Address(saved_sp, ld_off));
- __ movl(Address(rsp, st_off), rsi);
+ __ movptr(Address(rsp, st_off), rsi);
} else {
// Interpreter local[n] == MSW, local[n+1] == LSW however locals
// are accessed as negative so LSW is at LOW address
@@ -803,20 +827,44 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// st_off is LSW (i.e. reg.first())
// __ fld_d(Address(saved_sp, next_off));
// __ fstp_d(Address(rsp, st_off));
- __ movl(rsi, Address(saved_sp, next_off));
- __ movl(Address(rsp, st_off), rsi);
- __ movl(rsi, Address(saved_sp, ld_off));
- __ movl(Address(rsp, st_off + wordSize), rsi);
+ //
+      // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
+      // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
+      // so we must adjust where to pick up the data to match the interpreter.
+ //
+ // Interpreter local[n] == MSW, local[n+1] == LSW however locals
+ // are accessed as negative so LSW is at LOW address
+
+ // ld_off is MSW so get LSW
+ const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
+ next_off : ld_off;
+ __ movptr(rsi, Address(saved_sp, offset));
+ __ movptr(Address(rsp, st_off), rsi);
+#ifndef _LP64
+ __ movptr(rsi, Address(saved_sp, ld_off));
+ __ movptr(Address(rsp, st_off + wordSize), rsi);
+#endif // _LP64
}
} else if (r_1->is_Register()) { // Register argument
Register r = r_1->as_Register();
assert(r != rax, "must be different");
if (r_2->is_valid()) {
+ //
+      // We are using two VMRegs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE;
+      // the interpreter allocates two slots but only uses one for the T_LONG or T_DOUBLE case,
+      // so we must adjust where to pick up the data to match the interpreter.
+
+ const int offset = (NOT_LP64(true ||) sig_bt[i]==T_LONG||sig_bt[i]==T_DOUBLE)?
+ next_off : ld_off;
+
+ // this can be a misaligned move
+ __ movptr(r, Address(saved_sp, offset));
+#ifndef _LP64
assert(r_2->as_Register() != rax, "need another temporary register");
// Remember r_1 is low address (and LSB on x86)
// So r_2 gets loaded from high address regardless of the platform
- __ movl(r_2->as_Register(), Address(saved_sp, ld_off));
- __ movl(r, Address(saved_sp, next_off));
+ __ movptr(r_2->as_Register(), Address(saved_sp, ld_off));
+#endif // _LP64
} else {
__ movl(r, Address(saved_sp, ld_off));
}
@@ -841,13 +889,13 @@ static void gen_i2c_adapter(MacroAssembler *masm,
  // and the vm will find it there should this case occur.
__ get_thread(rax);
- __ movl(Address(rax, JavaThread::callee_target_offset()), rbx);
+ __ movptr(Address(rax, JavaThread::callee_target_offset()), rbx);
// move methodOop to rax, in case we end up in an c2i adapter.
// the c2i adapters expect methodOop in rax, (c2) because c2's
// resolve stubs return the result (the method) in rax,.
// I'd love to fix this.
- __ movl(rax, rbx);
+ __ mov(rax, rbx);
__ jmp(rdi);
}
@@ -883,16 +931,16 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
Label missed;
__ verify_oop(holder);
- __ movl(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ __ movptr(temp, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ verify_oop(temp);
- __ cmpl(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
- __ movl(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
+ __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
+ __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
__ jcc(Assembler::notEqual, missed);
// Method might have been compiled since the call site was patched to
// interpreted if that is the case treat it as a miss so we can get
// the call site corrected.
- __ cmpl(Address(rbx, in_bytes(methodOopDesc::code_offset())), NULL_WORD);
+ __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
__ bind(missed);
@@ -953,17 +1001,20 @@ static void simple_move32(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
// stack to stack
// __ ld(FP, reg2offset(src.first()) + STACK_BIAS, L5);
// __ st(L5, SP, reg2offset(dst.first()) + STACK_BIAS);
- __ movl(rax, Address(rbp, reg2offset_in(src.first())));
- __ movl(Address(rsp, reg2offset_out(dst.first())), rax);
+ __ movl2ptr(rax, Address(rbp, reg2offset_in(src.first())));
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
// stack to reg
- __ movl(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
+ __ movl2ptr(dst.first()->as_Register(), Address(rbp, reg2offset_in(src.first())));
}
} else if (dst.first()->is_stack()) {
// reg to stack
- __ movl(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
+ // no need to sign extend on 64bit
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), src.first()->as_Register());
} else {
- __ movl(dst.first()->as_Register(), src.first()->as_Register());
+ if (dst.first() != src.first()) {
+ __ mov(dst.first()->as_Register(), src.first()->as_Register());
+ }
}
}
@@ -987,12 +1038,12 @@ static void object_move(MacroAssembler* masm,
// Oop is already on the stack as an argument
Register rHandle = rax;
Label nil;
- __ xorl(rHandle, rHandle);
- __ cmpl(Address(rbp, reg2offset_in(src.first())), NULL_WORD);
+ __ xorptr(rHandle, rHandle);
+ __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, nil);
- __ leal(rHandle, Address(rbp, reg2offset_in(src.first())));
+ __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
__ bind(nil);
- __ movl(Address(rsp, reg2offset_out(dst.first())), rHandle);
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
int offset_in_older_frame = src.first()->reg2stack() + SharedRuntime::out_preserve_stack_slots();
map->set_oop(VMRegImpl::stack2reg(offset_in_older_frame + framesize_in_slots));
@@ -1007,15 +1058,15 @@ static void object_move(MacroAssembler* masm,
int oop_slot = (rOop == rcx ? 0 : 1) * VMRegImpl::slots_per_word + oop_handle_offset;
int offset = oop_slot*VMRegImpl::stack_slot_size;
Label skip;
- __ movl(Address(rsp, offset), rOop);
+ __ movptr(Address(rsp, offset), rOop);
map->set_oop(VMRegImpl::stack2reg(oop_slot));
- __ xorl(rHandle, rHandle);
- __ cmpl(rOop, NULL_WORD);
+ __ xorptr(rHandle, rHandle);
+ __ cmpptr(rOop, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, skip);
- __ leal(rHandle, Address(rsp, offset));
+ __ lea(rHandle, Address(rsp, offset));
__ bind(skip);
// Store the handle parameter
- __ movl(Address(rsp, reg2offset_out(dst.first())), rHandle);
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
if (is_receiver) {
*receiver_offset = offset;
}
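
object_move "handlizes" oop arguments for the native call: a null oop is passed as NULL, otherwise the oop is spilled to a stack slot that the oop map covers and the address of that slot is passed instead. Roughly, as a sketch (oop here is just an opaque pointer, not HotSpot's type):

  typedef void* oop;

  // Returns NULL for a null oop; otherwise stores the oop into 'slot'
  // (which the GC can find via the oop map in the real code) and returns
  // the slot's address as the handle the native code receives.
  static oop* handlize(oop value, oop* slot) {
    if (value == nullptr) return nullptr;
    *slot = value;
    return slot;
  }
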
@@ -1033,7 +1084,7 @@ static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
__ movl(rax, Address(rbp, reg2offset_in(src.first())));
- __ movl(Address(rsp, reg2offset_out(dst.first())), rax);
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
// reg to stack
__ movflt(Address(rsp, reg2offset_out(dst.first())), src.first()->as_XMMRegister());
@@ -1050,10 +1101,10 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack() && dst.first()->is_stack()) {
assert(src.second()->is_stack() && dst.second()->is_stack(), "must be all stack");
- __ movl(rax, Address(rbp, reg2offset_in(src.first())));
- __ movl(rbx, Address(rbp, reg2offset_in(src.second())));
- __ movl(Address(rsp, reg2offset_out(dst.first())), rax);
- __ movl(Address(rsp, reg2offset_out(dst.second())), rbx);
+ __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
+ NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
+ NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
} else {
ShouldNotReachHere();
}
@@ -1074,10 +1125,10 @@ static void double_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
// source is all stack
- __ movl(rax, Address(rbp, reg2offset_in(src.first())));
- __ movl(rbx, Address(rbp, reg2offset_in(src.second())));
- __ movl(Address(rsp, reg2offset_out(dst.first())), rax);
- __ movl(Address(rsp, reg2offset_out(dst.second())), rbx);
+ __ movptr(rax, Address(rbp, reg2offset_in(src.first())));
+ NOT_LP64(__ movptr(rbx, Address(rbp, reg2offset_in(src.second()))));
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
+ NOT_LP64(__ movptr(Address(rsp, reg2offset_out(dst.second())), rbx));
} else {
// reg to stack
// No worries about stack alignment
@@ -1098,11 +1149,11 @@ void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type,
break;
case T_VOID: break;
case T_LONG:
- __ movl(Address(rbp, -wordSize), rax);
- __ movl(Address(rbp, -2*wordSize), rdx);
+ __ movptr(Address(rbp, -wordSize), rax);
+ NOT_LP64(__ movptr(Address(rbp, -2*wordSize), rdx));
break;
default: {
- __ movl(Address(rbp, -wordSize), rax);
+ __ movptr(Address(rbp, -wordSize), rax);
}
}
}
@@ -1118,12 +1169,12 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
__ fld_d(Address(rbp, -2*wordSize));
break;
case T_LONG:
- __ movl(rax, Address(rbp, -wordSize));
- __ movl(rdx, Address(rbp, -2*wordSize));
+ __ movptr(rax, Address(rbp, -wordSize));
+ NOT_LP64(__ movptr(rdx, Address(rbp, -2*wordSize)));
break;
case T_VOID: break;
default: {
- __ movl(rax, Address(rbp, -wordSize));
+ __ movptr(rax, Address(rbp, -wordSize));
}
}
}
@@ -1268,7 +1319,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ verify_oop(receiver);
- __ cmpl(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
+ __ cmpptr(ic_reg, Address(receiver, oopDesc::klass_offset_in_bytes()));
__ jcc(Assembler::equal, hit);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -1291,23 +1342,23 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
Label slowCase;
Register receiver = rcx;
Register result = rax;
- __ movl(result, Address(receiver, oopDesc::mark_offset_in_bytes()));
+ __ movptr(result, Address(receiver, oopDesc::mark_offset_in_bytes()));
// check if locked
- __ testl (result, markOopDesc::unlocked_value);
+ __ testptr(result, markOopDesc::unlocked_value);
__ jcc (Assembler::zero, slowCase);
if (UseBiasedLocking) {
// Check if biased and fall through to runtime if so
- __ testl (result, markOopDesc::biased_lock_bit_in_place);
+ __ testptr(result, markOopDesc::biased_lock_bit_in_place);
__ jcc (Assembler::notZero, slowCase);
}
// get hash
- __ andl (result, markOopDesc::hash_mask_in_place);
+ __ andptr(result, markOopDesc::hash_mask_in_place);
// test if hashCode exists
__ jcc (Assembler::zero, slowCase);
- __ shrl (result, markOopDesc::hash_shift);
+ __ shrptr(result, markOopDesc::hash_shift);
__ ret(0);
__ bind (slowCase);
}
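
The inlined hashCode fast path reads the mark word, bails out to the runtime if the object is locked, biased, or has no cached hash, and otherwise masks and shifts the hash out of the mark. The same bit logic as a sketch with placeholder constants (the real shift and mask come from markOopDesc):

  #include <cstdint>

  static const uint64_t kUnlockedValue   = 0x1;  // placeholder lock bits
  static const uint64_t kBiasedLockBit   = 0x4;  // placeholder bias bit
  static const int      kHashShift       = 8;    // placeholder layout
  static const uint64_t kHashMaskInPlace = 0x7FFFFFFFull << kHashShift;

  // Returns true and the cached identity hash on the fast path,
  // false when the runtime (slow case) must compute it.
  static bool fast_identity_hash(uint64_t mark, uint64_t* hash_out) {
    if ((mark & kUnlockedValue) == 0) return false;  // locked
    if ((mark & kBiasedLockBit) != 0) return false;  // biased
    uint64_t h = mark & kHashMaskInPlace;
    if (h == 0) return false;                        // no hash installed yet
    *hash_out = h >> kHashShift;
    return true;
  }
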
@@ -1329,7 +1380,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Generate a new frame for the wrapper.
__ enter();
// -2 because return address is already present and so is saved rbp,
- __ subl(rsp, stack_size - 2*wordSize);
+ __ subptr(rsp, stack_size - 2*wordSize);
// Frame is now completed as far a size and linkage.
@@ -1450,13 +1501,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
// Now handlize the static class mirror it's known not-null.
- __ movl(Address(rsp, klass_offset), oop_handle_reg);
+ __ movptr(Address(rsp, klass_offset), oop_handle_reg);
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
// Now get the handle
- __ leal(oop_handle_reg, Address(rsp, klass_offset));
+ __ lea(oop_handle_reg, Address(rsp, klass_offset));
// store the klass handle as second argument
- __ movl(Address(rsp, wordSize), oop_handle_reg);
+ __ movptr(Address(rsp, wordSize), oop_handle_reg);
}
// Change state to native (we save the return address in the thread, since it might not
@@ -1497,14 +1548,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
// Get the handle (the 2nd argument)
- __ movl(oop_handle_reg, Address(rsp, wordSize));
+ __ movptr(oop_handle_reg, Address(rsp, wordSize));
// Get address of the box
- __ leal(lock_reg, Address(rbp, lock_slot_rbp_offset));
+ __ lea(lock_reg, Address(rbp, lock_slot_rbp_offset));
// Load the oop from the handle
- __ movl(obj_reg, Address(oop_handle_reg, 0));
+ __ movptr(obj_reg, Address(oop_handle_reg, 0));
if (UseBiasedLocking) {
// Note that oop_handle_reg is trashed during this call
@@ -1512,13 +1563,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
// Load immediate 1 into swap_reg %rax,
- __ movl(swap_reg, 1);
+ __ movptr(swap_reg, 1);
// Load (object->mark() | 1) into swap_reg %rax,
- __ orl(swap_reg, Address(obj_reg, 0));
+ __ orptr(swap_reg, Address(obj_reg, 0));
// Save (object->mark() | 1) into BasicLock's displaced header
- __ movl(Address(lock_reg, mark_word_offset), swap_reg);
+ __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
if (os::is_MP()) {
__ lock();
@@ -1526,7 +1577,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// src -> dest iff dest == rax, else rax, <- dest
// *obj_reg = lock_reg iff *obj_reg == rax, else rax, = *(obj_reg)
- __ cmpxchg(lock_reg, Address(obj_reg, 0));
+ __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
__ jcc(Assembler::equal, lock_done);
// Test if the oopMark is an obvious stack pointer, i.e.,
@@ -1538,18 +1589,18 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// least significant 2 bits clear.
// NOTE: the oopMark is in swap_reg %rax, as the result of cmpxchg
- __ subl(swap_reg, rsp);
- __ andl(swap_reg, 3 - os::vm_page_size());
+ __ subptr(swap_reg, rsp);
+ __ andptr(swap_reg, 3 - os::vm_page_size());
// Save the test result, for recursive case, the result is zero
- __ movl(Address(lock_reg, mark_word_offset), swap_reg);
+ __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
__ jcc(Assembler::notEqual, slow_path_lock);
// Slow path will re-enter here
__ bind(lock_done);
if (UseBiasedLocking) {
// Re-fetch oop_handle_reg as we trashed it above
- __ movl(oop_handle_reg, Address(rsp, wordSize));
+ __ movptr(oop_handle_reg, Address(rsp, wordSize));
}
}
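
This is the lightweight-lock fast path: try to CAS the address of the on-stack BasicLock into the object's mark word, and if the CAS fails, check whether the mark already points into the current thread's stack, which means the lock is held recursively. The recursion test above ('3 - vm_page_size()' used as a mask) can be sketched as:

  #include <cstdint>

  // After a failed CAS 'mark' holds the current mark word.  If the mark,
  // viewed as a stack address, lies within roughly a page of 'sp' and has
  // its low two bits clear, this thread already owns the lock; the zero
  // result is then stored as the displaced header to mark recursion.
  static bool is_recursive_stack_lock(uintptr_t mark, uintptr_t sp,
                                      intptr_t page_size) {
    uintptr_t masked = (mark - sp) & (uintptr_t)(3 - page_size);
    return masked == 0;
  }
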
@@ -1559,8 +1610,8 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// get JNIEnv* which is first argument to native
- __ leal(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
- __ movl(Address(rsp, 0), rdx);
+ __ lea(rdx, Address(thread, in_bytes(JavaThread::jni_environment_offset())));
+ __ movptr(Address(rsp, 0), rdx);
// Now set thread in native
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native);
@@ -1575,7 +1626,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Unpack native results.
switch (ret_type) {
case T_BOOLEAN: __ c2bool(rax); break;
- case T_CHAR : __ andl(rax, 0xFFFF); break;
+ case T_CHAR : __ andptr(rax, 0xFFFF); break;
case T_BYTE : __ sign_extend_byte (rax); break;
case T_SHORT : __ sign_extend_short(rax); break;
case T_INT : /* nothing to do */ break;
@@ -1602,7 +1653,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
if(os::is_MP()) {
if (UseMembar) {
- __ membar(); // Force this write out before the read below
+ // Force this write out before the read below
+ __ membar(Assembler::Membar_mask_bits(
+ Assembler::LoadLoad | Assembler::LoadStore |
+ Assembler::StoreLoad | Assembler::StoreStore));
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
@@ -1636,7 +1690,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// by hand.
//
save_native_result(masm, ret_type, stack_slots);
- __ pushl(thread);
+ __ push(thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
JavaThread::check_special_condition_for_native_trans)));
__ increment(rsp, wordSize);
@@ -1669,7 +1723,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
Label done;
// Get locked oop from the handle we passed to jni
- __ movl(obj_reg, Address(oop_handle_reg, 0));
+ __ movptr(obj_reg, Address(oop_handle_reg, 0));
if (UseBiasedLocking) {
__ biased_locking_exit(obj_reg, rbx, done);
@@ -1677,7 +1731,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Simple recursive lock?
- __ cmpl(Address(rbp, lock_slot_rbp_offset), NULL_WORD);
+ __ cmpptr(Address(rbp, lock_slot_rbp_offset), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, done);
// Must save rax, if if it is live now because cmpxchg must use it
@@ -1686,10 +1740,10 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
// get old displaced header
- __ movl(rbx, Address(rbp, lock_slot_rbp_offset));
+ __ movptr(rbx, Address(rbp, lock_slot_rbp_offset));
// get address of the stack lock
- __ leal(rax, Address(rbp, lock_slot_rbp_offset));
+ __ lea(rax, Address(rbp, lock_slot_rbp_offset));
// Atomic swap old header if oop still contains the stack lock
if (os::is_MP()) {
@@ -1698,7 +1752,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// src -> dest iff dest == rax, else rax, <- dest
// *obj_reg = rbx, iff *obj_reg == rax, else rax, = *(obj_reg)
- __ cmpxchg(rbx, Address(obj_reg, 0));
+ __ cmpxchgptr(rbx, Address(obj_reg, 0));
__ jcc(Assembler::notEqual, slow_path_unlock);
// slow path re-enters here
@@ -1729,20 +1783,20 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Unpack oop result
if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
Label L;
- __ cmpl(rax, NULL_WORD);
+ __ cmpptr(rax, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
- __ movl(rax, Address(rax, 0));
+ __ movptr(rax, Address(rax, 0));
__ bind(L);
__ verify_oop(rax);
}
// reset handle block
- __ movl(rcx, Address(thread, JavaThread::active_handles_offset()));
+ __ movptr(rcx, Address(thread, JavaThread::active_handles_offset()));
- __ movl(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), 0);
+ __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
// Any exception pending?
- __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
+ __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, exception_pending);
@@ -1782,15 +1836,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// has last_Java_frame setup. No exceptions so do vanilla call not call_VM
// args are (oop obj, BasicLock* lock, JavaThread* thread)
- __ pushl(thread);
- __ pushl(lock_reg);
- __ pushl(obj_reg);
+ __ push(thread);
+ __ push(lock_reg);
+ __ push(obj_reg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C)));
- __ addl(rsp, 3*wordSize);
+ __ addptr(rsp, 3*wordSize);
#ifdef ASSERT
{ Label L;
- __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+ __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("no pending exception allowed on exit from monitorenter");
__ bind(L);
@@ -1810,29 +1864,29 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
}
// Save pending exception around call to VM (which contains an EXCEPTION_MARK)
- __ pushl(Address(thread, in_bytes(Thread::pending_exception_offset())));
- __ movl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
+ __ pushptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
+ __ movptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
// should be a peal
// +wordSize because of the push above
- __ leal(rax, Address(rbp, lock_slot_rbp_offset));
- __ pushl(rax);
+ __ lea(rax, Address(rbp, lock_slot_rbp_offset));
+ __ push(rax);
- __ pushl(obj_reg);
+ __ push(obj_reg);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
- __ addl(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
#ifdef ASSERT
{
Label L;
- __ cmpl(Address(thread, in_bytes(Thread::pending_exception_offset())), NULL_WORD);
+ __ cmpptr(Address(thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
__ bind(L);
}
#endif /* ASSERT */
- __ popl(Address(thread, in_bytes(Thread::pending_exception_offset())));
+ __ popptr(Address(thread, in_bytes(Thread::pending_exception_offset())));
if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
restore_native_result(masm, ret_type, stack_slots);
@@ -2320,7 +2374,7 @@ void SharedRuntime::generate_deopt_blob() {
map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
// Normal deoptimization
- __ pushl(Deoptimization::Unpack_deopt);
+ __ push(Deoptimization::Unpack_deopt);
__ jmp(cont);
int reexecute_offset = __ pc() - start;
@@ -2331,7 +2385,7 @@ void SharedRuntime::generate_deopt_blob() {
// No need to update map as each call to save_live_registers will produce identical oopmap
(void) RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words);
- __ pushl(Deoptimization::Unpack_reexecute);
+ __ push(Deoptimization::Unpack_reexecute);
__ jmp(cont);
int exception_offset = __ pc() - start;
@@ -2344,8 +2398,8 @@ void SharedRuntime::generate_deopt_blob() {
// unpack_with_exception_in_tls entry point.
__ get_thread(rdi);
- __ movl(Address(rdi, JavaThread::exception_pc_offset()), rdx);
- __ movl(Address(rdi, JavaThread::exception_oop_offset()), rax);
+ __ movptr(Address(rdi, JavaThread::exception_pc_offset()), rdx);
+ __ movptr(Address(rdi, JavaThread::exception_oop_offset()), rax);
int exception_in_tls_offset = __ pc() - start;
@@ -2360,7 +2414,7 @@ void SharedRuntime::generate_deopt_blob() {
// make room on stack for the return address
// It will be patched later with the throwing pc. The correct value is not
// available now because loading it from memory would destroy registers.
- __ pushl(0);
+ __ push(0);
// Save everything in sight.
@@ -2370,24 +2424,24 @@ void SharedRuntime::generate_deopt_blob() {
// Now it is safe to overwrite any register
// store the correct deoptimization type
- __ pushl(Deoptimization::Unpack_exception);
+ __ push(Deoptimization::Unpack_exception);
// load throwing pc from JavaThread and patch it as the return address
// of the current frame. Then clear the field in JavaThread
__ get_thread(rdi);
- __ movl(rdx, Address(rdi, JavaThread::exception_pc_offset()));
- __ movl(Address(rbp, wordSize), rdx);
- __ movl(Address(rdi, JavaThread::exception_pc_offset()), NULL_WORD);
+ __ movptr(rdx, Address(rdi, JavaThread::exception_pc_offset()));
+ __ movptr(Address(rbp, wordSize), rdx);
+ __ movptr(Address(rdi, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
#ifdef ASSERT
// verify that there is really an exception oop in JavaThread
- __ movl(rax, Address(rdi, JavaThread::exception_oop_offset()));
+ __ movptr(rax, Address(rdi, JavaThread::exception_oop_offset()));
__ verify_oop(rax);
// verify that there is no pending exception
Label no_pending_exception;
- __ movl(rax, Address(rdi, Thread::pending_exception_offset()));
- __ testl(rax, rax);
+ __ movptr(rax, Address(rdi, Thread::pending_exception_offset()));
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, no_pending_exception);
__ stop("must not have pending exception here");
__ bind(no_pending_exception);
@@ -2402,7 +2456,7 @@ void SharedRuntime::generate_deopt_blob() {
// Call C code. Need thread and this frame, but NOT official VM entry
// crud. We cannot block on this call, no GC can happen.
__ get_thread(rcx);
- __ pushl(rcx);
+ __ push(rcx);
// fetch_unroll_info needs to call last_java_frame()
__ set_last_Java_frame(rcx, noreg, noreg, NULL);
@@ -2414,35 +2468,35 @@ void SharedRuntime::generate_deopt_blob() {
oop_maps->add_gc_map( __ pc()-start, map);
// Discard arg to fetch_unroll_info
- __ popl(rcx);
+ __ pop(rcx);
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false, false);
// Load UnrollBlock into EDI
- __ movl(rdi, rax);
+ __ mov(rdi, rax);
// Move the unpack kind to a safe place in the UnrollBlock because
// we are very short of registers
Address unpack_kind(rdi, Deoptimization::UnrollBlock::unpack_kind_offset_in_bytes());
// retrieve the deopt kind from where we left it.
- __ popl(rax);
+ __ pop(rax);
__ movl(unpack_kind, rax); // save the unpack_kind value
Label noException;
__ cmpl(rax, Deoptimization::Unpack_exception); // Was exception pending?
__ jcc(Assembler::notEqual, noException);
- __ movl(rax, Address(rcx, JavaThread::exception_oop_offset()));
- __ movl(rdx, Address(rcx, JavaThread::exception_pc_offset()));
- __ movl(Address(rcx, JavaThread::exception_oop_offset()), NULL_WORD);
- __ movl(Address(rcx, JavaThread::exception_pc_offset()), NULL_WORD);
+ __ movptr(rax, Address(rcx, JavaThread::exception_oop_offset()));
+ __ movptr(rdx, Address(rcx, JavaThread::exception_pc_offset()));
+ __ movptr(Address(rcx, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
+ __ movptr(Address(rcx, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
__ verify_oop(rax);
// Overwrite the result registers with the exception results.
- __ movl(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
- __ movl(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
+ __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
+ __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
__ bind(noException);
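
The UnrollBlock returned by fetch_unroll_info (loaded into EDI above) drives everything that follows. Judging from the offset accessors used in this blob, its relevant fields look roughly like this (a paraphrase for orientation only; field names and types are guessed from the accessors, not copied from deoptimization.hpp):

    // Orientation sketch of the UnrollBlock fields this blob reads and writes.
    class UnrollBlock {
      int       _size_of_deoptimized_frame; // bytes to pop to discard frame (2)
      int       _caller_adjustment;         // extra space the caller must provide for locals
      int       _number_of_frames;          // how many skeletal interpreter frames to push
      intptr_t* _frame_sizes;               // size of each skeletal frame, in bytes
      address*  _frame_pcs;                 // return pc to plant in each pushed frame
      int       _unpack_kind;               // Unpack_deopt / Unpack_exception / ...
      intptr_t  _counter_temp;              // scratch: loop counter (registers are scarce here)
      intptr_t  _initial_fp;                // fp to install before pushing the first frame
      intptr_t  _sender_sp_temp;            // scratch: caller sp handed to the next frame
    };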
@@ -2467,7 +2521,7 @@ void SharedRuntime::generate_deopt_blob() {
// when we are done the return to frame 3 will still be on the stack.
// Pop deoptimized frame
- __ addl(rsp,Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
+ __ addptr(rsp, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
// sp should be pointing at the return address to the caller (3)
@@ -2478,12 +2532,12 @@ void SharedRuntime::generate_deopt_blob() {
}
// Load array of frame pcs into ECX
- __ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
+ __ movptr(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
- __ popl(rsi); // trash the old pc
+ __ pop(rsi); // trash the old pc
// Load array of frame sizes into ESI
- __ movl(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
+ __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
@@ -2491,7 +2545,7 @@ void SharedRuntime::generate_deopt_blob() {
__ movl(counter, rbx);
// Pick up the initial fp we should save
- __ movl(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
+ __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
@@ -2499,43 +2553,44 @@ void SharedRuntime::generate_deopt_blob() {
// value and not the "real" sp value.
Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
- __ movl(sp_temp, rsp);
- __ subl(rsp, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
+ __ movptr(sp_temp, rsp);
+ __ movl2ptr(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
+ __ subptr(rsp, rbx);
// Push interpreter frames in a loop
Label loop;
__ bind(loop);
- __ movl(rbx, Address(rsi, 0)); // Load frame size
+ __ movptr(rbx, Address(rsi, 0)); // Load frame size
#ifdef CC_INTERP
- __ subl(rbx, 4*wordSize); // we'll push pc and ebp by hand and
+ __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
#ifdef ASSERT
- __ pushl(0xDEADDEAD); // Make a recognizable pattern
- __ pushl(0xDEADDEAD);
+ __ push(0xDEADDEAD); // Make a recognizable pattern
+ __ push(0xDEADDEAD);
#else /* ASSERT */
- __ subl(rsp, 2*wordSize); // skip the "static long no_param"
+ __ subptr(rsp, 2*wordSize); // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
- __ subl(rbx, 2*wordSize); // we'll push pc and rbp, by hand
+ __ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand
#endif /* CC_INTERP */
- __ pushl(Address(rcx, 0)); // save return address
+ __ pushptr(Address(rcx, 0)); // save return address
__ enter(); // save old & set new rbp,
- __ subl(rsp, rbx); // Prolog!
- __ movl(rbx, sp_temp); // sender's sp
+ __ subptr(rsp, rbx); // Prolog!
+ __ movptr(rbx, sp_temp); // sender's sp
#ifdef CC_INTERP
- __ movl(Address(rbp,
+ __ movptr(Address(rbp,
-(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
rbx); // Make it walkable
#else /* CC_INTERP */
// This value is corrected by layout_activation_impl
- __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
- __ movl(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
+ __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
- __ movl(sp_temp, rsp); // pass to next frame
- __ addl(rsi, 4); // Bump array pointer (sizes)
- __ addl(rcx, 4); // Bump array pointer (pcs)
- __ decrement(counter); // decrement counter
+ __ movptr(sp_temp, rsp); // pass to next frame
+ __ addptr(rsi, wordSize); // Bump array pointer (sizes)
+ __ addptr(rcx, wordSize); // Bump array pointer (pcs)
+ __ decrementl(counter); // decrement counter
__ jcc(Assembler::notZero, loop);
- __ pushl(Address(rcx, 0)); // save final return address
+ __ pushptr(Address(rcx, 0)); // save final return address
// Re-push self-frame
__ enter(); // save old & set new rbp,
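
One conversion in this loop's prologue is not purely mechanical: the old subl(rsp, Address(...)) of caller_adjustment becomes a movl2ptr into rbx followed by subptr(rsp, rbx), since the field is a 32-bit int and must be widened before it can be applied to a pointer-sized rsp on 64-bit targets. A sketch of what such a widening helper can look like (illustrative, not a quote of the merged assembler):

    // Sketch: load a 32-bit field and widen it to pointer width.
    void movl2ptr(Register dst, Address src) {
      LP64_ONLY(movslq(dst, src))  // sign-extending 32->64 load on x86_64
      NOT_LP64(movl(dst, src));    // on 32-bit, int already is pointer width
    }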
@@ -2543,11 +2598,11 @@ void SharedRuntime::generate_deopt_blob() {
// Return address and rbp, are in place
// We'll push additional args later. Just allocate a full sized
// register save area
- __ subl(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
+ __ subptr(rsp, (frame_size_in_words-additional_words - 2) * wordSize);
// Restore frame locals after moving the frame
- __ movl(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
- __ movl(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
+ __ movptr(Address(rsp, RegisterSaver::raxOffset()*wordSize), rax);
+ __ movptr(Address(rsp, RegisterSaver::rdxOffset()*wordSize), rdx);
__ fstp_d(Address(rsp, RegisterSaver::fpResultOffset()*wordSize)); // Pop float stack and store in local
if( UseSSE>=2 ) __ movdbl(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
if( UseSSE==1 ) __ movflt(Address(rsp, RegisterSaver::xmm0Offset()*wordSize), xmm0);
@@ -2556,7 +2611,7 @@ void SharedRuntime::generate_deopt_blob() {
__ pushl(unpack_kind); // get the unpack_kind value
__ get_thread(rcx);
- __ pushl(rcx);
+ __ push(rcx);
// set last_Java_sp, last_Java_fp
__ set_last_Java_frame(rcx, noreg, rbp, NULL);
@@ -2569,14 +2624,14 @@ void SharedRuntime::generate_deopt_blob() {
oop_maps->add_gc_map( __ pc()-start, new OopMap( frame_size_in_words, 0 ));
// rax, contains the return result type
- __ pushl(rax);
+ __ push(rax);
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, false, false);
// Collect return values
- __ movl(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
- __ movl(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
+ __ movptr(rax,Address(rsp, (RegisterSaver::raxOffset() + additional_words + 1)*wordSize));
+ __ movptr(rdx,Address(rsp, (RegisterSaver::rdxOffset() + additional_words + 1)*wordSize));
// Clear floating point stack before returning to interpreter
__ empty_FPU_stack();
@@ -2637,12 +2692,12 @@ void SharedRuntime::generate_uncommon_trap_blob() {
address start = __ pc();
// Push self-frame.
- __ subl(rsp, return_off*wordSize); // Epilog!
+ __ subptr(rsp, return_off*wordSize); // Epilog!
// rbp, is an implicitly saved callee saved register (i.e. the calling
// convention will save/restore it in prolog/epilog) Other than that
// there are no callee save registers now that adapter frames are gone.
- __ movl(Address(rsp, rbp_off*wordSize),rbp);
+ __ movptr(Address(rsp, rbp_off*wordSize), rbp);
// Clear the floating point exception stack
__ empty_FPU_stack();
@@ -2654,7 +2709,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should
// capture callee-saved registers as well as return values.
- __ movl(Address(rsp, arg0_off*wordSize),rdx);
+ __ movptr(Address(rsp, arg0_off*wordSize), rdx);
// argument already in ECX
__ movl(Address(rsp, arg1_off*wordSize),rcx);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
@@ -2671,7 +2726,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ reset_last_Java_frame(rcx, false, false);
// Load UnrollBlock into EDI
- __ movl(rdi, rax);
+ __ movptr(rdi, rax);
// Pop all the frames we must move/replace.
//
@@ -2681,10 +2736,11 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// 3: caller of deopting frame (could be compiled/interpreted).
// Pop self-frame. We have no frame, and must rely only on EAX and ESP.
- __ addl(rsp,(framesize-1)*wordSize); // Epilog!
+ __ addptr(rsp,(framesize-1)*wordSize); // Epilog!
// Pop deoptimized frame
- __ addl(rsp,Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
+ __ movl2ptr(rcx, Address(rdi,Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
+ __ addptr(rsp, rcx);
// sp should be pointing at the return address to the caller (3)
@@ -2698,10 +2754,10 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Load array of frame pcs into ECX
__ movl(rcx,Address(rdi,Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
- __ popl(rsi); // trash the pc
+ __ pop(rsi); // trash the pc
// Load array of frame sizes into ESI
- __ movl(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
+ __ movptr(rsi,Address(rdi,Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
Address counter(rdi, Deoptimization::UnrollBlock::counter_temp_offset_in_bytes());
@@ -2709,7 +2765,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ movl(counter, rbx);
// Pick up the initial fp we should save
- __ movl(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
+ __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
@@ -2717,47 +2773,48 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// value and not the "real" sp value.
Address sp_temp(rdi, Deoptimization::UnrollBlock::sender_sp_temp_offset_in_bytes());
- __ movl(sp_temp, rsp);
- __ subl(rsp, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
+ __ movptr(sp_temp, rsp);
+ __ movl(rbx, Address(rdi, Deoptimization::UnrollBlock::caller_adjustment_offset_in_bytes()));
+ __ subptr(rsp, rbx);
// Push interpreter frames in a loop
Label loop;
__ bind(loop);
- __ movl(rbx, Address(rsi, 0)); // Load frame size
+ __ movptr(rbx, Address(rsi, 0)); // Load frame size
#ifdef CC_INTERP
- __ subl(rbx, 4*wordSize); // we'll push pc and ebp by hand and
+ __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
#ifdef ASSERT
- __ pushl(0xDEADDEAD); // Make a recognizable pattern
- __ pushl(0xDEADDEAD); // (parm to RecursiveInterpreter...)
+ __ push(0xDEADDEAD); // Make a recognizable pattern
+ __ push(0xDEADDEAD); // (parm to RecursiveInterpreter...)
#else /* ASSERT */
- __ subl(rsp, 2*wordSize); // skip the "static long no_param"
+ __ subptr(rsp, 2*wordSize); // skip the "static long no_param"
#endif /* ASSERT */
#else /* CC_INTERP */
- __ subl(rbx, 2*wordSize); // we'll push pc and rbp, by hand
+ __ subptr(rbx, 2*wordSize); // we'll push pc and rbp, by hand
#endif /* CC_INTERP */
- __ pushl(Address(rcx, 0)); // save return address
+ __ pushptr(Address(rcx, 0)); // save return address
__ enter(); // save old & set new rbp,
- __ subl(rsp, rbx); // Prolog!
- __ movl(rbx, sp_temp); // sender's sp
+ __ subptr(rsp, rbx); // Prolog!
+ __ movptr(rbx, sp_temp); // sender's sp
#ifdef CC_INTERP
- __ movl(Address(rbp,
+ __ movptr(Address(rbp,
-(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
rbx); // Make it walkable
#else /* CC_INTERP */
// This value is corrected by layout_activation_impl
- __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD );
- __ movl(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
+ __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), rbx); // Make it walkable
#endif /* CC_INTERP */
- __ movl(sp_temp, rsp); // pass to next frame
- __ addl(rsi, 4); // Bump array pointer (sizes)
- __ addl(rcx, 4); // Bump array pointer (pcs)
- __ decrement(counter); // decrement counter
+ __ movptr(sp_temp, rsp); // pass to next frame
+ __ addptr(rsi, wordSize); // Bump array pointer (sizes)
+ __ addptr(rcx, wordSize); // Bump array pointer (pcs)
+ __ decrementl(counter); // decrement counter
__ jcc(Assembler::notZero, loop);
- __ pushl(Address(rcx, 0)); // save final return address
+ __ pushptr(Address(rcx, 0)); // save final return address
// Re-push self-frame
__ enter(); // save old & set new rbp,
- __ subl(rsp, (framesize-2) * wordSize); // Prolog!
+ __ subptr(rsp, (framesize-2) * wordSize); // Prolog!
// set last_Java_sp, last_Java_fp
@@ -2767,7 +2824,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should
// restore return values to their stack-slots with the new SP.
- __ movl(Address(rsp,arg0_off*wordSize),rdi);
+ __ movptr(Address(rsp,arg0_off*wordSize),rdi);
__ movl(Address(rsp,arg1_off*wordSize), Deoptimization::Unpack_uncommon_trap);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
// Set an oopmap for the call site
@@ -2824,7 +2881,7 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
// Otherwise we push space for a return address that the safepoint
// handler will install later to make the stack walking sensible.
if( !cause_return )
- __ pushl(rbx); // Make room for return address (or push it again)
+ __ push(rbx); // Make room for return address (or push it again)
map = RegisterSaver::save_live_registers(masm, additional_words, &frame_size_in_words, false);
@@ -2834,13 +2891,13 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
// Push thread argument and setup last_Java_sp
__ get_thread(java_thread);
- __ pushl(java_thread);
+ __ push(java_thread);
__ set_last_Java_frame(java_thread, noreg, noreg, NULL);
// if this was not a poll_return then we need to correct the return address now.
if( !cause_return ) {
- __ movl(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
- __ movl(Address(rbp, wordSize), rax);
+ __ movptr(rax, Address(java_thread, JavaThread::saved_exception_pc_offset()));
+ __ movptr(Address(rbp, wordSize), rax);
}
// do the call
@@ -2854,7 +2911,7 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
oop_maps->add_gc_map( __ pc() - start, map);
// Discard arg
- __ popl(rcx);
+ __ pop(rcx);
Label noException;
@@ -2862,7 +2919,7 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
__ get_thread(java_thread);
__ reset_last_Java_frame(java_thread, false, false);
- __ cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, noException);
// Exception pending
@@ -2919,7 +2976,7 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
const Register thread = rdi;
__ get_thread(rdi);
- __ pushl(thread);
+ __ push(thread);
__ set_last_Java_frame(thread, noreg, rbp, NULL);
__ call(RuntimeAddress(destination));
@@ -2933,20 +2990,20 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
// rax, contains the address we are going to jump to assuming no exception got installed
- __ addl(rsp, wordSize);
+ __ addptr(rsp, wordSize);
// clear last_Java_sp
__ reset_last_Java_frame(thread, true, false);
// check for pending exceptions
Label pending;
- __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, pending);
// get the returned methodOop
- __ movl(rbx, Address(thread, JavaThread::vm_result_offset()));
- __ movl(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
+ __ movptr(rbx, Address(thread, JavaThread::vm_result_offset()));
+ __ movptr(Address(rsp, RegisterSaver::rbx_offset() * wordSize), rbx);
- __ movl(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
+ __ movptr(Address(rsp, RegisterSaver::rax_offset() * wordSize), rax);
RegisterSaver::restore_live_registers(masm);
@@ -2963,8 +3020,8 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
// exception pending => remove activation and forward to exception handler
__ get_thread(thread);
- __ movl(Address(thread, JavaThread::vm_result_offset()), NULL_WORD);
- __ movl(rax, Address(thread, Thread::pending_exception_offset()));
+ __ movptr(Address(thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
+ __ movptr(rax, Address(thread, Thread::pending_exception_offset()));
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// -------------
diff --git a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
index b2d4a6513..26e7e1489 100644
--- a/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
+++ b/src/cpu/x86/vm/sharedRuntime_x86_64.cpp
@@ -120,6 +120,7 @@ class RegisterSaver {
// values on its own
static int rax_offset_in_bytes(void) { return BytesPerInt * rax_off; }
+ static int rdx_offset_in_bytes(void) { return BytesPerInt * rdx_off; }
static int rbx_offset_in_bytes(void) { return BytesPerInt * rbx_off; }
static int xmm0_offset_in_bytes(void) { return BytesPerInt * xmm0_off; }
static int return_offset_in_bytes(void) { return BytesPerInt * return_off; }
@@ -152,7 +153,7 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
__ push_CPU_state(); // Push a multiple of 16 bytes
if (frame::arg_reg_save_area_bytes != 0) {
// Allocate argument register save area
- __ subq(rsp, frame::arg_reg_save_area_bytes);
+ __ subptr(rsp, frame::arg_reg_save_area_bytes);
}
// Set an oopmap for the call site. This oopmap will map all
@@ -266,12 +267,12 @@ OopMap* RegisterSaver::save_live_registers(MacroAssembler* masm, int additional_
void RegisterSaver::restore_live_registers(MacroAssembler* masm) {
if (frame::arg_reg_save_area_bytes != 0) {
// Pop arg register save area
- __ addq(rsp, frame::arg_reg_save_area_bytes);
+ __ addptr(rsp, frame::arg_reg_save_area_bytes);
}
// Recover CPU state
__ pop_CPU_state();
// Get the rbp described implicitly by the calling convention (no oopMap)
- __ popq(rbp);
+ __ pop(rbp);
}
void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
@@ -285,9 +286,11 @@ void RegisterSaver::restore_result_registers(MacroAssembler* masm) {
// Restore fp result register
__ movdbl(xmm0, Address(rsp, xmm0_offset_in_bytes()));
// Restore integer result register
- __ movq(rax, Address(rsp, rax_offset_in_bytes()));
+ __ movptr(rax, Address(rsp, rax_offset_in_bytes()));
+ __ movptr(rdx, Address(rsp, rdx_offset_in_bytes()));
+
// Pop all of the register save area off the stack except the return address
- __ addq(rsp, return_offset_in_bytes());
+ __ addptr(rsp, return_offset_in_bytes());
}
// The java_calling_convention describes stack locations as ideal slots on
@@ -407,18 +410,18 @@ int SharedRuntime::java_calling_convention(const BasicType *sig_bt,
static void patch_callers_callsite(MacroAssembler *masm) {
Label L;
__ verify_oop(rbx);
- __ cmpq(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int)NULL_WORD);
+ __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
// Save the current stack pointer
- __ movq(r13, rsp);
+ __ mov(r13, rsp);
// Schedule the branch target address early.
// Call into the VM to patch the caller, then jump to compiled callee
// rax isn't live so capture return address while we easily can
- __ movq(rax, Address(rsp, 0));
+ __ movptr(rax, Address(rsp, 0));
// align stack so push_CPU_state doesn't fault
- __ andq(rsp, -(StackAlignmentInBytes));
+ __ andptr(rsp, -(StackAlignmentInBytes));
__ push_CPU_state();
@@ -430,20 +433,20 @@ static void patch_callers_callsite(MacroAssembler *masm) {
// Allocate argument register save area
if (frame::arg_reg_save_area_bytes != 0) {
- __ subq(rsp, frame::arg_reg_save_area_bytes);
+ __ subptr(rsp, frame::arg_reg_save_area_bytes);
}
- __ movq(c_rarg0, rbx);
- __ movq(c_rarg1, rax);
+ __ mov(c_rarg0, rbx);
+ __ mov(c_rarg1, rax);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::fixup_callers_callsite)));
// De-allocate argument register save area
if (frame::arg_reg_save_area_bytes != 0) {
- __ addq(rsp, frame::arg_reg_save_area_bytes);
+ __ addptr(rsp, frame::arg_reg_save_area_bytes);
}
__ pop_CPU_state();
// restore sp
- __ movq(rsp, r13);
+ __ mov(rsp, r13);
__ bind(L);
}
@@ -452,13 +455,13 @@ static void tag_stack(MacroAssembler *masm, const BasicType sig, int st_off) {
if (TaggedStackInterpreter) {
int tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(0);
if (sig == T_OBJECT || sig == T_ARRAY) {
- __ mov64(Address(rsp, tag_offset), frame::TagReference);
+ __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagReference);
} else if (sig == T_LONG || sig == T_DOUBLE) {
int next_tag_offset = st_off + Interpreter::expr_tag_offset_in_bytes(1);
- __ mov64(Address(rsp, next_tag_offset), frame::TagValue);
- __ mov64(Address(rsp, tag_offset), frame::TagValue);
+ __ movptr(Address(rsp, next_tag_offset), (int32_t) frame::TagValue);
+ __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
} else {
- __ mov64(Address(rsp, tag_offset), frame::TagValue);
+ __ movptr(Address(rsp, tag_offset), (int32_t) frame::TagValue);
}
}
}
@@ -490,15 +493,15 @@ static void gen_c2i_adapter(MacroAssembler *masm,
extraspace = round_to(extraspace, 2*wordSize);
// Get return address
- __ popq(rax);
+ __ pop(rax);
// set senderSP value
- __ movq(r13, rsp);
+ __ mov(r13, rsp);
- __ subq(rsp, extraspace);
+ __ subptr(rsp, extraspace);
// Store the return address in the expected location
- __ movq(Address(rsp, 0), rax);
+ __ movptr(Address(rsp, 0), rax);
// Now write the args into the outgoing interpreter space
for (int i = 0; i < total_args_passed; i++) {
@@ -537,7 +540,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
if (!r_2->is_valid()) {
// sign extend??
__ movl(rax, Address(rsp, ld_off));
- __ movq(Address(rsp, st_off), rax);
+ __ movptr(Address(rsp, st_off), rax);
tag_stack(masm, sig_bt[i], st_off);
} else {
@@ -553,7 +556,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ mov64(rax, CONST64(0xdeadffffdeadaaaa));
- __ movq(Address(rsp, st_off), rax);
+ __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
tag_stack(masm, sig_bt[i], next_off);
} else {
@@ -576,12 +579,12 @@ static void gen_c2i_adapter(MacroAssembler *masm,
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ mov64(rax, CONST64(0xdeadffffdeadaaab));
- __ movq(Address(rsp, st_off), rax);
+ __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movq(Address(rsp, next_off), r);
tag_stack(masm, sig_bt[i], next_off);
} else {
- __ movq(Address(rsp, st_off), r);
+ __ movptr(Address(rsp, st_off), r);
tag_stack(masm, sig_bt[i], st_off);
}
}
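
The CONST64(0xdeadffffdead....) stores above are debug-build poison: a long or double occupies two interpreter slots but only one holds the data, so ASSERT builds fill the companion slot with a recognizable pattern. The same idea in ordinary C++ (illustrative only; the helper name is made up):

    #include <cstdint>

    // Poison the unused companion slot of a two-slot value in debug builds so
    // a stray read shows up as obvious garbage rather than stale data.
    static void store_two_slot_value(uint64_t* value_slot, uint64_t* companion_slot,
                                     uint64_t v) {
    #ifdef ASSERT
      *companion_slot = UINT64_C(0xdeadffffdeadaaab);
    #else
      (void) companion_slot;   // release builds leave the slot untouched
    #endif
      *value_slot = v;
    }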
@@ -595,7 +598,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
#ifdef ASSERT
// Overwrite the unused slot with known junk
__ mov64(rax, CONST64(0xdeadffffdeadaaac));
- __ movq(Address(rsp, st_off), rax);
+ __ movptr(Address(rsp, st_off), rax);
#endif /* ASSERT */
__ movdbl(Address(rsp, next_off), r_1->as_XMMRegister());
tag_stack(masm, sig_bt[i], next_off);
@@ -604,7 +607,7 @@ static void gen_c2i_adapter(MacroAssembler *masm,
}
// Schedule the branch target address early.
- __ movq(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
+ __ movptr(rcx, Address(rbx, in_bytes(methodOopDesc::interpreter_entry_offset())));
__ jmp(rcx);
}
@@ -631,7 +634,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// save code can segv when fxsave instructions find improperly
// aligned stack pointer.
- __ movq(rax, Address(rsp, 0));
+ __ movptr(rax, Address(rsp, 0));
// Cut-out for having no stack args. Since up to 2 int/oop args are passed
// in registers, we will occasionally have no stack args.
@@ -645,20 +648,20 @@ static void gen_i2c_adapter(MacroAssembler *masm,
comp_words_on_stack = round_to(comp_args_on_stack*VMRegImpl::stack_slot_size, wordSize)>>LogBytesPerWord;
// Round up to minimum stack alignment, in wordSize
comp_words_on_stack = round_to(comp_words_on_stack, 2);
- __ subq(rsp, comp_words_on_stack * wordSize);
+ __ subptr(rsp, comp_words_on_stack * wordSize);
}
// Ensure compiled code always sees stack at proper alignment
- __ andq(rsp, -16);
+ __ andptr(rsp, -16);
// push the return address and misalign the stack so that the youngest frame always sees
// it as if it had been placed by a call instruction
- __ pushq(rax);
+ __ push(rax);
// Will jump to the compiled code just as if compiled code was doing it.
// Pre-load the register-jump target early, to schedule it better.
- __ movq(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
+ __ movptr(r11, Address(rbx, in_bytes(methodOopDesc::from_compiled_offset())));
// Now generate the shuffle code. Pick up all register args and move the
// rest through the floating point stack top.
@@ -697,7 +700,7 @@ static void gen_i2c_adapter(MacroAssembler *masm,
if (!r_2->is_valid()) {
// sign extend???
__ movl(rax, Address(r13, ld_off));
- __ movq(Address(rsp, st_off), rax);
+ __ movptr(Address(rsp, st_off), rax);
} else {
//
// We are using two optoregs. This can be either T_OBJECT, T_ADDRESS, T_LONG, or T_DOUBLE
@@ -751,12 +754,12 @@ static void gen_i2c_adapter(MacroAssembler *masm,
// is possible. So we stash the desired callee in the thread
// and the vm will find it there should this case occur.
- __ movq(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
+ __ movptr(Address(r15_thread, JavaThread::callee_target_offset()), rbx);
// put methodOop where a c2i would expect should we end up there
// only needed because c2 resolve stubs return methodOop as a result in
// rax
- __ movq(rax, rbx);
+ __ mov(rax, rbx);
__ jmp(r11);
}
@@ -792,8 +795,8 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
__ load_klass(temp, receiver);
__ verify_oop(temp);
- __ cmpq(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
- __ movq(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
+ __ cmpptr(temp, Address(holder, compiledICHolderOopDesc::holder_klass_offset()));
+ __ movptr(rbx, Address(holder, compiledICHolderOopDesc::holder_method_offset()));
__ jcc(Assembler::equal, ok);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -801,7 +804,7 @@ AdapterHandlerEntry* SharedRuntime::generate_i2c2i_adapters(MacroAssembler *masm
// Method might have been compiled since the call site was patched to
// interpreted; if that is the case, treat it as a miss so we can get
// the call site corrected.
- __ cmpq(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int)NULL_WORD);
+ __ cmpptr(Address(rbx, in_bytes(methodOopDesc::code_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, skip_fixup);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
}
@@ -980,10 +983,10 @@ static void object_move(MacroAssembler* masm,
*receiver_offset = (offset_in_older_frame + framesize_in_slots) * VMRegImpl::stack_slot_size;
}
- __ cmpq(Address(rbp, reg2offset_in(src.first())), (int)NULL_WORD);
- __ leaq(rHandle, Address(rbp, reg2offset_in(src.first())));
+ __ cmpptr(Address(rbp, reg2offset_in(src.first())), (int32_t)NULL_WORD);
+ __ lea(rHandle, Address(rbp, reg2offset_in(src.first())));
// conditionally move a NULL
- __ cmovq(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
+ __ cmovptr(Assembler::equal, rHandle, Address(rbp, reg2offset_in(src.first())));
} else {
// Oop is in a register; we must store it to the space we reserve
@@ -1011,20 +1014,20 @@ static void object_move(MacroAssembler* masm,
map->set_oop(VMRegImpl::stack2reg(oop_slot));
// Store oop in handle area, may be NULL
- __ movq(Address(rsp, offset), rOop);
+ __ movptr(Address(rsp, offset), rOop);
if (is_receiver) {
*receiver_offset = offset;
}
- __ cmpq(rOop, (int)NULL);
- __ leaq(rHandle, Address(rsp, offset));
+ __ cmpptr(rOop, (int32_t)NULL_WORD);
+ __ lea(rHandle, Address(rsp, offset));
// conditionally move a NULL from the handle area where it was just stored
- __ cmovq(Assembler::equal, rHandle, Address(rsp, offset));
+ __ cmovptr(Assembler::equal, rHandle, Address(rsp, offset));
}
// If arg is on the stack then place it; otherwise it is already in the correct reg.
if (dst.first()->is_stack()) {
- __ movq(Address(rsp, reg2offset_out(dst.first())), rHandle);
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), rHandle);
}
}
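
What object_move is doing here is handlizing: the native callee never sees a raw oop, it receives either NULL or the address of a slot (in the caller's frame or in the handle area just reserved on this stack) that holds the oop. A sketch of the runtime effect (not VM source; type names are stand-ins), together with the matching unwrap performed by the "Unpack oop result" code later in this wrapper:

    #include <cstddef>

    typedef void* oop_t;     // stand-in for the VM oop type
    typedef oop_t* jhandle;  // stand-in for the jobject handed to native code

    // Pass the address of the slot holding the oop, or NULL for a NULL oop.
    static jhandle handlize(oop_t* slot) { return (*slot == NULL) ? (jhandle) NULL : slot; }
    // Inverse, applied to an object/array return value after the native call.
    static oop_t   unwrap(jhandle h)     { return (h == NULL) ? (oop_t) NULL : *h; }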
@@ -1039,7 +1042,7 @@ static void float_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.first()->is_stack()) {
if (dst.first()->is_stack()) {
__ movl(rax, Address(rbp, reg2offset_in(src.first())));
- __ movq(Address(rsp, reg2offset_out(dst.first())), rax);
+ __ movptr(Address(rsp, reg2offset_out(dst.first())), rax);
} else {
// stack to reg
assert(dst.first()->is_XMMRegister(), "only expect xmm registers as parameters");
@@ -1068,7 +1071,7 @@ static void long_move(MacroAssembler* masm, VMRegPair src, VMRegPair dst) {
if (src.is_single_phys_reg() ) {
if (dst.is_single_phys_reg()) {
if (dst.first() != src.first()) {
- __ movq(dst.first()->as_Register(), src.first()->as_Register());
+ __ mov(dst.first()->as_Register(), src.first()->as_Register());
}
} else {
assert(dst.is_single_reg(), "not a stack pair");
@@ -1124,7 +1127,7 @@ void SharedRuntime::save_native_result(MacroAssembler *masm, BasicType ret_type,
break;
case T_VOID: break;
default: {
- __ movq(Address(rbp, -wordSize), rax);
+ __ movptr(Address(rbp, -wordSize), rax);
}
}
}
@@ -1141,7 +1144,7 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
break;
case T_VOID: break;
default: {
- __ movq(rax, Address(rbp, -wordSize));
+ __ movptr(rax, Address(rbp, -wordSize));
}
}
}
@@ -1149,9 +1152,9 @@ void SharedRuntime::restore_native_result(MacroAssembler *masm, BasicType ret_ty
static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
for ( int i = first_arg ; i < arg_count ; i++ ) {
if (args[i].first()->is_Register()) {
- __ pushq(args[i].first()->as_Register());
+ __ push(args[i].first()->as_Register());
} else if (args[i].first()->is_XMMRegister()) {
- __ subq(rsp, 2*wordSize);
+ __ subptr(rsp, 2*wordSize);
__ movdbl(Address(rsp, 0), args[i].first()->as_XMMRegister());
}
}
@@ -1160,10 +1163,10 @@ static void save_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegP
static void restore_args(MacroAssembler *masm, int arg_count, int first_arg, VMRegPair *args) {
for ( int i = arg_count - 1 ; i >= first_arg ; i-- ) {
if (args[i].first()->is_Register()) {
- __ popq(args[i].first()->as_Register());
+ __ pop(args[i].first()->as_Register());
} else if (args[i].first()->is_XMMRegister()) {
__ movdbl(args[i].first()->as_XMMRegister(), Address(rsp, 0));
- __ addq(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
}
}
}
@@ -1303,16 +1306,16 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
Label exception_pending;
__ verify_oop(receiver);
- __ pushq(tmp); // spill (any other registers free here???)
+ __ push(tmp); // spill (any other registers free here???)
__ load_klass(tmp, receiver);
__ cmpq(ic_reg, tmp);
__ jcc(Assembler::equal, ok);
- __ popq(tmp);
+ __ pop(tmp);
__ jump(RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
__ bind(ok);
- __ popq(tmp);
+ __ pop(tmp);
// Verified entry point must be aligned
__ align(8);
@@ -1335,7 +1338,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Generate a new frame for the wrapper.
__ enter();
// -2 because return address is already present and so is saved rbp
- __ subq(rsp, stack_size - 2*wordSize);
+ __ subptr(rsp, stack_size - 2*wordSize);
// Frame is now completed as far as size and linkage.
@@ -1344,9 +1347,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
#ifdef ASSERT
{
Label L;
- __ movq(rax, rsp);
- __ andq(rax, -16); // must be 16 byte boundary (see amd64 ABI)
- __ cmpq(rax, rsp);
+ __ mov(rax, rsp);
+ __ andptr(rax, -16); // must be 16 byte boundary (see amd64 ABI)
+ __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L);
__ stop("improperly aligned stack");
__ bind(L);
@@ -1467,13 +1470,13 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ movoop(oop_handle_reg, JNIHandles::make_local(Klass::cast(method->method_holder())->java_mirror()));
// Now handlize the static class mirror; it's known not-null.
- __ movq(Address(rsp, klass_offset), oop_handle_reg);
+ __ movptr(Address(rsp, klass_offset), oop_handle_reg);
map->set_oop(VMRegImpl::stack2reg(klass_slot_offset));
// Now get the handle
- __ leaq(oop_handle_reg, Address(rsp, klass_offset));
+ __ lea(oop_handle_reg, Address(rsp, klass_offset));
// store the klass handle as second argument
- __ movq(c_rarg1, oop_handle_reg);
+ __ movptr(c_rarg1, oop_handle_reg);
// and protect the arg if we must spill
c_arg--;
}
@@ -1521,14 +1524,14 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
const int mark_word_offset = BasicLock::displaced_header_offset_in_bytes();
// Get the handle (the 2nd argument)
- __ movq(oop_handle_reg, c_rarg1);
+ __ mov(oop_handle_reg, c_rarg1);
// Get address of the box
- __ leaq(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+ __ lea(lock_reg, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
// Load the oop from the handle
- __ movq(obj_reg, Address(oop_handle_reg, 0));
+ __ movptr(obj_reg, Address(oop_handle_reg, 0));
if (UseBiasedLocking) {
__ biased_locking_enter(lock_reg, obj_reg, swap_reg, rscratch1, false, lock_done, &slow_path_lock);
@@ -1538,17 +1541,17 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ movl(swap_reg, 1);
// Load (object->mark() | 1) into swap_reg %rax
- __ orq(swap_reg, Address(obj_reg, 0));
+ __ orptr(swap_reg, Address(obj_reg, 0));
// Save (object->mark() | 1) into BasicLock's displaced header
- __ movq(Address(lock_reg, mark_word_offset), swap_reg);
+ __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
if (os::is_MP()) {
__ lock();
}
// src -> dest iff dest == rax else rax <- dest
- __ cmpxchgq(lock_reg, Address(obj_reg, 0));
+ __ cmpxchgptr(lock_reg, Address(obj_reg, 0));
__ jcc(Assembler::equal, lock_done);
// Hmm should this move to the slow path code area???
@@ -1562,11 +1565,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// least significant 2 bits clear.
// NOTE: the oopMark is in swap_reg %rax as the result of cmpxchg
- __ subq(swap_reg, rsp);
- __ andq(swap_reg, 3 - os::vm_page_size());
+ __ subptr(swap_reg, rsp);
+ __ andptr(swap_reg, 3 - os::vm_page_size());
// Save the test result, for recursive case, the result is zero
- __ movq(Address(lock_reg, mark_word_offset), swap_reg);
+ __ movptr(Address(lock_reg, mark_word_offset), swap_reg);
__ jcc(Assembler::notEqual, slow_path_lock);
// Slow path will re-enter here
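
The fast path converted here is HotSpot's stack-locking protocol: copy (mark | 1) into the BasicLock's displaced-header slot, CAS the lock's address into the object's mark word, and on CAS failure use the (mark - rsp) & (3 - page_size) test to recognize a recursive lock by the same thread (result zero, stored as the displaced header) versus real contention (slow path). Roughly, in C++ (a sketch of the protocol under those assumptions, not the VM's implementation):

    #include <atomic>
    #include <cstdint>

    struct BasicLockSketch { uintptr_t displaced_header; };

    static bool try_stack_lock(std::atomic<uintptr_t>* mark_word,
                               BasicLockSketch* lock, uintptr_t sp,
                               uintptr_t page_size) {
      uintptr_t mark = mark_word->load() | 1;   // the "unlocked" mark pattern
      lock->displaced_header = mark;            // what we expect to displace
      uintptr_t expected = mark;
      // object->mark := &lock iff it still holds the unlocked pattern
      if (mark_word->compare_exchange_strong(expected, (uintptr_t) lock))
        return true;                            // locked it
      // CAS failed: 'expected' now holds the current mark. If it points into
      // our own stack (within a page of sp, low two bits clear) this is a
      // recursive lock; record zero as the displaced header.
      uintptr_t diff = (expected - sp) & (3 - page_size);
      lock->displaced_header = diff;
      return diff == 0;                         // non-zero means take the slow path
    }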
@@ -1580,21 +1583,21 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// get JNIEnv* which is first argument to native
- __ leaq(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
+ __ lea(c_rarg0, Address(r15_thread, in_bytes(JavaThread::jni_environment_offset())));
// Now set thread in native
- __ mov64(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
+ __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native);
__ call(RuntimeAddress(method->native_function()));
// Either restore the MXCSR register after returning from the JNI Call
// or verify that it wasn't changed.
if (RestoreMXCSROnJNICalls) {
- __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
+ __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
}
else if (CheckJNICalls ) {
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
}
@@ -1624,7 +1627,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// VM thread changes sync state to synchronizing and suspends threads for GC.
// Thread A is resumed to finish this native method, but doesn't block here since it
// didn't see any synchronization in progress, and escapes.
- __ mov64(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
+ __ movl(Address(r15_thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
if(os::is_MP()) {
if (UseMembar) {
@@ -1662,12 +1665,12 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// by hand.
//
save_native_result(masm, ret_type, stack_slots);
- __ movq(c_rarg0, r15_thread);
- __ movq(r12, rsp); // remember sp
- __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
- __ andq(rsp, -16); // align stack as required by ABI
+ __ mov(c_rarg0, r15_thread);
+ __ mov(r12, rsp); // remember sp
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
- __ movq(rsp, r12); // restore sp
+ __ mov(rsp, r12); // restore sp
__ reinit_heapbase();
// Restore any method result value
restore_native_result(masm, ret_type, stack_slots);
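
The mov(r12, rsp) / subptr / andptr(-16) / call / mov(rsp, r12) sequence used here recurs twice more in this wrapper (slow-path unlock and stack reguard): the wrapper may be sitting on a misaligned rsp, while the C ABIs want 16-byte alignment at the call (plus, on Windows, the argument register shadow area), so the original sp is parked in callee-saved r12 and restored afterwards; r12 also doubles as the compressed-oop heap base, hence the reinit_heapbase() after each restore. As a helper it would look roughly like this (illustrative sketch using only calls already visible in this diff):

    // Park rsp, align for the C ABI, call out, then restore sp and heap base.
    static void call_out_aligned(MacroAssembler* masm, address entry) {
      masm->mov(r12, rsp);                                // remember sp
      masm->subptr(rsp, frame::arg_reg_save_area_bytes);  // Windows shadow area
      masm->andptr(rsp, -16);                             // 16-byte align (ABI)
      masm->call(RuntimeAddress(entry));
      masm->mov(rsp, r12);                                // restore sp
      masm->reinit_heapbase();                            // r12 held the heap base
    }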
@@ -1691,7 +1694,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
if (method->is_synchronized()) {
// Get locked oop from the handle we passed to jni
- __ movq(obj_reg, Address(oop_handle_reg, 0));
+ __ movptr(obj_reg, Address(oop_handle_reg, 0));
Label done;
@@ -1701,7 +1704,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Simple recursive lock?
- __ cmpq(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int)NULL_WORD);
+ __ cmpptr(Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, done);
// Must save rax if it is live now because cmpxchg must use it
@@ -1711,15 +1714,15 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// get address of the stack lock
- __ leaq(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+ __ lea(rax, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
// get old displaced header
- __ movq(old_hdr, Address(rax, 0));
+ __ movptr(old_hdr, Address(rax, 0));
// Atomic swap old header if oop still contains the stack lock
if (os::is_MP()) {
__ lock();
}
- __ cmpxchgq(old_hdr, Address(obj_reg, 0));
+ __ cmpxchgptr(old_hdr, Address(obj_reg, 0));
__ jcc(Assembler::notEqual, slow_path_unlock);
// slow path re-enters here
@@ -1746,23 +1749,23 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// Unpack oop result
if (ret_type == T_OBJECT || ret_type == T_ARRAY) {
Label L;
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, L);
- __ movq(rax, Address(rax, 0));
+ __ movptr(rax, Address(rax, 0));
__ bind(L);
__ verify_oop(rax);
}
// reset handle block
- __ movq(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
- __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int)NULL_WORD);
+ __ movptr(rcx, Address(r15_thread, JavaThread::active_handles_offset()));
+ __ movptr(Address(rcx, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
// pop our frame
__ leave();
// Any exception pending?
- __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+ __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, exception_pending);
// Return
@@ -1790,9 +1793,9 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
// protect the args we've loaded
save_args(masm, total_c_args, c_arg, out_regs);
- __ movq(c_rarg0, obj_reg);
- __ movq(c_rarg1, lock_reg);
- __ movq(c_rarg2, r15_thread);
+ __ mov(c_rarg0, obj_reg);
+ __ mov(c_rarg1, lock_reg);
+ __ mov(c_rarg2, r15_thread);
// Not a leaf but we have last_Java_frame setup as we want
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_locking_C), 3);
@@ -1800,7 +1803,7 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
#ifdef ASSERT
{ Label L;
- __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+ __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("no pending exception allowed on exit from monitorenter");
__ bind(L);
@@ -1820,32 +1823,32 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
save_native_result(masm, ret_type, stack_slots);
}
- __ leaq(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
+ __ lea(c_rarg1, Address(rsp, lock_slot_offset * VMRegImpl::stack_slot_size));
- __ movq(c_rarg0, obj_reg);
- __ movq(r12, rsp); // remember sp
- __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
- __ andq(rsp, -16); // align stack as required by ABI
+ __ mov(c_rarg0, obj_reg);
+ __ mov(r12, rsp); // remember sp
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // align stack as required by ABI
// Save pending exception around call to VM (which contains an EXCEPTION_MARK)
// NOTE that obj_reg == rbx currently
- __ movq(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
- __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+ __ movptr(rbx, Address(r15_thread, in_bytes(Thread::pending_exception_offset())));
+ __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int32_t)NULL_WORD);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::complete_monitor_unlocking_C)));
- __ movq(rsp, r12); // restore sp
+ __ mov(rsp, r12); // restore sp
__ reinit_heapbase();
#ifdef ASSERT
{
Label L;
- __ cmpq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
+ __ cmpptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), (int)NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("no pending exception allowed on exit complete_monitor_unlocking_C");
__ bind(L);
}
#endif /* ASSERT */
- __ movq(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
+ __ movptr(Address(r15_thread, in_bytes(Thread::pending_exception_offset())), rbx);
if (ret_type == T_FLOAT || ret_type == T_DOUBLE ) {
restore_native_result(masm, ret_type, stack_slots);
@@ -1860,11 +1863,11 @@ nmethod *SharedRuntime::generate_native_wrapper(MacroAssembler *masm,
__ bind(reguard);
save_native_result(masm, ret_type, stack_slots);
- __ movq(r12, rsp); // remember sp
- __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
- __ andq(rsp, -16); // align stack as required by ABI
+ __ mov(r12, rsp); // remember sp
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
- __ movq(rsp, r12); // restore sp
+ __ mov(rsp, r12); // restore sp
__ reinit_heapbase();
restore_native_result(masm, ret_type, stack_slots);
// and continue
@@ -2574,19 +2577,74 @@ void SharedRuntime::generate_deopt_blob() {
// Normal deoptimization. Save exec mode for unpack_frames.
__ movl(r14, Deoptimization::Unpack_deopt); // callee-saved
__ jmp(cont);
+
+ int reexecute_offset = __ pc() - start;
+
+ // Reexecute case
+ // return address is the pc that describes what bci to re-execute at
+
+ // No need to update map as each call to save_live_registers will produce identical oopmap
+ (void) RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
+
+ __ movl(r14, Deoptimization::Unpack_reexecute); // callee-saved
+ __ jmp(cont);
+
int exception_offset = __ pc() - start;
// Prolog for exception case
- // Push throwing pc as return address
- __ pushq(rdx);
+ // all registers are dead at this entry point, except for rax, and
+ // rdx which contain the exception oop and exception pc
+ // respectively. Set them in TLS and fall thru to the
+ // unpack_with_exception_in_tls entry point.
+
+ __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
+ __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), rax);
+
+ int exception_in_tls_offset = __ pc() - start;
+
+ // new implementation because exception oop is now passed in JavaThread
+
+ // Prolog for exception case
+ // All registers must be preserved because they might be used by LinearScan
+ // Exception oop and throwing PC are passed in JavaThread
+ // tos: stack at point of call to method that threw the exception (i.e. only
+ // args are on the stack, no return address)
+
+ // make room on stack for the return address
+ // It will be patched later with the throwing pc. The correct value is not
+ // available now because loading it from memory would destroy registers.
+ __ push(0);
// Save everything in sight.
map = RegisterSaver::save_live_registers(masm, 0, &frame_size_in_words);
+ // Now it is safe to overwrite any register
+
// Deopt during an exception. Save exec mode for unpack_frames.
__ movl(r14, Deoptimization::Unpack_exception); // callee-saved
+ // load throwing pc from JavaThread and patch it as the return address
+ // of the current frame. Then clear the field in JavaThread
+
+ __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
+ __ movptr(Address(rbp, wordSize), rdx);
+ __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+
+#ifdef ASSERT
+ // verify that there is really an exception oop in JavaThread
+ __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
+ __ verify_oop(rax);
+
+ // verify that there is no pending exception
+ Label no_pending_exception;
+ __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
+ __ testptr(rax, rax);
+ __ jcc(Assembler::zero, no_pending_exception);
+ __ stop("must not have pending exception here");
+ __ bind(no_pending_exception);
+#endif
+
__ bind(cont);
// Call C code. Need thread and this frame, but NOT official VM entry
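
The reworked exception entry no longer takes the throwing pc on the stack: the runtime parks the exception oop and pc in two JavaThread fields ("in TLS") and the blob re-reads them after all registers are saved, patching the dummy return-address slot itself. A sketch of that handoff (illustrative, not VM source; the struct mirrors only the two fields used above):

    struct JavaThreadSketch {
      void* exception_oop;   // re-read into rax, verified, then cleared
      void* exception_pc;    // patched into the saved return-address slot, then cleared
    };

    static void post_exception_in_tls(JavaThreadSketch* thread,
                                      void* exception_oop, void* throwing_pc) {
      thread->exception_oop = exception_oop;
      thread->exception_pc  = throwing_pc;
      // ...then jump to the blob at exception_in_tls_offset
    }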
@@ -2599,15 +2657,15 @@ void SharedRuntime::generate_deopt_blob() {
__ set_last_Java_frame(noreg, noreg, NULL);
#ifdef ASSERT
{ Label L;
- __ cmpq(Address(r15_thread,
+ __ cmpptr(Address(r15_thread,
JavaThread::last_Java_fp_offset()),
- 0);
+ (int32_t)0);
__ jcc(Assembler::equal, L);
__ stop("SharedRuntime::generate_deopt_blob: last_Java_fp not cleared");
__ bind(L);
}
#endif // ASSERT
- __ movq(c_rarg0, r15_thread);
+ __ mov(c_rarg0, r15_thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::fetch_unroll_info)));
// Need to have an oopmap that tells fetch_unroll_info where to
@@ -2617,7 +2675,25 @@ void SharedRuntime::generate_deopt_blob() {
__ reset_last_Java_frame(false, false);
// Load UnrollBlock* into rdi
- __ movq(rdi, rax);
+ __ mov(rdi, rax);
+
+ Label noException;
+ __ cmpl(r12, Deoptimization::Unpack_exception); // Was exception pending?
+ __ jcc(Assembler::notEqual, noException);
+ __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
+ // QQQ this is useless it was NULL above
+ __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
+ __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()), (int32_t)NULL_WORD);
+ __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int32_t)NULL_WORD);
+
+ __ verify_oop(rax);
+
+ // Overwrite the result registers with the exception results.
+ __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
+ // I think this is useless
+ __ movptr(Address(rsp, RegisterSaver::rdx_offset_in_bytes()), rdx);
+
+ __ bind(noException);
// Only register save data is on the stack.
// Now restore the result registers. Everything else is either dead
@@ -2640,7 +2716,7 @@ void SharedRuntime::generate_deopt_blob() {
// Pop deoptimized frame
__ movl(rcx, Address(rdi, Deoptimization::UnrollBlock::size_of_deoptimized_frame_offset_in_bytes()));
- __ addq(rsp, rcx);
+ __ addptr(rsp, rcx);
// rsp should be pointing at the return address to the caller (3)
@@ -2651,19 +2727,19 @@ void SharedRuntime::generate_deopt_blob() {
}
// Load address of array of frame pcs into rcx
- __ movq(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
+ __ movptr(rcx, Address(rdi, Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
// Trash the old pc
- __ addq(rsp, wordSize);
+ __ addptr(rsp, wordSize);
// Load address of array of frame sizes into rsi
- __ movq(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
+ __ movptr(rsi, Address(rdi, Deoptimization::UnrollBlock::frame_sizes_offset_in_bytes()));
// Load counter into rdx
__ movl(rdx, Address(rdi, Deoptimization::UnrollBlock::number_of_frames_offset_in_bytes()));
// Pick up the initial fp we should save
- __ movq(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
+ __ movptr(rbp, Address(rdi, Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
// Now adjust the caller's stack to make up for the extra locals
// but record the original sp so that we can save it in the skeletal interpreter
@@ -2672,41 +2748,56 @@ void SharedRuntime::generate_deopt_blob() {
const Register sender_sp = r8;
- __ movq(sender_sp, rsp);
+ __ mov(sender_sp, rsp);
__ movl(rbx, Address(rdi,
Deoptimization::UnrollBlock::
caller_adjustment_offset_in_bytes()));
- __ subq(rsp, rbx);
+ __ subptr(rsp, rbx);
// Push interpreter frames in a loop
Label loop;
__ bind(loop);
- __ movq(rbx, Address(rsi, 0)); // Load frame size
- __ subq(rbx, 2*wordSize); // We'll push pc and ebp by hand
- __ pushq(Address(rcx, 0)); // Save return address
+ __ movptr(rbx, Address(rsi, 0)); // Load frame size
+#ifdef CC_INTERP
+ __ subptr(rbx, 4*wordSize); // we'll push pc and ebp by hand and
+#ifdef ASSERT
+ __ push(0xDEADDEAD); // Make a recognizable pattern
+ __ push(0xDEADDEAD);
+#else /* ASSERT */
+ __ subptr(rsp, 2*wordSize); // skip the "static long no_param"
+#endif /* ASSERT */
+#else
+ __ subptr(rbx, 2*wordSize); // We'll push pc and ebp by hand
+#endif // CC_INTERP
+ __ pushptr(Address(rcx, 0)); // Save return address
__ enter(); // Save old & set new ebp
- __ subq(rsp, rbx); // Prolog
- __ movq(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
- sender_sp); // Make it walkable
+ __ subptr(rsp, rbx); // Prolog
+#ifdef CC_INTERP
+ __ movptr(Address(rbp,
+ -(sizeof(BytecodeInterpreter)) + in_bytes(byte_offset_of(BytecodeInterpreter, _sender_sp))),
+ sender_sp); // Make it walkable
+#else /* CC_INTERP */
// This value is corrected by layout_activation_impl
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD );
- __ movq(sender_sp, rsp); // Pass sender_sp to next frame
- __ addq(rsi, wordSize); // Bump array pointer (sizes)
- __ addq(rcx, wordSize); // Bump array pointer (pcs)
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
+ __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize), sender_sp); // Make it walkable
+#endif /* CC_INTERP */
+ __ mov(sender_sp, rsp); // Pass sender_sp to next frame
+ __ addptr(rsi, wordSize); // Bump array pointer (sizes)
+ __ addptr(rcx, wordSize); // Bump array pointer (pcs)
__ decrementl(rdx); // Decrement counter
__ jcc(Assembler::notZero, loop);
- __ pushq(Address(rcx, 0)); // Save final return address
+ __ pushptr(Address(rcx, 0)); // Save final return address
// Re-push self-frame
__ enter(); // Save old & set new ebp
// Allocate a full sized register save area.
// Return address and rbp are in place, so we allocate two less words.
- __ subq(rsp, (frame_size_in_words - 2) * wordSize);
+ __ subptr(rsp, (frame_size_in_words - 2) * wordSize);
// Restore frame locals after moving the frame
__ movdbl(Address(rsp, RegisterSaver::xmm0_offset_in_bytes()), xmm0);
- __ movq(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
+ __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
// Call C code. Need thread but NOT official VM entry
// crud. We cannot block on this call, no GC can happen. Call should
@@ -2717,7 +2808,7 @@ void SharedRuntime::generate_deopt_blob() {
// Use rbp because the frames look interpreted now
__ set_last_Java_frame(noreg, rbp, NULL);
- __ movq(c_rarg0, r15_thread);
+ __ mov(c_rarg0, r15_thread);
__ movl(c_rarg1, r14); // second arg: exec_mode
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
@@ -2729,7 +2820,9 @@ void SharedRuntime::generate_deopt_blob() {
// Collect return values
__ movdbl(xmm0, Address(rsp, RegisterSaver::xmm0_offset_in_bytes()));
- __ movq(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
+ __ movptr(rax, Address(rsp, RegisterSaver::rax_offset_in_bytes()));
+ // I think this is useless (throwing pc?)
+ __ movptr(rdx, Address(rsp, RegisterSaver::rdx_offset_in_bytes()));
// Pop self-frame.
__ leave(); // Epilog
@@ -2740,7 +2833,8 @@ void SharedRuntime::generate_deopt_blob() {
// Make sure all code is generated
masm->flush();
- _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, 0, frame_size_in_words);
+ _deopt_blob = DeoptimizationBlob::create(&buffer, oop_maps, 0, exception_offset, reexecute_offset, frame_size_in_words);
+ _deopt_blob->set_unpack_with_exception_in_tls_offset(exception_in_tls_offset);
}
#ifdef COMPILER2
@@ -2758,10 +2852,10 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// Push self-frame. We get here with a return address on the
// stack, so rsp is 8-byte aligned until we allocate our frame.
- __ subq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
+ __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog!
// No callee saved registers. rbp is assumed implicitly saved
- __ movq(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
+ __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
// compiler left unloaded_class_index in j_rarg0; move to where the
// runtime expects it.
@@ -2776,7 +2870,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
//
// UnrollBlock* uncommon_trap(JavaThread* thread, jint unloaded_class_index);
- __ movq(c_rarg0, r15_thread);
+ __ mov(c_rarg0, r15_thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::uncommon_trap)));
// Set an oopmap for the call site
@@ -2790,7 +2884,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
__ reset_last_Java_frame(false, false);
// Load UnrollBlock* into rdi
- __ movq(rdi, rax);
+ __ mov(rdi, rax);
// Pop all the frames we must move/replace.
//
@@ -2800,13 +2894,13 @@ void SharedRuntime::generate_uncommon_trap_blob() {
// 3: caller of deopting frame (could be compiled/interpreted).
// Pop self-frame. We have no frame, and must rely only on rax and rsp.
- __ addq(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
+ __ addptr(rsp, (SimpleRuntimeFrame::framesize - 2) << LogBytesPerInt); // Epilog!
// Pop deoptimized frame (int)
__ movl(rcx, Address(rdi,
Deoptimization::UnrollBlock::
size_of_deoptimized_frame_offset_in_bytes()));
- __ addq(rsp, rcx);
+ __ addptr(rsp, rcx);
// rsp should be pointing at the return address to the caller (3)
@@ -2817,17 +2911,17 @@ void SharedRuntime::generate_uncommon_trap_blob() {
}
// Load address of array of frame pcs into rcx (address*)
- __ movq(rcx,
- Address(rdi,
- Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
+ __ movptr(rcx,
+ Address(rdi,
+ Deoptimization::UnrollBlock::frame_pcs_offset_in_bytes()));
// Trash the return pc
- __ addq(rsp, wordSize);
+ __ addptr(rsp, wordSize);
// Load address of array of frame sizes into rsi (intptr_t*)
- __ movq(rsi, Address(rdi,
- Deoptimization::UnrollBlock::
- frame_sizes_offset_in_bytes()));
+ __ movptr(rsi, Address(rdi,
+ Deoptimization::UnrollBlock::
+ frame_sizes_offset_in_bytes()));
// Counter
__ movl(rdx, Address(rdi,
@@ -2835,9 +2929,9 @@ void SharedRuntime::generate_uncommon_trap_blob() {
number_of_frames_offset_in_bytes())); // (int)
// Pick up the initial fp we should save
- __ movq(rbp,
- Address(rdi,
- Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
+ __ movptr(rbp,
+ Address(rdi,
+ Deoptimization::UnrollBlock::initial_fp_offset_in_bytes()));
// Now adjust the caller's stack to make up for the extra locals but
// record the original sp so that we can save it in the skeletal
@@ -2846,34 +2940,34 @@ void SharedRuntime::generate_uncommon_trap_blob() {
const Register sender_sp = r8;
- __ movq(sender_sp, rsp);
+ __ mov(sender_sp, rsp);
__ movl(rbx, Address(rdi,
Deoptimization::UnrollBlock::
caller_adjustment_offset_in_bytes())); // (int)
- __ subq(rsp, rbx);
+ __ subptr(rsp, rbx);
// Push interpreter frames in a loop
Label loop;
__ bind(loop);
- __ movq(rbx, Address(rsi, 0)); // Load frame size
- __ subq(rbx, 2 * wordSize); // We'll push pc and rbp by hand
- __ pushq(Address(rcx, 0)); // Save return address
- __ enter(); // Save old & set new rbp
- __ subq(rsp, rbx); // Prolog
- __ movq(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
- sender_sp); // Make it walkable
+ __ movptr(rbx, Address(rsi, 0)); // Load frame size
+ __ subptr(rbx, 2 * wordSize); // We'll push pc and rbp by hand
+ __ pushptr(Address(rcx, 0)); // Save return address
+ __ enter(); // Save old & set new rbp
+ __ subptr(rsp, rbx); // Prolog
+ __ movptr(Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize),
+ sender_sp); // Make it walkable
// This value is corrected by layout_activation_impl
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int)NULL_WORD );
- __ movq(sender_sp, rsp); // Pass sender_sp to next frame
- __ addq(rsi, wordSize); // Bump array pointer (sizes)
- __ addq(rcx, wordSize); // Bump array pointer (pcs)
- __ decrementl(rdx); // Decrement counter
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD );
+ __ mov(sender_sp, rsp); // Pass sender_sp to next frame
+ __ addptr(rsi, wordSize); // Bump array pointer (sizes)
+ __ addptr(rcx, wordSize); // Bump array pointer (pcs)
+ __ decrementl(rdx); // Decrement counter
__ jcc(Assembler::notZero, loop);
- __ pushq(Address(rcx, 0)); // Save final return address
+ __ pushptr(Address(rcx, 0)); // Save final return address
// Re-push self-frame
__ enter(); // Save old & set new rbp
- __ subq(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
+ __ subptr(rsp, (SimpleRuntimeFrame::framesize - 4) << LogBytesPerInt);
// Prolog
// Use rbp because the frames look interpreted now
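// The loop above rebuilds one skeletal interpreter frame per UnrollBlock entry
// before calling Deoptimization::unpack_frames. A self-contained C++ model of
// the stack it lays out (UnrollBlockModel and the word-vector stack are
// simplified stand-ins; the sender-sp / last-sp bookkeeping slots are omitted):

#include <cstdint>
#include <vector>

struct UnrollBlockModel {
  int                    number_of_frames;
  size_t                 caller_adjustment_in_words;  // extra locals for caller
  std::vector<size_t>    frame_size_in_words;         // one per skeletal frame
  std::vector<uintptr_t> frame_pcs;                   // number_of_frames + 1 pcs
};

// Returns the words pushed, in push order (first element == highest address).
inline std::vector<uintptr_t> build_skeletal_frames(const UnrollBlockModel& ub) {
  std::vector<uintptr_t> stack;
  stack.insert(stack.end(), ub.caller_adjustment_in_words, 0); // subptr(rsp, rbx)
  for (int i = 0; i < ub.number_of_frames; ++i) {
    stack.push_back(ub.frame_pcs[i]);       // pushptr(Address(rcx, 0)): return pc
    stack.push_back(0);                     // enter(): saved rbp placeholder
    stack.insert(stack.end(),               // subptr(rsp, rbx): frame body
                 ub.frame_size_in_words[i] - 2, 0);
  }
  stack.push_back(ub.frame_pcs[ub.number_of_frames]); // final return address
  return stack;
}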
@@ -2886,7 +2980,7 @@ void SharedRuntime::generate_uncommon_trap_blob() {
//
// BasicType unpack_frames(JavaThread* thread, int exec_mode);
- __ movq(c_rarg0, r15_thread);
+ __ mov(c_rarg0, r15_thread);
__ movl(c_rarg1, Deoptimization::Unpack_uncommon_trap);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, Deoptimization::unpack_frames)));
@@ -2933,7 +3027,7 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
// Make room for return address (or push it again)
if (!cause_return) {
- __ pushq(rbx);
+ __ push(rbx);
}
// Save registers, fpu state, and flags
@@ -2950,12 +3044,12 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
if (!cause_return) {
// overwrite the dummy value we pushed on entry
- __ movq(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
- __ movq(Address(rbp, wordSize), c_rarg0);
+ __ movptr(c_rarg0, Address(r15_thread, JavaThread::saved_exception_pc_offset()));
+ __ movptr(Address(rbp, wordSize), c_rarg0);
}
// Do the call
- __ movq(c_rarg0, r15_thread);
+ __ mov(c_rarg0, r15_thread);
__ call(RuntimeAddress(call_ptr));
// Set an oopmap for the call site. This oopmap will map all
@@ -2969,7 +3063,7 @@ static SafepointBlob* generate_handler_blob(address call_ptr, bool cause_return)
__ reset_last_Java_frame(false, false);
- __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, noException);
// Exception pending
@@ -3023,7 +3117,7 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
__ set_last_Java_frame(noreg, noreg, NULL);
- __ movq(c_rarg0, r15_thread);
+ __ mov(c_rarg0, r15_thread);
__ call(RuntimeAddress(destination));
@@ -3040,14 +3134,14 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
__ reset_last_Java_frame(false, false);
// check for pending exceptions
Label pending;
- __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, pending);
// get the returned methodOop
- __ movq(rbx, Address(r15_thread, JavaThread::vm_result_offset()));
- __ movq(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
+ __ movptr(rbx, Address(r15_thread, JavaThread::vm_result_offset()));
+ __ movptr(Address(rsp, RegisterSaver::rbx_offset_in_bytes()), rbx);
- __ movq(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
+ __ movptr(Address(rsp, RegisterSaver::rax_offset_in_bytes()), rax);
RegisterSaver::restore_live_registers(masm);
@@ -3065,7 +3159,7 @@ static RuntimeStub* generate_resolve_blob(address destination, const char* name)
__ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int)NULL_WORD);
- __ movq(rax, Address(r15_thread, Thread::pending_exception_offset()));
+ __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
__ jump(RuntimeAddress(StubRoutines::forward_exception_entry()));
// -------------
@@ -3154,8 +3248,8 @@ void OptoRuntime::generate_exception_blob() {
address start = __ pc();
// Exception pc is 'return address' for stack walker
- __ pushq(rdx);
- __ subq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
+ __ push(rdx);
+ __ subptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Prolog
// Save callee-saved registers. See x86_64.ad.
@@ -3163,14 +3257,14 @@ void OptoRuntime::generate_exception_blob() {
// convention will save restore it in prolog/epilog) Other than that
// there are no callee save registers now that adapter frames are gone.
- __ movq(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
+ __ movptr(Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt), rbp);
// Store exception in Thread object. We cannot pass any arguments to the
// handle_exception call, since we do not want to make any assumption
// about the size of the frame where the exception happened in.
// c_rarg0 is either rdi (Linux) or rcx (Windows).
- __ movq(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
- __ movq(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
+ __ movptr(Address(r15_thread, JavaThread::exception_oop_offset()),rax);
+ __ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), rdx);
// This call does all the hard work. It checks if an exception handler
// exists in the method.
@@ -3181,7 +3275,7 @@ void OptoRuntime::generate_exception_blob() {
// address OptoRuntime::handle_exception_C(JavaThread* thread)
__ set_last_Java_frame(noreg, noreg, NULL);
- __ movq(c_rarg0, r15_thread);
+ __ mov(c_rarg0, r15_thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, OptoRuntime::handle_exception_C)));
// Set an oopmap for the call site. This oopmap will only be used if we
@@ -3202,20 +3296,20 @@ void OptoRuntime::generate_exception_blob() {
// convention will save restore it in prolog/epilog) Other than that
// there are no callee save registers now that adapter frames are gone.
- __ movq(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
+ __ movptr(rbp, Address(rsp, SimpleRuntimeFrame::rbp_off << LogBytesPerInt));
- __ addq(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
- __ popq(rdx); // No need for exception pc anymore
+ __ addptr(rsp, SimpleRuntimeFrame::return_off << LogBytesPerInt); // Epilog
+ __ pop(rdx); // No need for exception pc anymore
// rax: exception handler
// We have a handler in rax (could be deopt blob).
- __ movq(r8, rax);
+ __ mov(r8, rax);
// Get the exception oop
- __ movq(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
+ __ movptr(rax, Address(r15_thread, JavaThread::exception_oop_offset()));
// Get the exception pc in case we are deoptimized
- __ movq(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
+ __ movptr(rdx, Address(r15_thread, JavaThread::exception_pc_offset()));
#ifdef ASSERT
__ movptr(Address(r15_thread, JavaThread::exception_handler_pc_offset()), (int)NULL_WORD);
__ movptr(Address(r15_thread, JavaThread::exception_pc_offset()), (int)NULL_WORD);
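// The recurring pattern in the hunks above (and in the stub generator hunks
// below) is mechanical: each width-specific movq/movl, pushq/pushl, addq/addl,
// cmpq/cmpl on a pointer- or register-sized value becomes a width-neutral
// helper (movptr, push, addptr, cmpptr, mov, ...), so one copy of the stub code
// serves both 32- and 64-bit builds. A self-contained sketch of that dispatch
// idea, using plain #ifdef _LP64 in place of HotSpot's LP64_ONLY/NOT_LP64
// macros; MockAssembler and its methods are stand-ins, not the real
// MacroAssembler API from the merged assembler_x86 files:

#include <cstdint>

struct MockAssembler {
  void movl(int dst_reg, int32_t imm) { /* emit a 32-bit move (stand-in) */ }
  void movq(int dst_reg, int64_t imm) { /* emit a 64-bit move (stand-in) */ }

  // Width-neutral "move pointer-sized value": one call site, two encodings.
  void movptr(int dst_reg, intptr_t imm) {
#ifdef _LP64
    movq(dst_reg, imm);
#else
    movl(dst_reg, (int32_t)imm);
#endif
  }
};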
diff --git a/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index 058c23028..00b1fa7ea 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -30,6 +30,7 @@
// see the comment in stubRoutines.hpp
#define __ _masm->
+#define a__ ((Assembler*)_masm)->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
@@ -67,7 +68,7 @@ class StubGenerator: public StubCodeGenerator {
#define inc_counter_np(counter) (0)
#else
void inc_counter_np_(int& counter) {
- __ increment(ExternalAddress((address)&counter));
+ __ incrementl(ExternalAddress((address)&counter));
}
#define inc_counter_np(counter) \
BLOCK_COMMENT("inc_counter " #counter); \
@@ -137,16 +138,16 @@ class StubGenerator: public StubCodeGenerator {
// stub code
__ enter();
- __ movl(rcx, parameter_size); // parameter counter
- __ shll(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes
- __ addl(rcx, locals_count_in_bytes); // reserve space for register saves
- __ subl(rsp, rcx);
- __ andl(rsp, -(StackAlignmentInBytes)); // Align stack
+ __ movptr(rcx, parameter_size); // parameter counter
+ __ shlptr(rcx, Interpreter::logStackElementSize()); // convert parameter count to bytes
+ __ addptr(rcx, locals_count_in_bytes); // reserve space for register saves
+ __ subptr(rsp, rcx);
+ __ andptr(rsp, -(StackAlignmentInBytes)); // Align stack
// save rdi, rsi, & rbx, according to C calling conventions
- __ movl(saved_rdi, rdi);
- __ movl(saved_rsi, rsi);
- __ movl(saved_rbx, rbx);
+ __ movptr(saved_rdi, rdi);
+ __ movptr(saved_rsi, rsi);
+ __ movptr(saved_rbx, rbx);
// save and initialize %mxcsr
if (sse_save) {
Label skip_ldmx;
@@ -166,8 +167,8 @@ class StubGenerator: public StubCodeGenerator {
#ifdef ASSERT
// make sure we have no pending exceptions
{ Label L;
- __ movl(rcx, thread);
- __ cmpl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);
+ __ movptr(rcx, thread);
+ __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("StubRoutines::call_stub: entered with pending exception");
__ bind(L);
@@ -189,20 +190,20 @@ class StubGenerator: public StubCodeGenerator {
// source is rdx[rcx: N-1..0]
// dest is rsp[rbx: 0..N-1]
- __ movl(rdx, parameters); // parameter pointer
- __ xorl(rbx, rbx);
+ __ movptr(rdx, parameters); // parameter pointer
+ __ xorptr(rbx, rbx);
__ BIND(loop);
if (TaggedStackInterpreter) {
- __ movl(rax, Address(rdx, rcx, Interpreter::stackElementScale(),
+ __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(),
-2*wordSize)); // get tag
- __ movl(Address(rsp, rbx, Interpreter::stackElementScale(),
+ __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
Interpreter::expr_tag_offset_in_bytes(0)), rax); // store tag
}
// get parameter
- __ movl(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
- __ movl(Address(rsp, rbx, Interpreter::stackElementScale(),
+ __ movptr(rax, Address(rdx, rcx, Interpreter::stackElementScale(), -wordSize));
+ __ movptr(Address(rsp, rbx, Interpreter::stackElementScale(),
Interpreter::expr_offset_in_bytes(0)), rax); // store parameter
__ increment(rbx);
__ decrement(rcx);
@@ -210,9 +211,9 @@ class StubGenerator: public StubCodeGenerator {
// call Java function
__ BIND(parameters_done);
- __ movl(rbx, method); // get methodOop
- __ movl(rax, entry_point); // get entry_point
- __ movl(rsi, rsp); // set sender sp
+ __ movptr(rbx, method); // get methodOop
+ __ movptr(rax, entry_point); // get entry_point
+ __ mov(rsi, rsp); // set sender sp
BLOCK_COMMENT("call Java function");
__ call(rax);
@@ -225,7 +226,7 @@ class StubGenerator: public StubCodeGenerator {
// store result depending on type
// (everything that is not T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
- __ movl(rdi, result);
+ __ movptr(rdi, result);
Label is_long, is_float, is_double, exit;
__ movl(rsi, result_type);
__ cmpl(rsi, T_LONG);
@@ -243,7 +244,7 @@ class StubGenerator: public StubCodeGenerator {
__ verify_FPU(0, "generate_call_stub");
// pop parameters
- __ leal(rsp, rsp_after_call);
+ __ lea(rsp, rsp_after_call);
// restore %mxcsr
if (sse_save) {
@@ -251,13 +252,13 @@ class StubGenerator: public StubCodeGenerator {
}
// restore rdi, rsi and rbx,
- __ movl(rbx, saved_rbx);
- __ movl(rsi, saved_rsi);
- __ movl(rdi, saved_rdi);
- __ addl(rsp, 4*wordSize);
+ __ movptr(rbx, saved_rbx);
+ __ movptr(rsi, saved_rsi);
+ __ movptr(rdi, saved_rdi);
+ __ addptr(rsp, 4*wordSize);
// return
- __ popl(rbp);
+ __ pop(rbp);
__ ret(0);
// handle return types different from T_INT
@@ -291,7 +292,7 @@ class StubGenerator: public StubCodeGenerator {
// return above that handles interpreter returns.
BLOCK_COMMENT("call_stub_compiled_return:");
- StubRoutines::i486::set_call_stub_compiled_return( __ pc());
+ StubRoutines::x86::set_call_stub_compiled_return( __ pc());
#ifdef COMPILER2
if (UseSSE >= 2) {
@@ -337,12 +338,12 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
// get thread directly
- __ movl(rcx, thread);
+ __ movptr(rcx, thread);
#ifdef ASSERT
// verify that threads correspond
{ Label L;
__ get_thread(rbx);
- __ cmpl(rbx, rcx);
+ __ cmpptr(rbx, rcx);
__ jcc(Assembler::equal, L);
__ stop("StubRoutines::catch_exception: threads must correspond");
__ bind(L);
@@ -350,7 +351,7 @@ class StubGenerator: public StubCodeGenerator {
#endif
// set pending exception
__ verify_oop(rax);
- __ movl(Address(rcx, Thread::pending_exception_offset()), rax );
+ __ movptr(Address(rcx, Thread::pending_exception_offset()), rax );
__ lea(Address(rcx, Thread::exception_file_offset ()),
ExternalAddress((address)__FILE__));
__ movl(Address(rcx, Thread::exception_line_offset ()), __LINE__ );
@@ -389,7 +390,7 @@ class StubGenerator: public StubCodeGenerator {
// make sure this code is only executed if there is a pending exception
{ Label L;
__ get_thread(rcx);
- __ cmpl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);
+ __ cmpptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (1)");
__ bind(L);
@@ -397,21 +398,21 @@ class StubGenerator: public StubCodeGenerator {
#endif
// compute exception handler into rbx,
- __ movl(rax, Address(rsp, 0));
+ __ movptr(rax, Address(rsp, 0));
BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rax);
- __ movl(rbx, rax);
+ __ mov(rbx, rax);
// setup rax, & rdx, remove return address & clear pending exception
__ get_thread(rcx);
- __ popl(rdx);
- __ movl(rax, Address(rcx, Thread::pending_exception_offset()));
- __ movl(Address(rcx, Thread::pending_exception_offset()), NULL_WORD);
+ __ pop(rdx);
+ __ movptr(rax, Address(rcx, Thread::pending_exception_offset()));
+ __ movptr(Address(rcx, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
#ifdef ASSERT
// make sure exception is set
{ Label L;
- __ testl(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (2)");
__ bind(L);
@@ -447,13 +448,13 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "atomic_xchg");
address start = __ pc();
- __ pushl(rdx);
+ __ push(rdx);
Address exchange(rsp, 2 * wordSize);
Address dest_addr(rsp, 3 * wordSize);
__ movl(rax, exchange);
- __ movl(rdx, dest_addr);
- __ xchg(rax, Address(rdx, 0));
- __ popl(rdx);
+ __ movptr(rdx, dest_addr);
+ __ xchgl(rax, Address(rdx, 0));
+ __ pop(rdx);
__ ret(0);
return start;
@@ -476,8 +477,8 @@ class StubGenerator: public StubCodeGenerator {
if (CheckJNICalls && UseSSE > 0 ) {
Label ok_ret;
ExternalAddress mxcsr_std(StubRoutines::addr_mxcsr_std());
- __ pushl(rax);
- __ subl(rsp, wordSize); // allocate a temp location
+ __ push(rax);
+ __ subptr(rsp, wordSize); // allocate a temp location
__ stmxcsr(mxcsr_save);
__ movl(rax, mxcsr_save);
__ andl(rax, MXCSR_MASK);
@@ -489,8 +490,8 @@ class StubGenerator: public StubCodeGenerator {
__ ldmxcsr(mxcsr_std);
__ bind(ok_ret);
- __ addl(rsp, wordSize);
- __ popl(rax);
+ __ addptr(rsp, wordSize);
+ __ pop(rax);
}
__ ret(0);
@@ -514,8 +515,8 @@ class StubGenerator: public StubCodeGenerator {
if (CheckJNICalls) {
Label ok_ret;
- __ pushl(rax);
- __ subl(rsp, wordSize); // allocate a temp location
+ __ push(rax);
+ __ subptr(rsp, wordSize); // allocate a temp location
__ fnstcw(fpu_cntrl_wrd_save);
__ movl(rax, fpu_cntrl_wrd_save);
__ andl(rax, FPU_CNTRL_WRD_MASK);
@@ -528,8 +529,8 @@ class StubGenerator: public StubCodeGenerator {
__ fldcw(fpu_std);
__ bind(ok_ret);
- __ addl(rsp, wordSize);
- __ popl(rax);
+ __ addptr(rsp, wordSize);
+ __ pop(rax);
}
__ ret(0);
@@ -563,22 +564,22 @@ class StubGenerator: public StubCodeGenerator {
assert(FPUStateSizeInWords == 27, "update stack layout");
// Save outgoing argument to stack across push_FPU_state()
- __ subl(rsp, wordSize * 2);
+ __ subptr(rsp, wordSize * 2);
__ fstp_d(Address(rsp, 0));
// Save CPU & FPU state
- __ pushl(rbx);
- __ pushl(rcx);
- __ pushl(rsi);
- __ pushl(rdi);
- __ pushl(rbp);
+ __ push(rbx);
+ __ push(rcx);
+ __ push(rsi);
+ __ push(rdi);
+ __ push(rbp);
__ push_FPU_state();
// push_FPU_state() resets the FP top of stack
// Load original double into FP top of stack
__ fld_d(Address(rsp, saved_argument_off * wordSize));
// Store double into stack as outgoing argument
- __ subl(rsp, wordSize*2);
+ __ subptr(rsp, wordSize*2);
__ fst_d(Address(rsp, 0));
// Prepare FPU for doing math in C-land
@@ -592,12 +593,12 @@ class StubGenerator: public StubCodeGenerator {
// Restore CPU & FPU state
__ pop_FPU_state();
- __ popl(rbp);
- __ popl(rdi);
- __ popl(rsi);
- __ popl(rcx);
- __ popl(rbx);
- __ addl(rsp, wordSize * 2);
+ __ pop(rbp);
+ __ pop(rdi);
+ __ pop(rsi);
+ __ pop(rcx);
+ __ pop(rbx);
+ __ addptr(rsp, wordSize * 2);
__ ret(0);
@@ -613,13 +614,13 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
address start = __ pc();
- __ pushl(0); // hole for return address-to-be
- __ pushad(); // push registers
+ __ push(0); // hole for return address-to-be
+ __ pusha(); // push registers
Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
BLOCK_COMMENT("call handle_unsafe_access");
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
- __ movl(next_pc, rax); // stuff next address
- __ popad();
+ __ movptr(next_pc, rax); // stuff next address
+ __ popa();
__ ret(0); // jump to next address
return start;
@@ -643,62 +644,62 @@ class StubGenerator: public StubCodeGenerator {
// [tos + 5]: saved rax, - saved by caller and bashed
Label exit, error;
- __ pushfd();
- __ increment(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
- __ pushl(rdx); // save rdx
+ __ pushf();
+ __ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
+ __ push(rdx); // save rdx
// make sure object is 'reasonable'
- __ movl(rax, Address(rsp, 4 * wordSize)); // get object
- __ testl(rax, rax);
+ __ movptr(rax, Address(rsp, 4 * wordSize)); // get object
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is ok
// Check if the oop is in the right area of memory
const int oop_mask = Universe::verify_oop_mask();
const int oop_bits = Universe::verify_oop_bits();
- __ movl(rdx, rax);
- __ andl(rdx, oop_mask);
- __ cmpl(rdx, oop_bits);
+ __ mov(rdx, rax);
+ __ andptr(rdx, oop_mask);
+ __ cmpptr(rdx, oop_bits);
__ jcc(Assembler::notZero, error);
// make sure klass is 'reasonable'
- __ movl(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
- __ testl(rax, rax);
+ __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
// Check if the klass is in the right area of memory
const int klass_mask = Universe::verify_klass_mask();
const int klass_bits = Universe::verify_klass_bits();
- __ movl(rdx, rax);
- __ andl(rdx, klass_mask);
- __ cmpl(rdx, klass_bits);
+ __ mov(rdx, rax);
+ __ andptr(rdx, klass_mask);
+ __ cmpptr(rdx, klass_bits);
__ jcc(Assembler::notZero, error);
// make sure klass' klass is 'reasonable'
- __ movl(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass
- __ testl(rax, rax);
+ __ movptr(rax, Address(rax, oopDesc::klass_offset_in_bytes())); // get klass' klass
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
- __ movl(rdx, rax);
- __ andl(rdx, klass_mask);
- __ cmpl(rdx, klass_bits);
+ __ mov(rdx, rax);
+ __ andptr(rdx, klass_mask);
+ __ cmpptr(rdx, klass_bits);
__ jcc(Assembler::notZero, error); // if klass not in right area
// of memory it is broken too.
// return if everything seems ok
__ bind(exit);
- __ movl(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
- __ popl(rdx); // restore rdx
- __ popfd(); // restore EFLAGS
+ __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
+ __ pop(rdx); // restore rdx
+ __ popf(); // restore EFLAGS
__ ret(3 * wordSize); // pop arguments
// handle errors
__ bind(error);
- __ movl(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
- __ popl(rdx); // get saved rdx back
- __ popfd(); // get saved EFLAGS off stack -- will be ignored
- __ pushad(); // push registers (eip = return address & msg are already pushed)
+ __ movptr(rax, Address(rsp, 5 * wordSize)); // get saved rax, back
+ __ pop(rdx); // get saved rdx back
+ __ popf(); // get saved EFLAGS off stack -- will be ignored
+ __ pusha(); // push registers (eip = return address & msg are already pushed)
BLOCK_COMMENT("call MacroAssembler::debug");
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
- __ popad();
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug32)));
+ __ popa();
__ ret(3 * wordSize); // pop arguments
return start;
}
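// A self-contained model of what the verify_oop stub above checks: the object
// pointer and its klass chain must be non-NULL and must fall in the expected
// address ranges. OopModel and the mask/bits parameters are stand-ins for the
// real oopDesc layout and the Universe::verify_oop_mask()/bits() values:

#include <cstdint>

struct OopModel { const OopModel* klass; };   // stand-in for klass_offset_in_bytes()

inline bool oop_looks_reasonable(const OopModel* obj,
                                 uintptr_t oop_mask,   uintptr_t oop_bits,
                                 uintptr_t klass_mask, uintptr_t klass_bits) {
  if (obj == nullptr) return true;                               // NULL oop is ok
  if (((uintptr_t)obj & oop_mask) != oop_bits) return false;     // wrong memory area
  const OopModel* k = obj->klass;
  if (k == nullptr) return false;                                // broken klass
  if (((uintptr_t)k & klass_mask) != klass_bits) return false;
  const OopModel* kk = k->klass;                                 // klass' klass
  if (kk == nullptr) return false;
  return ((uintptr_t)kk & klass_mask) == klass_bits;
}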
@@ -717,12 +718,12 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
- __ pushad(); // push registers
- __ pushl(count);
- __ pushl(start);
+ __ pusha(); // push registers
+ __ push(count);
+ __ push(start);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_pre)));
__ addl(esp, wordSize * 2);
- __ popad();
+ __ popa();
}
break;
case BarrierSet::CardTableModRef:
@@ -753,12 +754,12 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
- __ pushad(); // push registers
- __ pushl(count);
- __ pushl(start);
+ __ pusha(); // push registers
+ __ push(count);
+ __ push(start);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
__ addl(esp, wordSize * 2);
- __ popad();
+ __ popa();
}
break;
@@ -774,10 +775,10 @@ class StubGenerator: public StubCodeGenerator {
const Register end = count; // elements count; end == start+count-1
assert_different_registers(start, end);
- __ leal(end, Address(start, count, Address::times_4, -4));
- __ shrl(start, CardTableModRefBS::card_shift);
- __ shrl(end, CardTableModRefBS::card_shift);
- __ subl(end, start); // end --> count
+ __ lea(end, Address(start, count, Address::times_ptr, -wordSize));
+ __ shrptr(start, CardTableModRefBS::card_shift);
+ __ shrptr(end, CardTableModRefBS::card_shift);
+ __ subptr(end, start); // end --> count
__ BIND(L_loop);
intptr_t disp = (intptr_t) ct->byte_map_base;
Address cardtable(start, count, Address::times_1, disp);
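// The address arithmetic above reduces the copied oop range to a span of card
// indices; the loop body that follows (not shown in this hunk) dirties one byte
// per card. A self-contained model, with card_shift and the dirty value (0)
// taken as assumptions standing in for the CardTableModRefBS constants:

#include <cstdint>
#include <cstddef>

inline void dirty_cards_for_range(uint8_t* byte_map_base,
                                  uintptr_t start_addr, size_t count_words,
                                  unsigned card_shift) {
  uintptr_t last_addr  = start_addr + count_words * sizeof(intptr_t) - sizeof(intptr_t);
  uintptr_t first_card = start_addr >> card_shift;   // shrptr(start, card_shift)
  uintptr_t last_card  = last_addr  >> card_shift;   // shrptr(end, card_shift)
  for (uintptr_t card = first_card; card <= last_card; ++card) {
    byte_map_base[card] = 0;                         // assumed dirty-card value
  }
}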
@@ -823,7 +824,7 @@ class StubGenerator: public StubCodeGenerator {
__ movq(Address(from, to_from, Address::times_1, 40), mmx5);
__ movq(Address(from, to_from, Address::times_1, 48), mmx6);
__ movq(Address(from, to_from, Address::times_1, 56), mmx7);
- __ addl(from, 64);
+ __ addptr(from, 64);
__ BIND(L_copy_64_bytes);
__ subl(qword_count, 8);
__ jcc(Assembler::greaterEqual, L_copy_64_bytes_loop);
@@ -835,7 +836,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_8_bytes);
__ movq(mmx0, Address(from, 0));
__ movq(Address(from, to_from, Address::times_1), mmx0);
- __ addl(from, 8);
+ __ addptr(from, 8);
__ decrement(qword_count);
__ jcc(Assembler::greater, L_copy_8_bytes);
__ BIND(L_exit);
@@ -852,7 +853,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
Label L_copy_2_bytes, L_copy_4_bytes, L_copy_64_bytes;
- int shift = Address::times_4 - sf;
+ int shift = Address::times_ptr - sf;
const Register from = rsi; // source array address
const Register to = rdi; // destination array address
@@ -861,22 +862,22 @@ class StubGenerator: public StubCodeGenerator {
const Register saved_to = rdx; // saved destination array address
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ pushl(rsi);
- __ pushl(rdi);
- __ movl(from , Address(rsp, 12+ 4));
- __ movl(to , Address(rsp, 12+ 8));
+ __ push(rsi);
+ __ push(rdi);
+ __ movptr(from , Address(rsp, 12+ 4));
+ __ movptr(to , Address(rsp, 12+ 8));
__ movl(count, Address(rsp, 12+ 12));
if (t == T_OBJECT) {
__ testl(count, count);
__ jcc(Assembler::zero, L_0_count);
gen_write_ref_array_pre_barrier(to, count);
- __ movl(saved_to, to); // save 'to'
+ __ mov(saved_to, to); // save 'to'
}
*entry = __ pc(); // Entry point from conjoint arraycopy stub.
BLOCK_COMMENT("Entry:");
- __ subl(to, from); // to --> to_from
+ __ subptr(to, from); // to --> to_from
__ cmpl(count, 2<<shift); // Short arrays (< 8 bytes) copy by element
__ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
if (!aligned && (t == T_BYTE || t == T_SHORT)) {
@@ -896,28 +897,28 @@ class StubGenerator: public StubCodeGenerator {
__ jccb(Assembler::zero, L_skip_align2);
__ movw(rax, Address(from, 0));
__ movw(Address(from, to_from, Address::times_1, 0), rax);
- __ addl(from, 2);
+ __ addptr(from, 2);
__ subl(count, 1<<(shift-1));
__ BIND(L_skip_align2);
}
if (!VM_Version::supports_mmx()) {
- __ movl(rax, count); // save 'count'
- __ shrl(count, shift); // bytes count
- __ addl(to_from, from); // restore 'to'
- __ rep_movl();
- __ subl(to_from, from); // restore 'to_from'
- __ movl(count, rax); // restore 'count'
+ __ mov(rax, count); // save 'count'
+ __ shrl(count, shift); // bytes count
+ __ addptr(to_from, from);// restore 'to'
+ __ rep_mov();
+ __ subptr(to_from, from);// restore 'to_from'
+ __ mov(count, rax); // restore 'count'
__ jmpb(L_copy_2_bytes); // all dwords were copied
} else {
// align to 8 bytes, we know we are 4 byte aligned to start
- __ testl(from, 4);
+ __ testptr(from, 4);
__ jccb(Assembler::zero, L_copy_64_bytes);
__ movl(rax, Address(from, 0));
__ movl(Address(from, to_from, Address::times_1, 0), rax);
- __ addl(from, 4);
+ __ addptr(from, 4);
__ subl(count, 1<<shift);
__ BIND(L_copy_64_bytes);
- __ movl(rax, count);
+ __ mov(rax, count);
__ shrl(rax, shift+1); // 8 bytes chunk count
//
// Copy 8-byte chunks through MMX registers, 8 per iteration of the loop
@@ -931,7 +932,7 @@ class StubGenerator: public StubCodeGenerator {
__ movl(rax, Address(from, 0));
__ movl(Address(from, to_from, Address::times_1, 0), rax);
if (t == T_BYTE || t == T_SHORT) {
- __ addl(from, 4);
+ __ addptr(from, 4);
__ BIND(L_copy_2_bytes);
// copy tailing word
__ testl(count, 1<<(shift-1));
@@ -939,7 +940,7 @@ class StubGenerator: public StubCodeGenerator {
__ movw(rax, Address(from, 0));
__ movw(Address(from, to_from, Address::times_1, 0), rax);
if (t == T_BYTE) {
- __ addl(from, 2);
+ __ addptr(from, 2);
__ BIND(L_copy_byte);
// copy tailing byte
__ testl(count, 1);
@@ -956,15 +957,15 @@ class StubGenerator: public StubCodeGenerator {
if (t == T_OBJECT) {
__ movl(count, Address(rsp, 12+12)); // reread 'count'
- __ movl(to, saved_to); // restore 'to'
+ __ mov(to, saved_to); // restore 'to'
gen_write_ref_array_post_barrier(to, count);
__ BIND(L_0_count);
}
inc_copy_counter_np(t);
- __ popl(rdi);
- __ popl(rsi);
+ __ pop(rdi);
+ __ pop(rsi);
__ leave(); // required for proper stackwalking of RuntimeStub frame
- __ xorl(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ ret(0);
return start;
}
@@ -981,7 +982,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_0_count, L_exit, L_skip_align1, L_skip_align2, L_copy_byte;
Label L_copy_2_bytes, L_copy_4_bytes, L_copy_8_bytes, L_copy_8_bytes_loop;
- int shift = Address::times_4 - sf;
+ int shift = Address::times_ptr - sf;
const Register src = rax; // source array address
const Register dst = rdx; // destination array address
@@ -991,11 +992,11 @@ class StubGenerator: public StubCodeGenerator {
const Register end = rax; // array end address
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ pushl(rsi);
- __ pushl(rdi);
- __ movl(src , Address(rsp, 12+ 4)); // from
- __ movl(dst , Address(rsp, 12+ 8)); // to
- __ movl(count, Address(rsp, 12+12)); // count
+ __ push(rsi);
+ __ push(rdi);
+ __ movptr(src , Address(rsp, 12+ 4)); // from
+ __ movptr(dst , Address(rsp, 12+ 8)); // to
+ __ movl2ptr(count, Address(rsp, 12+12)); // count
if (t == T_OBJECT) {
gen_write_ref_array_pre_barrier(dst, count);
}
@@ -1009,15 +1010,15 @@ class StubGenerator: public StubCodeGenerator {
__ testl(count, count);
__ jcc(Assembler::zero, L_0_count);
}
- __ movl(from, src);
- __ movl(to , dst);
+ __ mov(from, src);
+ __ mov(to , dst);
// arrays overlap test
RuntimeAddress nooverlap(nooverlap_target);
- __ cmpl(dst, src);
- __ leal(end, Address(src, count, sf, 0)); // src + count * elem_size
+ __ cmpptr(dst, src);
+ __ lea(end, Address(src, count, sf, 0)); // src + count * elem_size
__ jump_cc(Assembler::belowEqual, nooverlap);
- __ cmpl(dst, end);
+ __ cmpptr(dst, end);
__ jump_cc(Assembler::aboveEqual, nooverlap);
// copy from high to low
@@ -1025,7 +1026,7 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::below, L_copy_4_bytes); // use unsigned cmp
if (t == T_BYTE || t == T_SHORT) {
// Align the end of destination array at 4 bytes address boundary
- __ leal(end, Address(dst, count, sf, 0));
+ __ lea(end, Address(dst, count, sf, 0));
if (t == T_BYTE) {
// One byte misalignment happens only for byte arrays
__ testl(end, 1);
@@ -1038,7 +1039,7 @@ class StubGenerator: public StubCodeGenerator {
// Two bytes misalignment happens only for byte and short (char) arrays
__ testl(end, 2);
__ jccb(Assembler::zero, L_skip_align2);
- __ subl(count, 1<<(shift-1));
+ __ subptr(count, 1<<(shift-1));
__ movw(rdx, Address(from, count, sf, 0));
__ movw(Address(to, count, sf, 0), rdx);
__ BIND(L_skip_align2);
@@ -1048,21 +1049,21 @@ class StubGenerator: public StubCodeGenerator {
if (!VM_Version::supports_mmx()) {
__ std();
- __ movl(rax, count); // Save 'count'
- __ movl(rdx, to); // Save 'to'
- __ leal(rsi, Address(from, count, sf, -4));
- __ leal(rdi, Address(to , count, sf, -4));
- __ shrl(count, shift); // bytes count
- __ rep_movl();
+ __ mov(rax, count); // Save 'count'
+ __ mov(rdx, to); // Save 'to'
+ __ lea(rsi, Address(from, count, sf, -4));
+ __ lea(rdi, Address(to , count, sf, -4));
+ __ shrptr(count, shift); // bytes count
+ __ rep_mov();
__ cld();
- __ movl(count, rax); // restore 'count'
+ __ mov(count, rax); // restore 'count'
__ andl(count, (1<<shift)-1); // mask the number of rest elements
- __ movl(from, Address(rsp, 12+4)); // reread 'from'
- __ movl(to, rdx); // restore 'to'
+ __ movptr(from, Address(rsp, 12+4)); // reread 'from'
+ __ mov(to, rdx); // restore 'to'
__ jmpb(L_copy_2_bytes); // all dword were copied
} else {
// Align to 8 bytes the end of array. It is aligned to 4 bytes already.
- __ testl(end, 4);
+ __ testptr(end, 4);
__ jccb(Assembler::zero, L_copy_8_bytes);
__ subl(count, 1<<shift);
__ movl(rdx, Address(from, count, sf, 0));
@@ -1111,15 +1112,15 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_2_bytes);
}
if (t == T_OBJECT) {
- __ movl(count, Address(rsp, 12+12)); // reread count
+ __ movl2ptr(count, Address(rsp, 12+12)); // reread count
gen_write_ref_array_post_barrier(to, count);
__ BIND(L_0_count);
}
inc_copy_counter_np(t);
- __ popl(rdi);
- __ popl(rsi);
+ __ pop(rdi);
+ __ pop(rsi);
__ leave(); // required for proper stackwalking of RuntimeStub frame
- __ xorl(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ ret(0);
return start;
}
@@ -1137,14 +1138,14 @@ class StubGenerator: public StubCodeGenerator {
const Register to_from = rdx; // (to - from)
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ movl(from , Address(rsp, 8+0)); // from
- __ movl(to , Address(rsp, 8+4)); // to
- __ movl(count, Address(rsp, 8+8)); // count
+ __ movptr(from , Address(rsp, 8+0)); // from
+ __ movptr(to , Address(rsp, 8+4)); // to
+ __ movl2ptr(count, Address(rsp, 8+8)); // count
*entry = __ pc(); // Entry point from conjoint arraycopy stub.
BLOCK_COMMENT("Entry:");
- __ subl(to, from); // to --> to_from
+ __ subptr(to, from); // to --> to_from
if (VM_Version::supports_mmx()) {
mmx_copy_forward(from, to_from, count);
} else {
@@ -1153,14 +1154,14 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_8_bytes_loop);
__ fild_d(Address(from, 0));
__ fistp_d(Address(from, to_from, Address::times_1));
- __ addl(from, 8);
+ __ addptr(from, 8);
__ BIND(L_copy_8_bytes);
__ decrement(count);
__ jcc(Assembler::greaterEqual, L_copy_8_bytes_loop);
}
inc_copy_counter_np(T_LONG);
__ leave(); // required for proper stackwalking of RuntimeStub frame
- __ xorl(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ ret(0);
return start;
}
@@ -1178,20 +1179,20 @@ class StubGenerator: public StubCodeGenerator {
const Register end_from = rax; // source array end address
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ movl(from , Address(rsp, 8+0)); // from
- __ movl(to , Address(rsp, 8+4)); // to
- __ movl(count, Address(rsp, 8+8)); // count
+ __ movptr(from , Address(rsp, 8+0)); // from
+ __ movptr(to , Address(rsp, 8+4)); // to
+ __ movl2ptr(count, Address(rsp, 8+8)); // count
*entry = __ pc(); // Entry point from generic arraycopy stub.
BLOCK_COMMENT("Entry:");
// arrays overlap test
- __ cmpl(to, from);
+ __ cmpptr(to, from);
RuntimeAddress nooverlap(nooverlap_target);
__ jump_cc(Assembler::belowEqual, nooverlap);
- __ leal(end_from, Address(from, count, Address::times_8, 0));
- __ cmpl(to, end_from);
- __ movl(from, Address(rsp, 8)); // from
+ __ lea(end_from, Address(from, count, Address::times_8, 0));
+ __ cmpptr(to, end_from);
+ __ movptr(from, Address(rsp, 8)); // from
__ jump_cc(Assembler::aboveEqual, nooverlap);
__ jmpb(L_copy_8_bytes);
@@ -1214,7 +1215,7 @@ class StubGenerator: public StubCodeGenerator {
}
inc_copy_counter_np(T_LONG);
__ leave(); // required for proper stackwalking of RuntimeStub frame
- __ xorl(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ ret(0);
return start;
}
@@ -1251,14 +1252,14 @@ class StubGenerator: public StubCodeGenerator {
Address super_cache_addr( sub_klass, sc_offset);
// if the pointers are equal, we are done (e.g., String[] elements)
- __ cmpl(sub_klass, super_klass_addr);
+ __ cmpptr(sub_klass, super_klass_addr);
__ jcc(Assembler::equal, L_success);
// check the supertype display:
- __ movl(temp, super_check_offset_addr);
+ __ movl2ptr(temp, super_check_offset_addr);
Address super_check_addr(sub_klass, temp, Address::times_1, 0);
- __ movl(temp, super_check_addr); // load displayed supertype
- __ cmpl(temp, super_klass_addr); // test the super type
+ __ movptr(temp, super_check_addr); // load displayed supertype
+ __ cmpptr(temp, super_klass_addr); // test the super type
__ jcc(Assembler::equal, L_success);
// if it was a primary super, we can just fail immediately
@@ -1271,31 +1272,31 @@ class StubGenerator: public StubCodeGenerator {
{
// The repne_scan instruction uses fixed registers, which we must spill.
// (We need a couple more temps in any case.)
- __ pushl(rax);
- __ pushl(rcx);
- __ pushl(rdi);
+ __ push(rax);
+ __ push(rcx);
+ __ push(rdi);
assert_different_registers(sub_klass, rax, rcx, rdi);
- __ movl(rdi, secondary_supers_addr);
+ __ movptr(rdi, secondary_supers_addr);
// Load the array length.
__ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
// Skip to start of data.
- __ addl(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Scan rcx words at [edi] for occurrence of rax,
// Set NZ/Z based on last compare
- __ movl(rax, super_klass_addr);
+ __ movptr(rax, super_klass_addr);
__ repne_scan();
// Unspill the temp. registers:
- __ popl(rdi);
- __ popl(rcx);
- __ popl(rax);
+ __ pop(rdi);
+ __ pop(rcx);
+ __ pop(rax);
}
__ jcc(Assembler::notEqual, L_failure);
// Success. Cache the super we found and proceed in triumph.
- __ movl(temp, super_klass_addr); // note: rax, is dead
- __ movl(super_cache_addr, temp);
+ __ movptr(temp, super_klass_addr); // note: rax, is dead
+ __ movptr(super_cache_addr, temp);
if (!fall_through_on_success)
__ jmp(L_success);
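// generate_type_check above emits HotSpot's two-level subtype test: probe the
// display slot named by the super's check offset, and only fall back to a
// linear scan of the secondary supers (the repne_scan) when that offset is the
// secondary-super-cache slot. A self-contained model; KlassModel, the map-based
// probe and the offset parameters are simplified stand-ins for the real Klass
// layout addressed via sco_offset / secondary_supers / sc_offset:

#include <cstddef>
#include <map>
#include <vector>

struct KlassModel {
  std::map<size_t, const KlassModel*> word_at;       // models Address(sub_klass, off)
  size_t super_check_offset = 0;                     // where callers probe for this type
  std::vector<const KlassModel*> secondary_supers;   // array scanned by repne_scan
  const KlassModel* secondary_super_cache = nullptr;
};

inline bool check_klass_subtype(KlassModel& sub, const KlassModel& super,
                                size_t secondary_cache_offset) {
  if (&sub == &super) return true;                          // pointers equal: done
  size_t off = super.super_check_offset;
  auto probe = sub.word_at.find(off);                       // load displayed supertype
  if (probe != sub.word_at.end() && probe->second == &super) return true;
  if (off != secondary_cache_offset) return false;          // primary miss is final
  for (const KlassModel* s : sub.secondary_supers) {        // scan secondaries
    if (s == &super) {
      sub.secondary_super_cache = &super;                   // cache the hit
      return true;
    }
  }
  return false;
}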
@@ -1338,9 +1339,9 @@ class StubGenerator: public StubCodeGenerator {
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ pushl(rsi);
- __ pushl(rdi);
- __ pushl(rbx);
+ __ push(rsi);
+ __ push(rdi);
+ __ push(rbx);
Address from_arg(rsp, 16+ 4); // from
Address to_arg(rsp, 16+ 8); // to
@@ -1349,9 +1350,9 @@ class StubGenerator: public StubCodeGenerator {
Address ckval_arg(rsp, 16+20); // super_klass
// Load up:
- __ movl(from, from_arg);
- __ movl(to, to_arg);
- __ movl(length, length_arg);
+ __ movptr(from, from_arg);
+ __ movptr(to, to_arg);
+ __ movl2ptr(length, length_arg);
*entry = __ pc(); // Entry point from generic arraycopy stub.
BLOCK_COMMENT("Entry:");
@@ -1364,28 +1365,28 @@ class StubGenerator: public StubCodeGenerator {
// checked.
// Loop-invariant addresses. They are exclusive end pointers.
- Address end_from_addr(from, length, Address::times_4, 0);
- Address end_to_addr(to, length, Address::times_4, 0);
+ Address end_from_addr(from, length, Address::times_ptr, 0);
+ Address end_to_addr(to, length, Address::times_ptr, 0);
Register end_from = from; // re-use
Register end_to = to; // re-use
Register count = length; // re-use
// Loop-variant addresses. They assume post-incremented count < 0.
- Address from_element_addr(end_from, count, Address::times_4, 0);
- Address to_element_addr(end_to, count, Address::times_4, 0);
+ Address from_element_addr(end_from, count, Address::times_ptr, 0);
+ Address to_element_addr(end_to, count, Address::times_ptr, 0);
Address elem_klass_addr(elem, oopDesc::klass_offset_in_bytes());
// Copy from low to high addresses, indexed from the end of each array.
- __ leal(end_from, end_from_addr);
- __ leal(end_to, end_to_addr);
+ __ lea(end_from, end_from_addr);
+ __ lea(end_to, end_to_addr);
gen_write_ref_array_pre_barrier(to, count);
assert(length == count, ""); // else fix next line:
- __ negl(count); // negate and test the length
+ __ negptr(count); // negate and test the length
__ jccb(Assembler::notZero, L_load_element);
// Empty array: Nothing to do.
- __ xorl(rax, rax); // return 0 on (trivial) success
+ __ xorptr(rax, rax); // return 0 on (trivial) success
__ jmp(L_done);
// ======== begin loop ========
@@ -1396,20 +1397,20 @@ class StubGenerator: public StubCodeGenerator {
__ align(16);
__ BIND(L_store_element);
- __ movl(to_element_addr, elem); // store the oop
+ __ movptr(to_element_addr, elem); // store the oop
__ increment(count); // increment the count toward zero
__ jccb(Assembler::zero, L_do_card_marks);
// ======== loop entry is here ========
__ BIND(L_load_element);
- __ movl(elem, from_element_addr); // load the oop
- __ testl(elem, elem);
+ __ movptr(elem, from_element_addr); // load the oop
+ __ testptr(elem, elem);
__ jccb(Assembler::zero, L_store_element);
// (Could do a trick here: Remember last successful non-null
// element stored and make a quick oop equality check on it.)
- __ movl(elem_klass, elem_klass_addr); // query the object klass
+ __ movptr(elem_klass, elem_klass_addr); // query the object klass
generate_type_check(elem_klass, ckoff_arg, ckval_arg, temp,
&L_store_element, NULL);
// (On fall-through, we have failed the element type check.)
@@ -1420,25 +1421,25 @@ class StubGenerator: public StubCodeGenerator {
// Emit GC store barriers for the oops we have copied (length_arg + count),
// and report their number to the caller.
__ addl(count, length_arg); // transfers = (length - remaining)
- __ movl(rax, count); // save the value
- __ notl(rax); // report (-1^K) to caller
- __ movl(to, to_arg); // reload
+ __ movl2ptr(rax, count); // save the value
+ __ notptr(rax); // report (-1^K) to caller
+ __ movptr(to, to_arg); // reload
assert_different_registers(to, count, rax);
gen_write_ref_array_post_barrier(to, count);
__ jmpb(L_done);
// Come here on success only.
__ BIND(L_do_card_marks);
- __ movl(count, length_arg);
- __ movl(to, to_arg); // reload
+ __ movl2ptr(count, length_arg);
+ __ movptr(to, to_arg); // reload
gen_write_ref_array_post_barrier(to, count);
- __ xorl(rax, rax); // return 0 on success
+ __ xorptr(rax, rax); // return 0 on success
// Common exit point (success or failure).
__ BIND(L_done);
- __ popl(rbx);
- __ popl(rdi);
- __ popl(rsi);
+ __ pop(rbx);
+ __ pop(rdi);
+ __ pop(rsi);
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
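// The loop above copies oops one element at a time, type-checking each store;
// on the first failing element it reports how many were transferred as -1 ^ K,
// the convention noted in the comments. A self-contained model, with the
// can_store predicate standing in for the generate_type_check call and the GC
// write barriers omitted:

#include <functional>

inline int checkcast_copy_model(const void** from, const void** to, int length,
                                const std::function<bool(const void*)>& can_store) {
  for (int i = 0; i < length; ++i) {
    const void* elem = from[i];
    if (elem != nullptr && !can_store(elem)) {
      return ~i;                       // i elements copied: report (-1 ^ i) to caller
    }
    to[i] = elem;                      // NULL elements always store
  }
  return 0;                            // 0 == full success
}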
@@ -1480,24 +1481,24 @@ class StubGenerator: public StubCodeGenerator {
const Register count = rcx; // elements count
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ pushl(rsi);
- __ pushl(rdi);
+ __ push(rsi);
+ __ push(rdi);
Address from_arg(rsp, 12+ 4); // from
Address to_arg(rsp, 12+ 8); // to
Address count_arg(rsp, 12+12); // byte count
// Load up:
- __ movl(from , from_arg);
- __ movl(to , to_arg);
- __ movl(count, count_arg);
+ __ movptr(from , from_arg);
+ __ movptr(to , to_arg);
+ __ movl2ptr(count, count_arg);
// bump this on entry, not on exit:
inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
const Register bits = rsi;
- __ movl(bits, from);
- __ orl(bits, to);
- __ orl(bits, count);
+ __ mov(bits, from);
+ __ orptr(bits, to);
+ __ orptr(bits, count);
__ testl(bits, BytesPerLong-1);
__ jccb(Assembler::zero, L_long_aligned);
@@ -1509,20 +1510,20 @@ class StubGenerator: public StubCodeGenerator {
__ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
__ BIND(L_short_aligned);
- __ shrl(count, LogBytesPerShort); // size => short_count
+ __ shrptr(count, LogBytesPerShort); // size => short_count
__ movl(count_arg, count); // update 'count'
__ jump(RuntimeAddress(short_copy_entry));
__ BIND(L_int_aligned);
- __ shrl(count, LogBytesPerInt); // size => int_count
+ __ shrptr(count, LogBytesPerInt); // size => int_count
__ movl(count_arg, count); // update 'count'
__ jump(RuntimeAddress(int_copy_entry));
__ BIND(L_long_aligned);
- __ shrl(count, LogBytesPerLong); // size => qword_count
+ __ shrptr(count, LogBytesPerLong); // size => qword_count
__ movl(count_arg, count); // update 'count'
- __ popl(rdi); // Do pops here since jlong_arraycopy stub does not do it.
- __ popl(rsi);
+ __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
+ __ pop(rsi);
__ jump(RuntimeAddress(long_copy_entry));
return start;
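// The unsafe arraycopy stub above ORs the source address, destination address
// and byte count together and branches on the low bits: the widest alignment
// shared by all three picks which element-wise copy entry gets the jump. A
// self-contained sketch of that selection (the returned size stands in for the
// jump to the jlong/jint/jshort/jbyte copy entry; the stub also shifts the byte
// count right by the matching log2 before jumping):

#include <cstdint>
#include <cstddef>

inline size_t unsafe_copy_element_size(const void* from, const void* to, size_t size) {
  uintptr_t bits = (uintptr_t)from | (uintptr_t)to | (uintptr_t)size;
  if ((bits & (sizeof(int64_t) - 1)) == 0) return sizeof(int64_t);  // long_copy_entry
  if ((bits & (sizeof(int32_t) - 1)) == 0) return sizeof(int32_t);  // int_copy_entry
  if ((bits & (sizeof(int16_t) - 1)) == 0) return sizeof(int16_t);  // short_copy_entry
  return 1;                                                         // byte_copy_entry
}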
@@ -1595,8 +1596,8 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
__ enter(); // required for proper stackwalking of RuntimeStub frame
- __ pushl(rsi);
- __ pushl(rdi);
+ __ push(rsi);
+ __ push(rdi);
// bump this on entry, not on exit:
inc_counter_np(SharedRuntime::_generic_array_copy_ctr);
@@ -1629,27 +1630,27 @@ class StubGenerator: public StubCodeGenerator {
const Register length = rcx; // transfer count
// if (src == NULL) return -1;
- __ movl(src, SRC); // src oop
- __ testl(src, src);
+ __ movptr(src, SRC); // src oop
+ __ testptr(src, src);
__ jccb(Assembler::zero, L_failed_0);
// if (src_pos < 0) return -1;
- __ movl(src_pos, SRC_POS); // src_pos
+ __ movl2ptr(src_pos, SRC_POS); // src_pos
__ testl(src_pos, src_pos);
__ jccb(Assembler::negative, L_failed_0);
// if (dst == NULL) return -1;
- __ movl(dst, DST); // dst oop
- __ testl(dst, dst);
+ __ movptr(dst, DST); // dst oop
+ __ testptr(dst, dst);
__ jccb(Assembler::zero, L_failed_0);
// if (dst_pos < 0) return -1;
- __ movl(dst_pos, DST_POS); // dst_pos
+ __ movl2ptr(dst_pos, DST_POS); // dst_pos
__ testl(dst_pos, dst_pos);
__ jccb(Assembler::negative, L_failed_0);
// if (length < 0) return -1;
- __ movl(length, LENGTH); // length
+ __ movl2ptr(length, LENGTH); // length
__ testl(length, length);
__ jccb(Assembler::negative, L_failed_0);
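// The checks above are the generic arraycopy stub's cheap argument screen: any
// NULL array or negative position/length returns -1 so the caller falls back to
// the slow path, which raises the proper exception. A self-contained model of
// just this screening step (range checks against the actual array lengths and
// the klass comparisons happen later in the stub):

inline int arraycopy_arg_screen(const void* src, int src_pos,
                                const void* dst, int dst_pos, int length) {
  if (src == nullptr || dst == nullptr) return -1;
  if (src_pos < 0 || dst_pos < 0)       return -1;
  if (length < 0)                       return -1;
  return 0;   // plausible so far; continue with klass and range checks
}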
@@ -1657,18 +1658,18 @@ class StubGenerator: public StubCodeGenerator {
Address src_klass_addr(src, oopDesc::klass_offset_in_bytes());
Address dst_klass_addr(dst, oopDesc::klass_offset_in_bytes());
const Register rcx_src_klass = rcx; // array klass
- __ movl(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rcx_src_klass, Address(src, oopDesc::klass_offset_in_bytes()));
#ifdef ASSERT
// assert(src->klass() != NULL);
BLOCK_COMMENT("assert klasses not null");
{ Label L1, L2;
- __ testl(rcx_src_klass, rcx_src_klass);
+ __ testptr(rcx_src_klass, rcx_src_klass);
__ jccb(Assembler::notZero, L2); // it is broken if klass is NULL
__ bind(L1);
__ stop("broken null klass");
__ bind(L2);
- __ cmpl(dst_klass_addr, 0);
+ __ cmpptr(dst_klass_addr, (int32_t)NULL_WORD);
__ jccb(Assembler::equal, L1); // this would be broken also
BLOCK_COMMENT("assert done");
}
@@ -1692,7 +1693,7 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::equal, L_objArray);
// if (src->klass() != dst->klass()) return -1;
- __ cmpl(rcx_src_klass, dst_klass_addr);
+ __ cmpptr(rcx_src_klass, dst_klass_addr);
__ jccb(Assembler::notEqual, L_failed_0);
const Register rcx_lh = rcx; // layout helper
@@ -1726,12 +1727,12 @@ class StubGenerator: public StubCodeGenerator {
const Register dst_array = dst; // dst array offset
const Register rdi_elsize = rdi; // log2 element size
- __ movl(rsi_offset, rcx_lh);
- __ shrl(rsi_offset, Klass::_lh_header_size_shift);
- __ andl(rsi_offset, Klass::_lh_header_size_mask); // array_offset
- __ addl(src_array, rsi_offset); // src array offset
- __ addl(dst_array, rsi_offset); // dst array offset
- __ andl(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize
+ __ mov(rsi_offset, rcx_lh);
+ __ shrptr(rsi_offset, Klass::_lh_header_size_shift);
+ __ andptr(rsi_offset, Klass::_lh_header_size_mask); // array_offset
+ __ addptr(src_array, rsi_offset); // src array offset
+ __ addptr(dst_array, rsi_offset); // dst array offset
+ __ andptr(rcx_lh, Klass::_lh_log2_element_size_mask); // log2 elsize
// next registers should be set before the jump to corresponding stub
const Register from = src; // source array address
@@ -1743,17 +1744,17 @@ class StubGenerator: public StubCodeGenerator {
#define COUNT Address(rsp, 12+12) // Only for oop arraycopy
BLOCK_COMMENT("scale indexes to element size");
- __ movl(rsi, SRC_POS); // src_pos
- __ shll(rsi); // src_pos << rcx (log2 elsize)
+ __ movl2ptr(rsi, SRC_POS); // src_pos
+ __ shlptr(rsi); // src_pos << rcx (log2 elsize)
assert(src_array == from, "");
- __ addl(from, rsi); // from = src_array + SRC_POS << log2 elsize
- __ movl(rdi, DST_POS); // dst_pos
- __ shll(rdi); // dst_pos << rcx (log2 elsize)
+ __ addptr(from, rsi); // from = src_array + SRC_POS << log2 elsize
+ __ movl2ptr(rdi, DST_POS); // dst_pos
+ __ shlptr(rdi); // dst_pos << rcx (log2 elsize)
assert(dst_array == to, "");
- __ addl(to, rdi); // to = dst_array + DST_POS << log2 elsize
- __ movl(FROM, from); // src_addr
- __ movl(rdi_elsize, rcx_lh); // log2 elsize
- __ movl(count, LENGTH); // elements count
+ __ addptr(to, rdi); // to = dst_array + DST_POS << log2 elsize
+ __ movptr(FROM, from); // src_addr
+ __ mov(rdi_elsize, rcx_lh); // log2 elsize
+ __ movl2ptr(count, LENGTH); // elements count
BLOCK_COMMENT("choose copy loop based on element size");
__ cmpl(rdi_elsize, 0);
@@ -1767,15 +1768,15 @@ class StubGenerator: public StubCodeGenerator {
__ cmpl(rdi_elsize, LogBytesPerLong);
__ jccb(Assembler::notEqual, L_failed);
#endif
- __ popl(rdi); // Do pops here since jlong_arraycopy stub does not do it.
- __ popl(rsi);
+ __ pop(rdi); // Do pops here since jlong_arraycopy stub does not do it.
+ __ pop(rsi);
__ jump(RuntimeAddress(entry_jlong_arraycopy));
__ BIND(L_failed);
- __ xorl(rax, rax);
- __ notl(rax); // return -1
- __ popl(rdi);
- __ popl(rsi);
+ __ xorptr(rax, rax);
+ __ notptr(rax); // return -1
+ __ pop(rdi);
+ __ pop(rsi);
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1785,7 +1786,7 @@ class StubGenerator: public StubCodeGenerator {
Label L_plain_copy, L_checkcast_copy;
// test array classes for subtyping
- __ cmpl(rcx_src_klass, dst_klass_addr); // usual case is exact equality
+ __ cmpptr(rcx_src_klass, dst_klass_addr); // usual case is exact equality
__ jccb(Assembler::notEqual, L_checkcast_copy);
// Identically typed arrays can be copied without element-wise checks.
@@ -1793,15 +1794,15 @@ class StubGenerator: public StubCodeGenerator {
arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
__ BIND(L_plain_copy);
- __ movl(count, LENGTH); // elements count
- __ movl(src_pos, SRC_POS); // reload src_pos
- __ leal(from, Address(src, src_pos, Address::times_4,
- arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
- __ movl(dst_pos, DST_POS); // reload dst_pos
- __ leal(to, Address(dst, dst_pos, Address::times_4,
- arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
- __ movl(FROM, from); // src_addr
- __ movl(TO, to); // dst_addr
+ __ movl2ptr(count, LENGTH); // elements count
+ __ movl2ptr(src_pos, SRC_POS); // reload src_pos
+ __ lea(from, Address(src, src_pos, Address::times_ptr,
+ arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
+ __ movl2ptr(dst_pos, DST_POS); // reload dst_pos
+ __ lea(to, Address(dst, dst_pos, Address::times_ptr,
+ arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
+ __ movptr(FROM, from); // src_addr
+ __ movptr(TO, to); // dst_addr
__ movl(COUNT, count); // count
__ jump(RuntimeAddress(entry_oop_arraycopy));
@@ -1821,37 +1822,37 @@ class StubGenerator: public StubCodeGenerator {
Address dst_klass_lh_addr(rsi_dst_klass, lh_offset);
// Before looking at dst.length, make sure dst is also an objArray.
- __ movl(rsi_dst_klass, dst_klass_addr);
+ __ movptr(rsi_dst_klass, dst_klass_addr);
__ cmpl(dst_klass_lh_addr, objArray_lh);
__ jccb(Assembler::notEqual, L_failed);
// It is safe to examine both src.length and dst.length.
- __ movl(src_pos, SRC_POS); // reload rsi
+ __ movl2ptr(src_pos, SRC_POS); // reload rsi
arraycopy_range_checks(src, src_pos, dst, dst_pos, LENGTH, L_failed);
// (Now src_pos and dst_pos are killed, but not src and dst.)
// We'll need this temp (don't forget to pop it after the type check).
- __ pushl(rbx);
+ __ push(rbx);
Register rbx_src_klass = rbx;
- __ movl(rbx_src_klass, rcx_src_klass); // spill away from rcx
- __ movl(rsi_dst_klass, dst_klass_addr);
+ __ mov(rbx_src_klass, rcx_src_klass); // spill away from rcx
+ __ movptr(rsi_dst_klass, dst_klass_addr);
Address super_check_offset_addr(rsi_dst_klass, sco_offset);
Label L_fail_array_check;
generate_type_check(rbx_src_klass,
super_check_offset_addr, dst_klass_addr,
rdi_temp, NULL, &L_fail_array_check);
// (On fall-through, we have passed the array type check.)
- __ popl(rbx);
+ __ pop(rbx);
__ jmp(L_plain_copy);
__ BIND(L_fail_array_check);
// Reshuffle arguments so we can call checkcast_arraycopy:
// match initial saves for checkcast_arraycopy
- // pushl(rsi); // already done; see above
- // pushl(rdi); // already done; see above
- // pushl(rbx); // already done; see above
+ // push(rsi); // already done; see above
+ // push(rdi); // already done; see above
+ // push(rbx); // already done; see above
// Marshal outgoing arguments now, freeing registers.
Address from_arg(rsp, 16+ 4); // from
@@ -1866,24 +1867,24 @@ class StubGenerator: public StubCodeGenerator {
// push rbx, changed the incoming offsets (why not just use rbp,??)
// assert(SRC_POS_arg.disp() == SRC_POS.disp() + 4, "");
- __ movl(rbx, Address(rsi_dst_klass, ek_offset));
- __ movl(length, LENGTH_arg); // reload elements count
- __ movl(src_pos, SRC_POS_arg); // reload src_pos
- __ movl(dst_pos, DST_POS_arg); // reload dst_pos
+ __ movptr(rbx, Address(rsi_dst_klass, ek_offset));
+ __ movl2ptr(length, LENGTH_arg); // reload elements count
+ __ movl2ptr(src_pos, SRC_POS_arg); // reload src_pos
+ __ movl2ptr(dst_pos, DST_POS_arg); // reload dst_pos
- __ movl(ckval_arg, rbx); // destination element type
+ __ movptr(ckval_arg, rbx); // destination element type
__ movl(rbx, Address(rbx, sco_offset));
__ movl(ckoff_arg, rbx); // corresponding class check offset
__ movl(length_arg, length); // outgoing length argument
- __ leal(from, Address(src, src_pos, Address::times_4,
+ __ lea(from, Address(src, src_pos, Address::times_ptr,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
- __ movl(from_arg, from);
+ __ movptr(from_arg, from);
- __ leal(to, Address(dst, dst_pos, Address::times_4,
+ __ lea(to, Address(dst, dst_pos, Address::times_ptr,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
- __ movl(to_arg, to);
+ __ movptr(to_arg, to);
__ jump(RuntimeAddress(entry_checkcast_arraycopy));
}
@@ -1934,10 +1935,10 @@ class StubGenerator: public StubCodeGenerator {
&entry_jint_arraycopy, "jint_arraycopy");
StubRoutines::_oop_disjoint_arraycopy =
- generate_disjoint_copy(T_OBJECT, true, Address::times_4, &entry,
+ generate_disjoint_copy(T_OBJECT, true, Address::times_ptr, &entry,
"oop_disjoint_arraycopy");
StubRoutines::_oop_arraycopy =
- generate_conjoint_copy(T_OBJECT, true, Address::times_4, entry,
+ generate_conjoint_copy(T_OBJECT, true, Address::times_ptr, entry,
&entry_oop_arraycopy, "oop_arraycopy");
StubRoutines::_jlong_disjoint_arraycopy =
@@ -2037,21 +2038,21 @@ class StubGenerator: public StubCodeGenerator {
Register java_thread = rbx;
__ get_thread(java_thread);
if (restore_saved_exception_pc) {
- __ movl(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset())));
- __ pushl(rax);
+ __ movptr(rax, Address(java_thread, in_bytes(JavaThread::saved_exception_pc_offset())));
+ __ push(rax);
}
__ enter(); // required for proper stackwalking of RuntimeStub frame
// pc and rbp, already pushed
- __ subl(rsp, (framesize-2) * wordSize); // prolog
+ __ subptr(rsp, (framesize-2) * wordSize); // prolog
// Frame is now completed as far as size and linkage.
int frame_complete = __ pc() - start;
// push java thread (becomes first argument of C function)
- __ movl(Address(rsp, thread_off * wordSize), java_thread);
+ __ movptr(Address(rsp, thread_off * wordSize), java_thread);
// Set up last_Java_sp and last_Java_fp
__ set_last_Java_frame(java_thread, rsp, rbp, NULL);
@@ -2075,7 +2076,7 @@ class StubGenerator: public StubCodeGenerator {
// check for pending exceptions
#ifdef ASSERT
Label L;
- __ cmpl(Address(java_thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ cmpptr(Address(java_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, L);
__ should_not_reach_here();
__ bind(L);
@@ -2137,8 +2138,8 @@ class StubGenerator: public StubCodeGenerator {
// platform dependent
create_control_words();
- StubRoutines::i486::_verify_mxcsr_entry = generate_verify_mxcsr();
- StubRoutines::i486::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
+ StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
+ StubRoutines::x86::_verify_fpu_cntrl_wrd_entry = generate_verify_fpu_cntrl_wrd();
StubRoutines::_d2i_wrapper = generate_d2i_wrapper(T_INT,
CAST_FROM_FN_PTR(address, SharedRuntime::d2i));
StubRoutines::_d2l_wrapper = generate_d2i_wrapper(T_LONG,
diff --git a/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index c3b61d5bf..6964a1eb0 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -31,6 +31,7 @@
#define __ _masm->
#define TIMES_OOP (UseCompressedOops ? Address::times_4 : Address::times_8)
+#define a__ ((Assembler*)_masm)->
#ifdef PRODUCT
#define BLOCK_COMMENT(str) /* nothing */
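TIMES_OOP picks the array-index scale to match the in-heap oop size: 4 bytes with compressed oops, 8 without. A small sketch of the address arithmetic it stands for, using a plain variable in place of the real UseCompressedOops flag:

// Sketch of the scaled-index addressing TIMES_OOP selects.
#include <cstddef>
#include <cstdint>
#include <cstdio>

static bool UseCompressedOops = true;

static uintptr_t oop_element_addr(uintptr_t base, size_t index, size_t header_bytes) {
  size_t scale = UseCompressedOops ? 4 : 8;       // Address::times_4 vs Address::times_8
  return base + index * scale + header_bytes;     // base + index*scale + array header
}

int main() {
  std::printf("%#zx\n", (size_t)oop_element_addr(0x10000, 3, 16));
  return 0;
}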
@@ -210,32 +211,32 @@ class StubGenerator: public StubCodeGenerator {
// stub code
__ enter();
- __ subq(rsp, -rsp_after_call_off * wordSize);
+ __ subptr(rsp, -rsp_after_call_off * wordSize);
// save register parameters
#ifndef _WIN64
- __ movq(parameters, c_rarg5); // parameters
- __ movq(entry_point, c_rarg4); // entry_point
+ __ movptr(parameters, c_rarg5); // parameters
+ __ movptr(entry_point, c_rarg4); // entry_point
#endif
- __ movq(method, c_rarg3); // method
- __ movl(result_type, c_rarg2); // result type
- __ movq(result, c_rarg1); // result
- __ movq(call_wrapper, c_rarg0); // call wrapper
+ __ movptr(method, c_rarg3); // method
+ __ movl(result_type, c_rarg2); // result type
+ __ movptr(result, c_rarg1); // result
+ __ movptr(call_wrapper, c_rarg0); // call wrapper
// save regs belonging to calling function
- __ movq(rbx_save, rbx);
- __ movq(r12_save, r12);
- __ movq(r13_save, r13);
- __ movq(r14_save, r14);
- __ movq(r15_save, r15);
+ __ movptr(rbx_save, rbx);
+ __ movptr(r12_save, r12);
+ __ movptr(r13_save, r13);
+ __ movptr(r14_save, r14);
+ __ movptr(r15_save, r15);
#ifdef _WIN64
const Address rdi_save(rbp, rdi_off * wordSize);
const Address rsi_save(rbp, rsi_off * wordSize);
- __ movq(rsi_save, rsi);
- __ movq(rdi_save, rdi);
+ __ movptr(rsi_save, rsi);
+ __ movptr(rdi_save, rdi);
#else
const Address mxcsr_save(rbp, mxcsr_off * wordSize);
{
@@ -243,7 +244,7 @@ class StubGenerator: public StubCodeGenerator {
__ stmxcsr(mxcsr_save);
__ movl(rax, mxcsr_save);
__ andl(rax, MXCSR_MASK); // Only check control and mask bits
- ExternalAddress mxcsr_std(StubRoutines::amd64::mxcsr_std());
+ ExternalAddress mxcsr_std(StubRoutines::x86::mxcsr_std());
__ cmp32(rax, mxcsr_std);
__ jcc(Assembler::equal, skip_ldmx);
__ ldmxcsr(mxcsr_std);
@@ -252,14 +253,14 @@ class StubGenerator: public StubCodeGenerator {
#endif
// Load up thread register
- __ movq(r15_thread, thread);
+ __ movptr(r15_thread, thread);
__ reinit_heapbase();
#ifdef ASSERT
// make sure we have no pending exceptions
{
Label L;
- __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
__ stop("StubRoutines::call_stub: entered with pending exception");
__ bind(L);
@@ -274,25 +275,25 @@ class StubGenerator: public StubCodeGenerator {
__ jcc(Assembler::zero, parameters_done);
Label loop;
- __ movq(c_rarg2, parameters); // parameter pointer
- __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
+ __ movptr(c_rarg2, parameters); // parameter pointer
+ __ movl(c_rarg1, c_rarg3); // parameter counter is in c_rarg1
__ BIND(loop);
if (TaggedStackInterpreter) {
- __ movq(rax, Address(c_rarg2, 0)); // get tag
- __ addq(c_rarg2, wordSize); // advance to next tag
- __ pushq(rax); // pass tag
+ __ movl(rax, Address(c_rarg2, 0)); // get tag
+ __ addptr(c_rarg2, wordSize); // advance to next tag
+ __ push(rax); // pass tag
}
- __ movq(rax, Address(c_rarg2, 0)); // get parameter
- __ addq(c_rarg2, wordSize); // advance to next parameter
- __ decrementl(c_rarg1); // decrement counter
- __ pushq(rax); // pass parameter
+ __ movptr(rax, Address(c_rarg2, 0));// get parameter
+ __ addptr(c_rarg2, wordSize); // advance to next parameter
+ __ decrementl(c_rarg1); // decrement counter
+ __ push(rax); // pass parameter
__ jcc(Assembler::notZero, loop);
// call Java function
__ BIND(parameters_done);
- __ movq(rbx, method); // get methodOop
- __ movq(c_rarg1, entry_point); // get entry_point
- __ movq(r13, rsp); // set sender sp
+ __ movptr(rbx, method); // get methodOop
+ __ movptr(c_rarg1, entry_point); // get entry_point
+ __ mov(r13, rsp); // set sender sp
BLOCK_COMMENT("call Java function");
__ call(c_rarg1);
@@ -301,7 +302,7 @@ class StubGenerator: public StubCodeGenerator {
// store result depending on type (everything that is not
// T_OBJECT, T_LONG, T_FLOAT or T_DOUBLE is treated as T_INT)
- __ movq(c_rarg0, result);
+ __ movptr(c_rarg0, result);
Label is_long, is_float, is_double, exit;
__ movl(c_rarg1, result_type);
__ cmpl(c_rarg1, T_OBJECT);
@@ -319,16 +320,16 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(exit);
// pop parameters
- __ leaq(rsp, rsp_after_call);
+ __ lea(rsp, rsp_after_call);
#ifdef ASSERT
// verify that threads correspond
{
Label L, S;
- __ cmpq(r15_thread, thread);
+ __ cmpptr(r15_thread, thread);
__ jcc(Assembler::notEqual, S);
__ get_thread(rbx);
- __ cmpq(r15_thread, rbx);
+ __ cmpptr(r15_thread, rbx);
__ jcc(Assembler::equal, L);
__ bind(S);
__ jcc(Assembler::equal, L);
@@ -338,24 +339,24 @@ class StubGenerator: public StubCodeGenerator {
#endif
// restore regs belonging to calling function
- __ movq(r15, r15_save);
- __ movq(r14, r14_save);
- __ movq(r13, r13_save);
- __ movq(r12, r12_save);
- __ movq(rbx, rbx_save);
+ __ movptr(r15, r15_save);
+ __ movptr(r14, r14_save);
+ __ movptr(r13, r13_save);
+ __ movptr(r12, r12_save);
+ __ movptr(rbx, rbx_save);
#ifdef _WIN64
- __ movq(rdi, rdi_save);
- __ movq(rsi, rsi_save);
+ __ movptr(rdi, rdi_save);
+ __ movptr(rsi, rsi_save);
#else
__ ldmxcsr(mxcsr_save);
#endif
// restore rsp
- __ addq(rsp, -rsp_after_call_off * wordSize);
+ __ addptr(rsp, -rsp_after_call_off * wordSize);
// return
- __ popq(rbp);
+ __ pop(rbp);
__ ret(0);
// handle return types different from T_INT
@@ -398,10 +399,10 @@ class StubGenerator: public StubCodeGenerator {
// verify that threads correspond
{
Label L, S;
- __ cmpq(r15_thread, thread);
+ __ cmpptr(r15_thread, thread);
__ jcc(Assembler::notEqual, S);
__ get_thread(rbx);
- __ cmpq(r15_thread, rbx);
+ __ cmpptr(r15_thread, rbx);
__ jcc(Assembler::equal, L);
__ bind(S);
__ stop("StubRoutines::catch_exception: threads must correspond");
@@ -412,9 +413,9 @@ class StubGenerator: public StubCodeGenerator {
// set pending exception
__ verify_oop(rax);
- __ movq(Address(r15_thread, Thread::pending_exception_offset()), rax);
+ __ movptr(Address(r15_thread, Thread::pending_exception_offset()), rax);
__ lea(rscratch1, ExternalAddress((address)__FILE__));
- __ movq(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
+ __ movptr(Address(r15_thread, Thread::exception_file_offset()), rscratch1);
__ movl(Address(r15_thread, Thread::exception_line_offset()), (int) __LINE__);
// complete return to VM
@@ -453,7 +454,7 @@ class StubGenerator: public StubCodeGenerator {
// make sure this code is only executed if there is a pending exception
{
Label L;
- __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL);
__ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (1)");
__ bind(L);
@@ -461,23 +462,23 @@ class StubGenerator: public StubCodeGenerator {
#endif
// compute exception handler into rbx
- __ movq(c_rarg0, Address(rsp, 0));
+ __ movptr(c_rarg0, Address(rsp, 0));
BLOCK_COMMENT("call exception_handler_for_return_address");
__ call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address),
c_rarg0);
- __ movq(rbx, rax);
+ __ mov(rbx, rax);
// setup rax & rdx, remove return address & clear pending exception
- __ popq(rdx);
- __ movq(rax, Address(r15_thread, Thread::pending_exception_offset()));
+ __ pop(rdx);
+ __ movptr(rax, Address(r15_thread, Thread::pending_exception_offset()));
__ movptr(Address(r15_thread, Thread::pending_exception_offset()), (int)NULL_WORD);
#ifdef ASSERT
// make sure exception is set
{
Label L;
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::notEqual, L);
__ stop("StubRoutines::forward exception: no pending exception (2)");
__ bind(L);
@@ -525,8 +526,8 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "atomic_xchg_ptr");
address start = __ pc();
- __ movq(rax, c_rarg0); // Copy to eax we need a return value anyhow
- __ xchgq(rax, Address(c_rarg1, 0)); // automatic LOCK
+ __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
+ __ xchgptr(rax, Address(c_rarg1, 0)); // automatic LOCK
__ ret(0);
return start;
@@ -619,10 +620,10 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "atomic_add_ptr");
address start = __ pc();
- __ movq(rax, c_rarg0); // Copy to eax we need a return value anyhow
+ __ movptr(rax, c_rarg0); // Copy to eax we need a return value anyhow
if ( os::is_MP() ) __ lock();
- __ xaddl(Address(c_rarg1, 0), c_rarg0);
- __ addl(rax, c_rarg0);
+ __ xaddptr(Address(c_rarg1, 0), c_rarg0);
+ __ addptr(rax, c_rarg0);
__ ret(0);
return start;
@@ -655,9 +656,9 @@ class StubGenerator: public StubCodeGenerator {
address start = __ pc();
__ enter();
- __ movq(rax, old_fp); // callers fp
- __ movq(rax, older_fp); // the frame for ps()
- __ popq(rbp);
+ __ movptr(rax, old_fp); // callers fp
+ __ movptr(rax, older_fp); // the frame for ps()
+ __ pop(rbp);
__ ret(0);
return start;
@@ -678,21 +679,21 @@ class StubGenerator: public StubCodeGenerator {
if (CheckJNICalls) {
Label ok_ret;
- __ pushq(rax);
- __ subq(rsp, wordSize); // allocate a temp location
+ __ push(rax);
+ __ subptr(rsp, wordSize); // allocate a temp location
__ stmxcsr(mxcsr_save);
__ movl(rax, mxcsr_save);
__ andl(rax, MXCSR_MASK); // Only check control and mask bits
- __ cmpl(rax, *(int *)(StubRoutines::amd64::mxcsr_std()));
+ __ cmpl(rax, *(int *)(StubRoutines::x86::mxcsr_std()));
__ jcc(Assembler::equal, ok_ret);
__ warn("MXCSR changed by native JNI code, use -XX:+RestoreMXCSROnJNICall");
- __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
+ __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
__ bind(ok_ret);
- __ addq(rsp, wordSize);
- __ popq(rax);
+ __ addptr(rsp, wordSize);
+ __ pop(rax);
}
__ ret(0);
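The verify_mxcsr stub reads MXCSR, masks off the sticky status flags, and compares the control/rounding bits against the expected default before warning and restoring it. A behavioral sketch, assuming MXCSR_MASK selects only the control and mask bits (0xFFC0) and 0x1F80 is the expected standard value, as the mxcsr_std mask generated later in this file suggests:

// Behavioral sketch of the MXCSR check; constants are assumptions stated above.
#include <cstdint>
#include <cstdio>
#include <xmmintrin.h>

int main() {
  const uint32_t MXCSR_MASK = 0xFFC0;   // assumed: control/mask bits only
  const uint32_t mxcsr_std  = 0x1F80;   // default: all exceptions masked, round-nearest
  uint32_t cur = _mm_getcsr();
  if ((cur & MXCSR_MASK) != mxcsr_std) {
    std::printf("MXCSR changed by native code: 0x%x\n", cur & MXCSR_MASK);
    _mm_setcsr(mxcsr_std);              // analogous to ldmxcsr(mxcsr_std)
  }
  return 0;
}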
@@ -708,10 +709,10 @@ class StubGenerator: public StubCodeGenerator {
Label L;
- __ pushq(rax);
- __ pushq(c_rarg3);
- __ pushq(c_rarg2);
- __ pushq(c_rarg1);
+ __ push(rax);
+ __ push(c_rarg3);
+ __ push(c_rarg2);
+ __ push(c_rarg1);
__ movl(rax, 0x7f800000);
__ xorl(c_rarg3, c_rarg3);
@@ -726,12 +727,12 @@ class StubGenerator: public StubCodeGenerator {
__ cmovl(Assembler::positive, c_rarg3, rax);
__ bind(L);
- __ movq(inout, c_rarg3);
+ __ movptr(inout, c_rarg3);
- __ popq(c_rarg1);
- __ popq(c_rarg2);
- __ popq(c_rarg3);
- __ popq(rax);
+ __ pop(c_rarg1);
+ __ pop(c_rarg2);
+ __ pop(c_rarg3);
+ __ pop(rax);
__ ret(0);
@@ -745,10 +746,10 @@ class StubGenerator: public StubCodeGenerator {
Label L;
- __ pushq(rax);
- __ pushq(c_rarg3);
- __ pushq(c_rarg2);
- __ pushq(c_rarg1);
+ __ push(rax);
+ __ push(c_rarg3);
+ __ push(c_rarg2);
+ __ push(c_rarg1);
__ movl(rax, 0x7f800000);
__ xorl(c_rarg3, c_rarg3);
@@ -760,15 +761,15 @@ class StubGenerator: public StubCodeGenerator {
__ testl(c_rarg2, c_rarg2); // signed ? min_jlong : max_jlong
__ mov64(c_rarg3, 0x8000000000000000);
__ mov64(rax, 0x7fffffffffffffff);
- __ cmovq(Assembler::positive, c_rarg3, rax);
+ __ cmov(Assembler::positive, c_rarg3, rax);
__ bind(L);
- __ movq(inout, c_rarg3);
+ __ movptr(inout, c_rarg3);
- __ popq(c_rarg1);
- __ popq(c_rarg2);
- __ popq(c_rarg3);
- __ popq(rax);
+ __ pop(c_rarg1);
+ __ pop(c_rarg2);
+ __ pop(c_rarg3);
+ __ pop(rax);
__ ret(0);
@@ -783,19 +784,19 @@ class StubGenerator: public StubCodeGenerator {
Label L;
- __ pushq(rax);
- __ pushq(c_rarg3);
- __ pushq(c_rarg2);
- __ pushq(c_rarg1);
- __ pushq(c_rarg0);
+ __ push(rax);
+ __ push(c_rarg3);
+ __ push(c_rarg2);
+ __ push(c_rarg1);
+ __ push(c_rarg0);
__ movl(rax, 0x7ff00000);
__ movq(c_rarg2, inout);
__ movl(c_rarg3, c_rarg2);
- __ movq(c_rarg1, c_rarg2);
- __ movq(c_rarg0, c_rarg2);
+ __ mov(c_rarg1, c_rarg2);
+ __ mov(c_rarg0, c_rarg2);
__ negl(c_rarg3);
- __ shrq(c_rarg1, 0x20);
+ __ shrptr(c_rarg1, 0x20);
__ orl(c_rarg3, c_rarg2);
__ andl(c_rarg1, 0x7fffffff);
__ xorl(c_rarg2, c_rarg2);
@@ -803,19 +804,19 @@ class StubGenerator: public StubCodeGenerator {
__ orl(c_rarg1, c_rarg3);
__ cmpl(rax, c_rarg1);
__ jcc(Assembler::negative, L); // NaN -> 0
- __ testq(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
+ __ testptr(c_rarg0, c_rarg0); // signed ? min_jint : max_jint
__ movl(c_rarg2, 0x80000000);
__ movl(rax, 0x7fffffff);
- __ cmovl(Assembler::positive, c_rarg2, rax);
+ __ cmov(Assembler::positive, c_rarg2, rax);
__ bind(L);
- __ movq(inout, c_rarg2);
+ __ movptr(inout, c_rarg2);
- __ popq(c_rarg0);
- __ popq(c_rarg1);
- __ popq(c_rarg2);
- __ popq(c_rarg3);
- __ popq(rax);
+ __ pop(c_rarg0);
+ __ pop(c_rarg1);
+ __ pop(c_rarg2);
+ __ pop(c_rarg3);
+ __ pop(rax);
__ ret(0);
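The d2i fixup above is reached when the hardware conversion reports an out-of-range result; it then applies Java's narrowing rules by hand: NaN becomes 0, and otherwise the sign of the input selects min_jint or max_jint. A behavioral sketch (not the bit-level test the stub performs on the raw double):

// What the fixup computes, expressed on the double value rather than its bits.
#include <cstdint>
#include <cmath>
#include <cstdio>

static int32_t d2i_fixup(double x) {
  if (std::isnan(x)) return 0;                 // NaN -> 0
  return x < 0 ? INT32_MIN : INT32_MAX;        // signed ? min_jint : max_jint
}

int main() {
  std::printf("%d %d %d\n", d2i_fixup(NAN), d2i_fixup(-1e30), d2i_fixup(1e30));
  return 0;
}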
@@ -830,19 +831,19 @@ class StubGenerator: public StubCodeGenerator {
Label L;
- __ pushq(rax);
- __ pushq(c_rarg3);
- __ pushq(c_rarg2);
- __ pushq(c_rarg1);
- __ pushq(c_rarg0);
+ __ push(rax);
+ __ push(c_rarg3);
+ __ push(c_rarg2);
+ __ push(c_rarg1);
+ __ push(c_rarg0);
__ movl(rax, 0x7ff00000);
__ movq(c_rarg2, inout);
__ movl(c_rarg3, c_rarg2);
- __ movq(c_rarg1, c_rarg2);
- __ movq(c_rarg0, c_rarg2);
+ __ mov(c_rarg1, c_rarg2);
+ __ mov(c_rarg0, c_rarg2);
__ negl(c_rarg3);
- __ shrq(c_rarg1, 0x20);
+ __ shrptr(c_rarg1, 0x20);
__ orl(c_rarg3, c_rarg2);
__ andl(c_rarg1, 0x7fffffff);
__ xorl(c_rarg2, c_rarg2);
@@ -858,11 +859,11 @@ class StubGenerator: public StubCodeGenerator {
__ bind(L);
__ movq(inout, c_rarg2);
- __ popq(c_rarg0);
- __ popq(c_rarg1);
- __ popq(c_rarg2);
- __ popq(c_rarg3);
- __ popq(rax);
+ __ pop(c_rarg0);
+ __ pop(c_rarg1);
+ __ pop(c_rarg2);
+ __ pop(c_rarg3);
+ __ pop(rax);
__ ret(0);
@@ -889,17 +890,17 @@ class StubGenerator: public StubCodeGenerator {
StubCodeMark mark(this, "StubRoutines", "handler_for_unsafe_access");
address start = __ pc();
- __ pushq(0); // hole for return address-to-be
- __ pushaq(); // push registers
+ __ push(0); // hole for return address-to-be
+ __ pusha(); // push registers
Address next_pc(rsp, RegisterImpl::number_of_registers * BytesPerWord);
- __ subq(rsp, frame::arg_reg_save_area_bytes);
+ __ subptr(rsp, frame::arg_reg_save_area_bytes);
BLOCK_COMMENT("call handle_unsafe_access");
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, handle_unsafe_access)));
- __ addq(rsp, frame::arg_reg_save_area_bytes);
+ __ addptr(rsp, frame::arg_reg_save_area_bytes);
- __ movq(next_pc, rax); // stuff next address
- __ popaq();
+ __ movptr(next_pc, rax); // stuff next address
+ __ popa();
__ ret(0); // jump to next address
return start;
@@ -926,14 +927,14 @@ class StubGenerator: public StubCodeGenerator {
Label exit, error;
- __ pushfq();
+ __ pushf();
__ incrementl(ExternalAddress((address) StubRoutines::verify_oop_count_addr()));
- __ pushq(r12);
+ __ push(r12);
// save c_rarg2 and c_rarg3
- __ pushq(c_rarg2);
- __ pushq(c_rarg3);
+ __ push(c_rarg2);
+ __ push(c_rarg3);
enum {
// After previous pushes.
@@ -946,17 +947,17 @@ class StubGenerator: public StubCodeGenerator {
};
// get object
- __ movq(rax, Address(rsp, oop_to_verify));
+ __ movptr(rax, Address(rsp, oop_to_verify));
// make sure object is 'reasonable'
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, exit); // if obj is NULL it is OK
// Check if the oop is in the right area of memory
- __ movq(c_rarg2, rax);
+ __ movptr(c_rarg2, rax);
__ movptr(c_rarg3, (int64_t) Universe::verify_oop_mask());
- __ andq(c_rarg2, c_rarg3);
+ __ andptr(c_rarg2, c_rarg3);
__ movptr(c_rarg3, (int64_t) Universe::verify_oop_bits());
- __ cmpq(c_rarg2, c_rarg3);
+ __ cmpptr(c_rarg2, c_rarg3);
__ jcc(Assembler::notZero, error);
// set r12 to heapbase for load_klass()
@@ -964,46 +965,46 @@ class StubGenerator: public StubCodeGenerator {
// make sure klass is 'reasonable'
__ load_klass(rax, rax); // get klass
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass is NULL it is broken
// Check if the klass is in the right area of memory
- __ movq(c_rarg2, rax);
+ __ mov(c_rarg2, rax);
__ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
- __ andq(c_rarg2, c_rarg3);
+ __ andptr(c_rarg2, c_rarg3);
__ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
- __ cmpq(c_rarg2, c_rarg3);
+ __ cmpptr(c_rarg2, c_rarg3);
__ jcc(Assembler::notZero, error);
// make sure klass' klass is 'reasonable'
__ load_klass(rax, rax);
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, error); // if klass' klass is NULL it is broken
// Check if the klass' klass is in the right area of memory
__ movptr(c_rarg3, (int64_t) Universe::verify_klass_mask());
- __ andq(rax, c_rarg3);
+ __ andptr(rax, c_rarg3);
__ movptr(c_rarg3, (int64_t) Universe::verify_klass_bits());
- __ cmpq(rax, c_rarg3);
+ __ cmpptr(rax, c_rarg3);
__ jcc(Assembler::notZero, error);
// return if everything seems ok
__ bind(exit);
- __ movq(rax, Address(rsp, saved_rax)); // get saved rax back
- __ popq(c_rarg3); // restore c_rarg3
- __ popq(c_rarg2); // restore c_rarg2
- __ popq(r12); // restore r12
- __ popfq(); // restore flags
+ __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
+ __ pop(c_rarg3); // restore c_rarg3
+ __ pop(c_rarg2); // restore c_rarg2
+ __ pop(r12); // restore r12
+ __ popf(); // restore flags
__ ret(3 * wordSize); // pop caller saved stuff
// handle errors
__ bind(error);
- __ movq(rax, Address(rsp, saved_rax)); // get saved rax back
- __ popq(c_rarg3); // get saved c_rarg3 back
- __ popq(c_rarg2); // get saved c_rarg2 back
- __ popq(r12); // get saved r12 back
- __ popfq(); // get saved flags off stack --
+ __ movptr(rax, Address(rsp, saved_rax)); // get saved rax back
+ __ pop(c_rarg3); // get saved c_rarg3 back
+ __ pop(c_rarg2); // get saved c_rarg2 back
+ __ pop(r12); // get saved r12 back
+ __ popf(); // get saved flags off stack --
// will be ignored
- __ pushaq(); // push registers
+ __ pusha(); // push registers
// (rip is already
// already pushed)
// debug(char* msg, int64_t pc, int64_t regs[])
@@ -1016,17 +1017,17 @@ class StubGenerator: public StubCodeGenerator {
// * [tos + 19] saved rax - saved by caller and bashed
// * = popped on exit
- __ movq(c_rarg0, Address(rsp, error_msg)); // pass address of error message
- __ movq(c_rarg1, Address(rsp, return_addr)); // pass return address
- __ movq(c_rarg2, rsp); // pass address of regs on stack
- __ movq(r12, rsp); // remember rsp
- __ subq(rsp, frame::arg_reg_save_area_bytes);// windows
- __ andq(rsp, -16); // align stack as required by ABI
+ __ movptr(c_rarg0, Address(rsp, error_msg)); // pass address of error message
+ __ movptr(c_rarg1, Address(rsp, return_addr)); // pass return address
+ __ movq(c_rarg2, rsp); // pass address of regs on stack
+ __ mov(r12, rsp); // remember rsp
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // align stack as required by ABI
BLOCK_COMMENT("call MacroAssembler::debug");
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug)));
- __ movq(rsp, r12); // restore rsp
- __ popaq(); // pop registers (includes r12)
- __ ret(3 * wordSize); // pop caller saved stuff
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, MacroAssembler::debug64)));
+ __ mov(rsp, r12); // restore rsp
+ __ popa(); // pop registers (includes r12)
+ __ ret(3 * wordSize); // pop caller saved stuff
return start;
}
@@ -1088,16 +1089,16 @@ class StubGenerator: public StubCodeGenerator {
const Register count = c_rarg2;
const Register end_from = rax;
- __ cmpq(to, from);
- __ leaq(end_from, Address(from, count, sf, 0));
+ __ cmpptr(to, from);
+ __ lea(end_from, Address(from, count, sf, 0));
if (NOLp == NULL) {
ExternalAddress no_overlap(no_overlap_target);
__ jump_cc(Assembler::belowEqual, no_overlap);
- __ cmpq(to, end_from);
+ __ cmpptr(to, end_from);
__ jump_cc(Assembler::aboveEqual, no_overlap);
} else {
__ jcc(Assembler::belowEqual, (*NOLp));
- __ cmpq(to, end_from);
+ __ cmpptr(to, end_from);
__ jcc(Assembler::aboveEqual, (*NOLp));
}
}
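array_overlap_test routes a conjoint copy to the forward (disjoint) stub when a forward copy cannot clobber unread source elements: either the destination starts at or below the source, or it starts at or beyond the end of the source range. A sketch of the predicate, with the scale folded into an element size:

// Sketch of the overlap test; end_from mirrors the lea(end_from, ...) above.
#include <cstdint>
#include <cstdio>

static bool can_copy_forward(uintptr_t from, uintptr_t to, size_t count, size_t elem_size) {
  uintptr_t end_from = from + count * elem_size;
  return to <= from || to >= end_from;        // belowEqual / aboveEqual branches
}

int main() {
  std::printf("%d\n", can_copy_forward(100, 50, 10, 4));   // 1: dest below source
  std::printf("%d\n", can_copy_forward(100, 116, 10, 4));  // 0: ranges overlap
  return 0;
}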
@@ -1121,14 +1122,14 @@ class StubGenerator: public StubCodeGenerator {
assert(c_rarg0 == rcx && c_rarg1 == rdx && c_rarg2 == r8 && c_rarg3 == r9,
"unexpected argument registers");
if (nargs >= 4)
- __ movq(rax, r9); // r9 is also saved_rdi
- __ movq(saved_rdi, rdi);
- __ movq(saved_rsi, rsi);
- __ movq(rdi, rcx); // c_rarg0
- __ movq(rsi, rdx); // c_rarg1
- __ movq(rdx, r8); // c_rarg2
+ __ mov(rax, r9); // r9 is also saved_rdi
+ __ movptr(saved_rdi, rdi);
+ __ movptr(saved_rsi, rsi);
+ __ mov(rdi, rcx); // c_rarg0
+ __ mov(rsi, rdx); // c_rarg1
+ __ mov(rdx, r8); // c_rarg2
if (nargs >= 4)
- __ movq(rcx, rax); // c_rarg3 (via rax)
+ __ mov(rcx, rax); // c_rarg3 (via rax)
#else
assert(c_rarg0 == rdi && c_rarg1 == rsi && c_rarg2 == rdx && c_rarg3 == rcx,
"unexpected argument registers");
@@ -1139,8 +1140,8 @@ class StubGenerator: public StubCodeGenerator {
const Register saved_rdi = r9;
const Register saved_rsi = r10;
#ifdef _WIN64
- __ movq(rdi, saved_rdi);
- __ movq(rsi, saved_rsi);
+ __ movptr(rdi, saved_rdi);
+ __ movptr(rsi, saved_rsi);
#endif
}
@@ -1160,11 +1161,11 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCT:
case BarrierSet::G1SATBCTLogging:
{
- __ pushaq(); // push registers
- __ movq(c_rarg0, addr);
- __ movq(c_rarg1, count);
+ __ pusha(); // push registers
+ __ movptr(c_rarg0, addr);
+ __ movptr(c_rarg1, count);
__ call(RuntimeAddress(BarrierSet::static_write_ref_array_pre));
- __ popaq();
+ __ popa();
}
break;
case BarrierSet::CardTableModRef:
@@ -1197,16 +1198,16 @@ class StubGenerator: public StubCodeGenerator {
case BarrierSet::G1SATBCTLogging:
{
- __ pushaq(); // push registers (overkill)
+ __ pusha(); // push registers (overkill)
// must compute element count unless barrier set interface is changed (other platforms supply count)
assert_different_registers(start, end, scratch);
- __ leaq(scratch, Address(end, wordSize));
- __ subq(scratch, start);
- __ shrq(scratch, LogBytesPerWord);
- __ movq(c_rarg0, start);
- __ movq(c_rarg1, scratch);
+ __ lea(scratch, Address(end, wordSize));
+ __ subptr(scratch, start);
+ __ shrptr(scratch, LogBytesPerWord);
+ __ mov(c_rarg0, start);
+ __ mov(c_rarg1, scratch);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, BarrierSet::static_write_ref_array_post)));
- __ popaq();
+ __ popa();
}
break;
#endif // 0 G1 - only
@@ -1218,9 +1219,9 @@ class StubGenerator: public StubCodeGenerator {
Label L_loop;
- __ shrq(start, CardTableModRefBS::card_shift);
- __ shrq(end, CardTableModRefBS::card_shift);
- __ subq(end, start); // number of bytes to copy
+ __ shrptr(start, CardTableModRefBS::card_shift);
+ __ shrptr(end, CardTableModRefBS::card_shift);
+ __ subptr(end, start); // number of bytes to copy
intptr_t disp = (intptr_t) ct->byte_map_base;
if (__ is_simm32(disp)) {
@@ -1232,10 +1233,10 @@ class StubGenerator: public StubCodeGenerator {
}
const Register count = end; // 'end' register contains bytes count now
- __ addq(start, scratch);
+ __ addptr(start, scratch);
__ BIND(L_loop);
__ movb(Address(start, count, Address::times_1), 0);
- __ decrementq(count);
+ __ decrement(count);
__ jcc(Assembler::greaterEqual, L_loop);
}
}
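The CardTableModRef branch dirties one byte per card spanned by the copied oops: both ends are shifted down by card_shift, the difference becomes a byte count, and the loop stores zero (the dirty value) from the last card back to the first. A standalone sketch with illustrative constants:

// Sketch of the dirty-card loop; card size and table bounds are illustrative.
#include <cstdint>
#include <cstring>

static const int card_shift = 9;                 // 512-byte cards, as in CardTableModRefBS
static uint8_t card_table[1 << 16];

static void post_barrier(uintptr_t start, uintptr_t end) {
  uintptr_t first = start >> card_shift;
  intptr_t  count = (end >> card_shift) - first; // number of card bytes to dirty
  do {
    card_table[first + count] = 0;               // 0 == dirty card
  } while (--count >= 0);
}

int main() {
  std::memset(card_table, 0xff, sizeof(card_table));   // start with clean cards
  post_barrier(0x1000, 0x2400);
  return card_table[0x1000 >> card_shift] == 0 ? 0 : 1;
}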
@@ -1267,9 +1268,9 @@ class StubGenerator: public StubCodeGenerator {
__ movq(to, Address(end_from, qword_count, Address::times_8, - 0));
__ movq(Address(end_to, qword_count, Address::times_8, - 0), to);
__ BIND(L_copy_32_bytes);
- __ addq(qword_count, 4);
+ __ addptr(qword_count, 4);
__ jcc(Assembler::lessEqual, L_loop);
- __ subq(qword_count, 4);
+ __ subptr(qword_count, 4);
__ jcc(Assembler::less, L_copy_8_bytes); // Copy trailing qwords
}
@@ -1300,9 +1301,9 @@ class StubGenerator: public StubCodeGenerator {
__ movq(to, Address(from, qword_count, Address::times_8, 0));
__ movq(Address(dest, qword_count, Address::times_8, 0), to);
__ BIND(L_copy_32_bytes);
- __ subq(qword_count, 4);
+ __ subptr(qword_count, 4);
__ jcc(Assembler::greaterEqual, L_loop);
- __ addq(qword_count, 4);
+ __ addptr(qword_count, 4);
__ jcc(Assembler::greater, L_copy_8_bytes); // Copy trailing qwords
}
@@ -1354,45 +1355,45 @@ class StubGenerator: public StubCodeGenerator {
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'count' are now valid
- __ movq(byte_count, count);
- __ shrq(count, 3); // count => qword_count
+ __ movptr(byte_count, count);
+ __ shrptr(count, 3); // count => qword_count
// Copy from low to high addresses. Use 'to' as scratch.
- __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
- __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
- __ negq(qword_count); // make the count negative
+ __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
+ __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
+ __ negptr(qword_count); // make the count negative
__ jmp(L_copy_32_bytes);
// Copy trailing qwords
__ BIND(L_copy_8_bytes);
__ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
__ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
- __ incrementq(qword_count);
+ __ increment(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
// Check for and copy trailing dword
__ BIND(L_copy_4_bytes);
- __ testq(byte_count, 4);
+ __ testl(byte_count, 4);
__ jccb(Assembler::zero, L_copy_2_bytes);
__ movl(rax, Address(end_from, 8));
__ movl(Address(end_to, 8), rax);
- __ addq(end_from, 4);
- __ addq(end_to, 4);
+ __ addptr(end_from, 4);
+ __ addptr(end_to, 4);
// Check for and copy trailing word
__ BIND(L_copy_2_bytes);
- __ testq(byte_count, 2);
+ __ testl(byte_count, 2);
__ jccb(Assembler::zero, L_copy_byte);
__ movw(rax, Address(end_from, 8));
__ movw(Address(end_to, 8), rax);
- __ addq(end_from, 2);
- __ addq(end_to, 2);
+ __ addptr(end_from, 2);
+ __ addptr(end_to, 2);
// Check for and copy trailing byte
__ BIND(L_copy_byte);
- __ testq(byte_count, 1);
+ __ testl(byte_count, 1);
__ jccb(Assembler::zero, L_exit);
__ movb(rax, Address(end_from, 8));
__ movb(Address(end_to, 8), rax);
@@ -1400,7 +1401,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_exit);
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1450,28 +1451,28 @@ class StubGenerator: public StubCodeGenerator {
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'count' are now valid
- __ movq(byte_count, count);
- __ shrq(count, 3); // count => qword_count
+ __ movptr(byte_count, count);
+ __ shrptr(count, 3); // count => qword_count
// Copy from high to low addresses.
// Check for and copy trailing byte
- __ testq(byte_count, 1);
+ __ testl(byte_count, 1);
__ jcc(Assembler::zero, L_copy_2_bytes);
__ movb(rax, Address(from, byte_count, Address::times_1, -1));
__ movb(Address(to, byte_count, Address::times_1, -1), rax);
- __ decrementq(byte_count); // Adjust for possible trailing word
+ __ decrement(byte_count); // Adjust for possible trailing word
// Check for and copy trailing word
__ BIND(L_copy_2_bytes);
- __ testq(byte_count, 2);
+ __ testl(byte_count, 2);
__ jcc(Assembler::zero, L_copy_4_bytes);
__ movw(rax, Address(from, byte_count, Address::times_1, -2));
__ movw(Address(to, byte_count, Address::times_1, -2), rax);
// Check for and copy trailing dword
__ BIND(L_copy_4_bytes);
- __ testq(byte_count, 4);
+ __ testl(byte_count, 4);
__ jcc(Assembler::zero, L_copy_32_bytes);
__ movl(rax, Address(from, qword_count, Address::times_8));
__ movl(Address(to, qword_count, Address::times_8), rax);
@@ -1481,12 +1482,12 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_8_bytes);
__ movq(rax, Address(from, qword_count, Address::times_8, -8));
__ movq(Address(to, qword_count, Address::times_8, -8), rax);
- __ decrementq(qword_count);
+ __ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1495,7 +1496,7 @@ class StubGenerator: public StubCodeGenerator {
inc_counter_np(SharedRuntime::_jbyte_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1548,20 +1549,20 @@ class StubGenerator: public StubCodeGenerator {
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'count' are now valid
- __ movq(word_count, count);
- __ shrq(count, 2); // count => qword_count
+ __ movptr(word_count, count);
+ __ shrptr(count, 2); // count => qword_count
// Copy from low to high addresses. Use 'to' as scratch.
- __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
- __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
- __ negq(qword_count);
+ __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
+ __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
+ __ negptr(qword_count);
__ jmp(L_copy_32_bytes);
// Copy trailing qwords
__ BIND(L_copy_8_bytes);
__ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
__ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
- __ incrementq(qword_count);
+ __ increment(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
// Original 'dest' is trashed, so we can't use it as a
@@ -1569,17 +1570,17 @@ class StubGenerator: public StubCodeGenerator {
// Check for and copy trailing dword
__ BIND(L_copy_4_bytes);
- __ testq(word_count, 2);
+ __ testl(word_count, 2);
__ jccb(Assembler::zero, L_copy_2_bytes);
__ movl(rax, Address(end_from, 8));
__ movl(Address(end_to, 8), rax);
- __ addq(end_from, 4);
- __ addq(end_to, 4);
+ __ addptr(end_from, 4);
+ __ addptr(end_to, 4);
// Check for and copy trailing word
__ BIND(L_copy_2_bytes);
- __ testq(word_count, 1);
+ __ testl(word_count, 1);
__ jccb(Assembler::zero, L_exit);
__ movw(rax, Address(end_from, 8));
__ movw(Address(end_to, 8), rax);
@@ -1587,7 +1588,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_exit);
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1637,20 +1638,20 @@ class StubGenerator: public StubCodeGenerator {
// r9 and r10 may be used to save non-volatile registers
// 'from', 'to' and 'count' are now valid
- __ movq(word_count, count);
- __ shrq(count, 2); // count => qword_count
+ __ movptr(word_count, count);
+ __ shrptr(count, 2); // count => qword_count
// Copy from high to low addresses. Use 'to' as scratch.
// Check for and copy trailing word
- __ testq(word_count, 1);
+ __ testl(word_count, 1);
__ jccb(Assembler::zero, L_copy_4_bytes);
__ movw(rax, Address(from, word_count, Address::times_2, -2));
__ movw(Address(to, word_count, Address::times_2, -2), rax);
// Check for and copy trailing dword
__ BIND(L_copy_4_bytes);
- __ testq(word_count, 2);
+ __ testl(word_count, 2);
__ jcc(Assembler::zero, L_copy_32_bytes);
__ movl(rax, Address(from, qword_count, Address::times_8));
__ movl(Address(to, qword_count, Address::times_8), rax);
@@ -1660,12 +1661,12 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_8_bytes);
__ movq(rax, Address(from, qword_count, Address::times_8, -8));
__ movq(Address(to, qword_count, Address::times_8, -8), rax);
- __ decrementq(qword_count);
+ __ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1674,7 +1675,7 @@ class StubGenerator: public StubCodeGenerator {
inc_counter_np(SharedRuntime::_jshort_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1738,25 +1739,25 @@ class StubGenerator: public StubCodeGenerator {
}
// 'from', 'to' and 'count' are now valid
- __ movq(dword_count, count);
- __ shrq(count, 1); // count => qword_count
+ __ movptr(dword_count, count);
+ __ shrptr(count, 1); // count => qword_count
// Copy from low to high addresses. Use 'to' as scratch.
- __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
- __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
- __ negq(qword_count);
+ __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
+ __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
+ __ negptr(qword_count);
__ jmp(L_copy_32_bytes);
// Copy trailing qwords
__ BIND(L_copy_8_bytes);
__ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
__ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
- __ incrementq(qword_count);
+ __ increment(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
// Check for and copy trailing dword
__ BIND(L_copy_4_bytes);
- __ testq(dword_count, 1); // Only byte test since the value is 0 or 1
+ __ testl(dword_count, 1); // Only byte test since the value is 0 or 1
__ jccb(Assembler::zero, L_exit);
__ movl(rax, Address(end_from, 8));
__ movl(Address(end_to, 8), rax);
@@ -1768,7 +1769,7 @@ class StubGenerator: public StubCodeGenerator {
}
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1825,13 +1826,13 @@ class StubGenerator: public StubCodeGenerator {
assert_clean_int(count, rax); // Make sure 'count' is clean int.
// 'from', 'to' and 'count' are now valid
- __ movq(dword_count, count);
- __ shrq(count, 1); // count => qword_count
+ __ movptr(dword_count, count);
+ __ shrptr(count, 1); // count => qword_count
// Copy from high to low addresses. Use 'to' as scratch.
// Check for and copy trailing dword
- __ testq(dword_count, 1);
+ __ testl(dword_count, 1);
__ jcc(Assembler::zero, L_copy_32_bytes);
__ movl(rax, Address(from, dword_count, Address::times_4, -4));
__ movl(Address(to, dword_count, Address::times_4, -4), rax);
@@ -1841,7 +1842,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_8_bytes);
__ movq(rax, Address(from, qword_count, Address::times_8, -8));
__ movq(Address(to, qword_count, Address::times_8, -8), rax);
- __ decrementq(qword_count);
+ __ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
inc_counter_np(SharedRuntime::_jint_array_copy_ctr);
@@ -1849,7 +1850,7 @@ class StubGenerator: public StubCodeGenerator {
__ jmp(L_exit);
}
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1864,7 +1865,7 @@ class StubGenerator: public StubCodeGenerator {
gen_write_ref_array_post_barrier(to, end_to, rax);
}
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -1921,16 +1922,16 @@ class StubGenerator: public StubCodeGenerator {
// 'from', 'to' and 'qword_count' are now valid
// Copy from low to high addresses. Use 'to' as scratch.
- __ leaq(end_from, Address(from, qword_count, Address::times_8, -8));
- __ leaq(end_to, Address(to, qword_count, Address::times_8, -8));
- __ negq(qword_count);
+ __ lea(end_from, Address(from, qword_count, Address::times_8, -8));
+ __ lea(end_to, Address(to, qword_count, Address::times_8, -8));
+ __ negptr(qword_count);
__ jmp(L_copy_32_bytes);
// Copy trailing qwords
__ BIND(L_copy_8_bytes);
__ movq(rax, Address(end_from, qword_count, Address::times_8, 8));
__ movq(Address(end_to, qword_count, Address::times_8, 8), rax);
- __ incrementq(qword_count);
+ __ increment(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
if (is_oop) {
@@ -1938,7 +1939,7 @@ class StubGenerator: public StubCodeGenerator {
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
}
@@ -1954,7 +1955,7 @@ class StubGenerator: public StubCodeGenerator {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
}
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -2008,7 +2009,7 @@ class StubGenerator: public StubCodeGenerator {
if (is_oop) {
// Save to and count for store barrier
- __ movq(saved_count, qword_count);
+ __ movptr(saved_count, qword_count);
// No registers are destroyed by this call
gen_write_ref_array_pre_barrier(to, saved_count);
}
@@ -2019,7 +2020,7 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_8_bytes);
__ movq(rax, Address(from, qword_count, Address::times_8, -8));
__ movq(Address(to, qword_count, Address::times_8, -8), rax);
- __ decrementq(qword_count);
+ __ decrement(qword_count);
__ jcc(Assembler::notZero, L_copy_8_bytes);
if (is_oop) {
@@ -2027,7 +2028,7 @@ class StubGenerator: public StubCodeGenerator {
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
}
@@ -2037,14 +2038,14 @@ class StubGenerator: public StubCodeGenerator {
if (is_oop) {
__ BIND(L_exit);
- __ leaq(rcx, Address(to, saved_count, Address::times_8, -8));
+ __ lea(rcx, Address(to, saved_count, Address::times_8, -8));
gen_write_ref_array_post_barrier(to, rcx, rax);
inc_counter_np(SharedRuntime::_oop_array_copy_ctr);
} else {
inc_counter_np(SharedRuntime::_jlong_array_copy_ctr);
}
restore_arg_regs();
- __ xorq(rax, rax); // return 0
+ __ xorptr(rax, rax); // return 0
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -2073,12 +2074,12 @@ class StubGenerator: public StubCodeGenerator {
Address super_cache_addr( sub_klass, sc_offset);
// if the pointers are equal, we are done (e.g., String[] elements)
- __ cmpq(super_klass, sub_klass);
+ __ cmpptr(super_klass, sub_klass);
__ jcc(Assembler::equal, L_success);
// check the supertype display:
Address super_check_addr(sub_klass, super_check_offset, Address::times_1, 0);
- __ cmpq(super_klass, super_check_addr); // test the super type
+ __ cmpptr(super_klass, super_check_addr); // test the super type
__ jcc(Assembler::equal, L_success);
// if it was a primary super, we can just fail immediately
@@ -2091,38 +2092,38 @@ class StubGenerator: public StubCodeGenerator {
// This code is rarely used, so simplicity is a virtue here.
inc_counter_np(SharedRuntime::_partial_subtype_ctr);
{
- __ pushq(rax);
- __ pushq(rcx);
- __ pushq(rdi);
+ __ push(rax);
+ __ push(rcx);
+ __ push(rdi);
assert_different_registers(sub_klass, super_klass, rax, rcx, rdi);
- __ movq(rdi, secondary_supers_addr);
+ __ movptr(rdi, secondary_supers_addr);
// Load the array length.
__ movl(rcx, Address(rdi, arrayOopDesc::length_offset_in_bytes()));
// Skip to start of data.
- __ addq(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ __ addptr(rdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
// Scan rcx words at [rdi] for occurrence of rax
// Set NZ/Z based on last compare
- __ movq(rax, super_klass);
+ __ movptr(rax, super_klass);
if (UseCompressedOops) {
// Compare against compressed form. Don't need to uncompress because
// looks like orig rax is restored in popq below.
__ encode_heap_oop(rax);
__ repne_scanl();
} else {
- __ repne_scanq();
+ __ repne_scan();
}
// Unspill the temp. registers:
- __ popq(rdi);
- __ popq(rcx);
- __ popq(rax);
+ __ pop(rdi);
+ __ pop(rcx);
+ __ pop(rax);
__ jcc(Assembler::notEqual, L_miss);
}
// Success. Cache the super we found and proceed in triumph.
- __ movq(super_cache_addr, super_klass); // note: rax is dead
+ __ movptr(super_cache_addr, super_klass); // note: rax is dead
__ jmp(L_success);
// Fall through on failure!
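The slow path above is a linear scan of the secondary-supers array, done in one repne_scan (repne_scanl when the array holds compressed oops), with the hit cached so the next check succeeds on the fast path. A plain-C++ sketch of the same search, with illustrative field names:

// Sketch of the secondary-supers scan; field names are illustrative, not the
// real Klass layout.
#include <cstddef>

struct Klass {
  Klass** secondary_supers;      // array of length secondary_len
  int     secondary_len;
  Klass*  secondary_super_cache;
};

static bool check_secondary_supers(Klass* sub, Klass* super) {
  for (int i = 0; i < sub->secondary_len; i++) {   // what repne_scan does in one instruction
    if (sub->secondary_supers[i] == super) {
      sub->secondary_super_cache = super;          // "Cache the super we found"
      return true;
    }
  }
  return false;
}

int main() {
  Klass object{}, sub{};
  Klass* supers[] = { &object };
  sub.secondary_supers = supers;
  sub.secondary_len = 1;
  return check_secondary_supers(&sub, &object) ? 0 : 1;
}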
@@ -2202,16 +2203,16 @@ class StubGenerator: public StubCodeGenerator {
saved_rip_offset,
saved_rarg0_offset
};
- __ subq(rsp, saved_rbp_offset * wordSize);
- __ movq(Address(rsp, saved_r13_offset * wordSize), r13);
- __ movq(Address(rsp, saved_r14_offset * wordSize), r14);
+ __ subptr(rsp, saved_rbp_offset * wordSize);
+ __ movptr(Address(rsp, saved_r13_offset * wordSize), r13);
+ __ movptr(Address(rsp, saved_r14_offset * wordSize), r14);
setup_arg_regs(4); // from => rdi, to => rsi, length => rdx
// ckoff => rcx, ckval => r8
// r9 and r10 may be used to save non-volatile registers
#ifdef _WIN64
// last argument (#4) is on stack on Win64
const int ckval_offset = saved_rarg0_offset + 4;
- __ movq(ckval, Address(rsp, ckval_offset * wordSize));
+ __ movptr(ckval, Address(rsp, ckval_offset * wordSize));
#endif
// check that int operands are properly extended to size_t
@@ -2242,15 +2243,15 @@ class StubGenerator: public StubCodeGenerator {
gen_write_ref_array_pre_barrier(to, count);
// Copy from low to high addresses, indexed from the end of each array.
- __ leaq(end_from, end_from_addr);
- __ leaq(end_to, end_to_addr);
- __ movq(r14_length, length); // save a copy of the length
- assert(length == count, ""); // else fix next line:
- __ negq(count); // negate and test the length
+ __ lea(end_from, end_from_addr);
+ __ lea(end_to, end_to_addr);
+ __ movptr(r14_length, length); // save a copy of the length
+ assert(length == count, ""); // else fix next line:
+ __ negptr(count); // negate and test the length
__ jcc(Assembler::notZero, L_load_element);
// Empty array: Nothing to do.
- __ xorq(rax, rax); // return 0 on (trivial) success
+ __ xorptr(rax, rax); // return 0 on (trivial) success
__ jmp(L_done);
// ======== begin loop ========
@@ -2262,13 +2263,13 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_store_element);
__ store_heap_oop(to_element_addr, rax_oop); // store the oop
- __ incrementq(count); // increment the count toward zero
+ __ increment(count); // increment the count toward zero
__ jcc(Assembler::zero, L_do_card_marks);
// ======== loop entry is here ========
__ BIND(L_load_element);
__ load_heap_oop(rax_oop, from_element_addr); // load the oop
- __ testq(rax_oop, rax_oop);
+ __ testptr(rax_oop, rax_oop);
__ jcc(Assembler::zero, L_store_element);
__ load_klass(r11_klass, rax_oop);// query the object klass
@@ -2280,23 +2281,23 @@ class StubGenerator: public StubCodeGenerator {
// Emit GC store barriers for the oops we have copied (r14 + rdx),
// and report their number to the caller.
assert_different_registers(rax, r14_length, count, to, end_to, rcx);
- __ leaq(end_to, to_element_addr);
+ __ lea(end_to, to_element_addr);
gen_write_ref_array_post_barrier(to, end_to, rcx);
- __ movq(rax, r14_length); // original oops
- __ addq(rax, count); // K = (original - remaining) oops
- __ notq(rax); // report (-1^K) to caller
+ __ movptr(rax, r14_length); // original oops
+ __ addptr(rax, count); // K = (original - remaining) oops
+ __ notptr(rax); // report (-1^K) to caller
__ jmp(L_done);
// Come here on success only.
__ BIND(L_do_card_marks);
- __ addq(end_to, -wordSize); // make an inclusive end pointer
+ __ addptr(end_to, -wordSize); // make an inclusive end pointer
gen_write_ref_array_post_barrier(to, end_to, rcx);
- __ xorq(rax, rax); // return 0 on success
+ __ xorptr(rax, rax); // return 0 on success
// Common exit point (success or failure).
__ BIND(L_done);
- __ movq(r13, Address(rsp, saved_r13_offset * wordSize));
- __ movq(r14, Address(rsp, saved_r14_offset * wordSize));
+ __ movptr(r13, Address(rsp, saved_r13_offset * wordSize));
+ __ movptr(r14, Address(rsp, saved_r14_offset * wordSize));
inc_counter_np(SharedRuntime::_checkcast_array_copy_ctr);
restore_arg_regs();
__ leave(); // required for proper stackwalking of RuntimeStub frame
@@ -2339,9 +2340,9 @@ class StubGenerator: public StubCodeGenerator {
// bump this on entry, not on exit:
inc_counter_np(SharedRuntime::_unsafe_array_copy_ctr);
- __ movq(bits, from);
- __ orq(bits, to);
- __ orq(bits, size);
+ __ mov(bits, from);
+ __ orptr(bits, to);
+ __ orptr(bits, size);
__ testb(bits, BytesPerLong-1);
__ jccb(Assembler::zero, L_long_aligned);
@@ -2353,15 +2354,15 @@ class StubGenerator: public StubCodeGenerator {
__ jump_cc(Assembler::notZero, RuntimeAddress(byte_copy_entry));
__ BIND(L_short_aligned);
- __ shrq(size, LogBytesPerShort); // size => short_count
+ __ shrptr(size, LogBytesPerShort); // size => short_count
__ jump(RuntimeAddress(short_copy_entry));
__ BIND(L_int_aligned);
- __ shrq(size, LogBytesPerInt); // size => int_count
+ __ shrptr(size, LogBytesPerInt); // size => int_count
__ jump(RuntimeAddress(int_copy_entry));
__ BIND(L_long_aligned);
- __ shrq(size, LogBytesPerLong); // size => qword_count
+ __ shrptr(size, LogBytesPerLong); // size => qword_count
__ jump(RuntimeAddress(long_copy_entry));
return start;
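unsafe_arraycopy picks the widest safe element size by or-ing the two addresses and the byte count and testing alignment masks, then tail-calls the matching copy stub with the size converted to an element count. A sketch of the dispatch:

// Sketch of the alignment dispatch; returns log2 of the chosen element size.
#include <cstdint>
#include <cstdio>

static int log_bytes_per_copy_element(uintptr_t from, uintptr_t to, size_t size) {
  uintptr_t bits = from | to | size;
  if ((bits & 7) == 0) return 3;   // 8-byte aligned -> copy longs
  if ((bits & 3) == 0) return 2;   // 4-byte aligned -> copy ints
  if ((bits & 1) == 0) return 1;   // 2-byte aligned -> copy shorts
  return 0;                        // fall back to byte copy
}

int main() {
  std::printf("%d\n", log_bytes_per_copy_element(0x1000, 0x2000, 64));  // 3
  std::printf("%d\n", log_bytes_per_copy_element(0x1002, 0x2000, 64));  // 1
  return 0;
}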
@@ -2469,7 +2470,7 @@ class StubGenerator: public StubCodeGenerator {
//
// if (src == NULL) return -1;
- __ testq(src, src); // src oop
+ __ testptr(src, src); // src oop
size_t j1off = __ offset();
__ jccb(Assembler::zero, L_failed_0);
@@ -2478,7 +2479,7 @@ class StubGenerator: public StubCodeGenerator {
__ jccb(Assembler::negative, L_failed_0);
// if (dst == NULL) return -1;
- __ testq(dst, dst); // dst oop
+ __ testptr(dst, dst); // dst oop
__ jccb(Assembler::zero, L_failed_0);
// if (dst_pos < 0) return -1;
@@ -2509,7 +2510,7 @@ class StubGenerator: public StubCodeGenerator {
// assert(src->klass() != NULL);
BLOCK_COMMENT("assert klasses not null");
{ Label L1, L2;
- __ testq(r10_src_klass, r10_src_klass);
+ __ testptr(r10_src_klass, r10_src_klass);
__ jcc(Assembler::notZero, L2); // it is broken if klass is NULL
__ bind(L1);
__ stop("broken null klass");
@@ -2574,9 +2575,9 @@ class StubGenerator: public StubCodeGenerator {
__ movl(r10_offset, rax_lh);
__ shrl(r10_offset, Klass::_lh_header_size_shift);
- __ andq(r10_offset, Klass::_lh_header_size_mask); // array_offset
- __ addq(src, r10_offset); // src array offset
- __ addq(dst, r10_offset); // dst array offset
+ __ andptr(r10_offset, Klass::_lh_header_size_mask); // array_offset
+ __ addptr(src, r10_offset); // src array offset
+ __ addptr(dst, r10_offset); // dst array offset
BLOCK_COMMENT("choose copy loop based on element size");
__ andl(rax_lh, Klass::_lh_log2_element_size_mask); // rax_lh -> rax_elsize
@@ -2591,25 +2592,25 @@ class StubGenerator: public StubCodeGenerator {
__ BIND(L_copy_bytes);
__ cmpl(rax_elsize, 0);
__ jccb(Assembler::notEqual, L_copy_shorts);
- __ leaq(from, Address(src, src_pos, Address::times_1, 0));// src_addr
- __ leaq(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
- __ movslq(count, r11_length); // length
+ __ lea(from, Address(src, src_pos, Address::times_1, 0));// src_addr
+ __ lea(to, Address(dst, dst_pos, Address::times_1, 0));// dst_addr
+ __ movl2ptr(count, r11_length); // length
__ jump(RuntimeAddress(byte_copy_entry));
__ BIND(L_copy_shorts);
__ cmpl(rax_elsize, LogBytesPerShort);
__ jccb(Assembler::notEqual, L_copy_ints);
- __ leaq(from, Address(src, src_pos, Address::times_2, 0));// src_addr
- __ leaq(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
- __ movslq(count, r11_length); // length
+ __ lea(from, Address(src, src_pos, Address::times_2, 0));// src_addr
+ __ lea(to, Address(dst, dst_pos, Address::times_2, 0));// dst_addr
+ __ movl2ptr(count, r11_length); // length
__ jump(RuntimeAddress(short_copy_entry));
__ BIND(L_copy_ints);
__ cmpl(rax_elsize, LogBytesPerInt);
__ jccb(Assembler::notEqual, L_copy_longs);
- __ leaq(from, Address(src, src_pos, Address::times_4, 0));// src_addr
- __ leaq(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
- __ movslq(count, r11_length); // length
+ __ lea(from, Address(src, src_pos, Address::times_4, 0));// src_addr
+ __ lea(to, Address(dst, dst_pos, Address::times_4, 0));// dst_addr
+ __ movl2ptr(count, r11_length); // length
__ jump(RuntimeAddress(int_copy_entry));
__ BIND(L_copy_longs);
@@ -2621,9 +2622,9 @@ class StubGenerator: public StubCodeGenerator {
__ bind(L);
}
#endif
- __ leaq(from, Address(src, src_pos, Address::times_8, 0));// src_addr
- __ leaq(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
- __ movslq(count, r11_length); // length
+ __ lea(from, Address(src, src_pos, Address::times_8, 0));// src_addr
+ __ lea(to, Address(dst, dst_pos, Address::times_8, 0));// dst_addr
+ __ movl2ptr(count, r11_length); // length
__ jump(RuntimeAddress(long_copy_entry));
// objArrayKlass
@@ -2640,11 +2641,11 @@ class StubGenerator: public StubCodeGenerator {
arraycopy_range_checks(src, src_pos, dst, dst_pos, r11_length,
r10, L_failed);
- __ leaq(from, Address(src, src_pos, TIMES_OOP,
+ __ lea(from, Address(src, src_pos, TIMES_OOP,
arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // src_addr
- __ leaq(to, Address(dst, dst_pos, TIMES_OOP,
- arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
- __ movslq(count, r11_length); // length
+ __ lea(to, Address(dst, dst_pos, TIMES_OOP,
+ arrayOopDesc::base_offset_in_bytes(T_OBJECT))); // dst_addr
+ __ movl2ptr(count, r11_length); // length
__ BIND(L_plain_copy);
__ jump(RuntimeAddress(oop_copy_entry));
@@ -2671,9 +2672,9 @@ class StubGenerator: public StubCodeGenerator {
#endif
// Marshal the base address arguments now, freeing registers.
- __ leaq(from, Address(src, src_pos, TIMES_OOP,
+ __ lea(from, Address(src, src_pos, TIMES_OOP,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
- __ leaq(to, Address(dst, dst_pos, TIMES_OOP,
+ __ lea(to, Address(dst, dst_pos, TIMES_OOP,
arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
__ movl(count, C_RARG4); // length (reloaded)
Register sco_temp = c_rarg3; // this register is free now
@@ -2691,19 +2692,19 @@ class StubGenerator: public StubCodeGenerator {
// Fetch destination element klass from the objArrayKlass header.
int ek_offset = (klassOopDesc::header_size() * HeapWordSize +
objArrayKlass::element_klass_offset_in_bytes());
- __ movq(r11_dst_klass, Address(r11_dst_klass, ek_offset));
+ __ movptr(r11_dst_klass, Address(r11_dst_klass, ek_offset));
__ movl(sco_temp, Address(r11_dst_klass, sco_offset));
assert_clean_int(sco_temp, rax);
// the checkcast_copy loop needs two extra arguments:
assert(c_rarg3 == sco_temp, "#3 already in place");
- __ movq(C_RARG4, r11_dst_klass); // dst.klass.element_klass
+ __ movptr(C_RARG4, r11_dst_klass); // dst.klass.element_klass
__ jump(RuntimeAddress(checkcast_copy_entry));
}
__ BIND(L_failed);
- __ xorq(rax, rax);
- __ notq(rax); // return -1
+ __ xorptr(rax, rax);
+ __ notptr(rax); // return -1
__ leave(); // required for proper stackwalking of RuntimeStub frame
__ ret(0);
@@ -2806,10 +2807,10 @@ class StubGenerator: public StubCodeGenerator {
// thread-local storage and also sets up last_Java_sp slightly
// differently than the real call_VM
if (restore_saved_exception_pc) {
- __ movq(rax,
- Address(r15_thread,
- in_bytes(JavaThread::saved_exception_pc_offset())));
- __ pushq(rax);
+ __ movptr(rax,
+ Address(r15_thread,
+ in_bytes(JavaThread::saved_exception_pc_offset())));
+ __ push(rax);
}
__ enter(); // required for proper stackwalking of RuntimeStub frame
@@ -2817,7 +2818,7 @@ class StubGenerator: public StubCodeGenerator {
assert(is_even(framesize/2), "sp not 16-byte aligned");
// return address and rbp are already in place
- __ subq(rsp, (framesize-4) << LogBytesPerInt); // prolog
+ __ subptr(rsp, (framesize-4) << LogBytesPerInt); // prolog
int frame_complete = __ pc() - start;
@@ -2825,7 +2826,7 @@ class StubGenerator: public StubCodeGenerator {
__ set_last_Java_frame(rsp, rbp, NULL);
// Call runtime
- __ movq(c_rarg0, r15_thread);
+ __ movptr(c_rarg0, r15_thread);
BLOCK_COMMENT("call runtime_entry");
__ call(RuntimeAddress(runtime_entry));
@@ -2841,8 +2842,8 @@ class StubGenerator: public StubCodeGenerator {
// check for pending exceptions
#ifdef ASSERT
Label L;
- __ cmpq(Address(r15_thread, Thread::pending_exception_offset()),
- (int) NULL);
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()),
+ (int32_t) NULL_WORD);
__ jcc(Assembler::notEqual, L);
__ should_not_reach_here();
__ bind(L);
@@ -2865,7 +2866,7 @@ class StubGenerator: public StubCodeGenerator {
// Generates all stubs and initializes the entry points
// This platform-specific stub is needed by generate_call_stub()
- StubRoutines::amd64::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
+ StubRoutines::x86::_mxcsr_std = generate_fp_mask("mxcsr_std", 0x0000000000001F80);
// entry points that exist in all platforms Note: This is code
// that could be shared among different platforms - however the
@@ -2894,9 +2895,9 @@ class StubGenerator: public StubCodeGenerator {
generate_handler_for_unsafe_access();
// platform dependent
- StubRoutines::amd64::_get_previous_fp_entry = generate_get_previous_fp();
+ StubRoutines::x86::_get_previous_fp_entry = generate_get_previous_fp();
- StubRoutines::amd64::_verify_mxcsr_entry = generate_verify_mxcsr();
+ StubRoutines::x86::_verify_mxcsr_entry = generate_verify_mxcsr();
}
void generate_all() {
@@ -2948,15 +2949,15 @@ class StubGenerator: public StubCodeGenerator {
false);
// entry points that are platform specific
- StubRoutines::amd64::_f2i_fixup = generate_f2i_fixup();
- StubRoutines::amd64::_f2l_fixup = generate_f2l_fixup();
- StubRoutines::amd64::_d2i_fixup = generate_d2i_fixup();
- StubRoutines::amd64::_d2l_fixup = generate_d2l_fixup();
-
- StubRoutines::amd64::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
- StubRoutines::amd64::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
- StubRoutines::amd64::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
- StubRoutines::amd64::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
+ StubRoutines::x86::_f2i_fixup = generate_f2i_fixup();
+ StubRoutines::x86::_f2l_fixup = generate_f2l_fixup();
+ StubRoutines::x86::_d2i_fixup = generate_d2i_fixup();
+ StubRoutines::x86::_d2l_fixup = generate_d2l_fixup();
+
+ StubRoutines::x86::_float_sign_mask = generate_fp_mask("float_sign_mask", 0x7FFFFFFF7FFFFFFF);
+ StubRoutines::x86::_float_sign_flip = generate_fp_mask("float_sign_flip", 0x8000000080000000);
+ StubRoutines::x86::_double_sign_mask = generate_fp_mask("double_sign_mask", 0x7FFFFFFFFFFFFFFF);
+ StubRoutines::x86::_double_sign_flip = generate_fp_mask("double_sign_flip", 0x8000000000000000);
// support for verify_oop (must happen after universe_init)
StubRoutines::_verify_oop_subroutine_entry = generate_verify_oop();
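A detail that recurs through this file: literal comparisons against NULL_WORD now go through cmpptr with an explicit (int32_t) cast, on the assumption that the x86-64 encoding carries only a 32-bit immediate which the processor sign-extends to 64 bits. A minimal check of why that is sound for a zero sentinel:

// The encoded immediate is 32 bits; comparing a 64-bit slot against it works
// only because sign-extension reproduces the intended value, which a zero
// NULL_WORD trivially satisfies.
#include <cstdint>
#include <cassert>

int main() {
  intptr_t null_word = 0;                           // stands in for NULL_WORD
  int32_t  imm = static_cast<int32_t>(null_word);   // what the encoder can hold
  assert(static_cast<intptr_t>(imm) == null_word);  // sign-extension round-trips
  return 0;
}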
diff --git a/src/cpu/x86/vm/stubRoutines_x86_32.cpp b/src/cpu/x86/vm/stubRoutines_x86_32.cpp
index 285eef97e..2bd98acab 100644
--- a/src/cpu/x86/vm/stubRoutines_x86_32.cpp
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.cpp
@@ -28,6 +28,6 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
-address StubRoutines::i486::_verify_mxcsr_entry = NULL;
-address StubRoutines::i486::_verify_fpu_cntrl_wrd_entry= NULL;
-address StubRoutines::i486::_call_stub_compiled_return = NULL;
+address StubRoutines::x86::_verify_mxcsr_entry = NULL;
+address StubRoutines::x86::_verify_fpu_cntrl_wrd_entry= NULL;
+address StubRoutines::x86::_call_stub_compiled_return = NULL;
diff --git a/src/cpu/x86/vm/stubRoutines_x86_32.hpp b/src/cpu/x86/vm/stubRoutines_x86_32.hpp
index 8f79b51ec..d1943f373 100644
--- a/src/cpu/x86/vm/stubRoutines_x86_32.hpp
+++ b/src/cpu/x86/vm/stubRoutines_x86_32.hpp
@@ -31,7 +31,7 @@ enum platform_dependent_constants {
code_size2 = 22000 // simply increase if too small (assembler will crash if too small)
};
-class i486 {
+class x86 {
friend class StubGenerator;
friend class VMStructs;
@@ -54,4 +54,4 @@ class i486 {
};
static bool returns_to_call_stub(address return_pc) { return (return_pc == _call_stub_return_address) ||
- return_pc == i486::get_call_stub_compiled_return(); }
+ return_pc == x86::get_call_stub_compiled_return(); }
diff --git a/src/cpu/x86/vm/stubRoutines_x86_64.cpp b/src/cpu/x86/vm/stubRoutines_x86_64.cpp
index 7979f3f0b..aaf91cad1 100644
--- a/src/cpu/x86/vm/stubRoutines_x86_64.cpp
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.cpp
@@ -28,16 +28,16 @@
// Implementation of the platform-specific part of StubRoutines - for
// a description of how to extend it, see the stubRoutines.hpp file.
-address StubRoutines::amd64::_get_previous_fp_entry = NULL;
+address StubRoutines::x86::_get_previous_fp_entry = NULL;
-address StubRoutines::amd64::_verify_mxcsr_entry = NULL;
+address StubRoutines::x86::_verify_mxcsr_entry = NULL;
-address StubRoutines::amd64::_f2i_fixup = NULL;
-address StubRoutines::amd64::_f2l_fixup = NULL;
-address StubRoutines::amd64::_d2i_fixup = NULL;
-address StubRoutines::amd64::_d2l_fixup = NULL;
-address StubRoutines::amd64::_float_sign_mask = NULL;
-address StubRoutines::amd64::_float_sign_flip = NULL;
-address StubRoutines::amd64::_double_sign_mask = NULL;
-address StubRoutines::amd64::_double_sign_flip = NULL;
-address StubRoutines::amd64::_mxcsr_std = NULL;
+address StubRoutines::x86::_f2i_fixup = NULL;
+address StubRoutines::x86::_f2l_fixup = NULL;
+address StubRoutines::x86::_d2i_fixup = NULL;
+address StubRoutines::x86::_d2l_fixup = NULL;
+address StubRoutines::x86::_float_sign_mask = NULL;
+address StubRoutines::x86::_float_sign_flip = NULL;
+address StubRoutines::x86::_double_sign_mask = NULL;
+address StubRoutines::x86::_double_sign_flip = NULL;
+address StubRoutines::x86::_mxcsr_std = NULL;
diff --git a/src/cpu/x86/vm/stubRoutines_x86_64.hpp b/src/cpu/x86/vm/stubRoutines_x86_64.hpp
index da7e8fd56..37342b8d8 100644
--- a/src/cpu/x86/vm/stubRoutines_x86_64.hpp
+++ b/src/cpu/x86/vm/stubRoutines_x86_64.hpp
@@ -30,13 +30,13 @@ static bool returns_to_call_stub(address return_pc) { return return_pc == _
enum platform_dependent_constants
{
- code_size1 = 9000, // simply increase if too small (assembler will
+ code_size1 = 19000, // simply increase if too small (assembler will
// crash if too small)
code_size2 = 22000 // simply increase if too small (assembler will
// crash if too small)
};
-class amd64 {
+class x86 {
friend class StubGenerator;
private:
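The net effect of the renames in these four stubRoutines files is that the 32-bit i486 and 64-bit amd64 holder classes collapse into a single StubRoutines::x86 name, so assembler code shared between the two ports can reference the entries without #ifdefs. A compile-only toy model of the resulting shape (names trimmed, not the real header):

  #include <cstddef>

  typedef unsigned char* address;

  class StubRoutines {
   public:
    class x86 {                                  // formerly i486 (32-bit) / amd64 (64-bit)
      friend class StubGenerator;
      static address _verify_mxcsr_entry;        // filled in by the stub generator
     public:
      static address verify_mxcsr_entry() { return _verify_mxcsr_entry; }
    };
  };

  address StubRoutines::x86::_verify_mxcsr_entry = NULL;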
diff --git a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
index 61eb99b99..093d40457 100644
--- a/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
+++ b/src/cpu/x86/vm/templateInterpreter_x86_32.cpp
@@ -43,9 +43,9 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
//
#ifdef ASSERT
{ Label L;
- __ leal(rax, Address(rbp,
+ __ lea(rax, Address(rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize));
- __ cmpl(rax, rsp); // rax, = maximal rsp for current rbp,
+ __ cmpptr(rax, rsp); // rax, = maximal rsp for current rbp,
// (stack grows negative)
__ jcc(Assembler::aboveEqual, L); // check if frame is complete
__ stop ("interpreter frame not set up");
@@ -80,7 +80,7 @@ address TemplateInterpreterGenerator::generate_ArrayIndexOutOfBounds_handler(con
address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address entry = __ pc();
// object is at TOS
- __ popl(rax);
+ __ pop(rax);
// expression stack must be empty before entering the VM if an exception
// happened
__ empty_expression_stack();
@@ -97,7 +97,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
address entry = __ pc();
if (pass_oop) {
// object is at TOS
- __ popl(rbx);
+ __ pop(rbx);
}
// expression stack must be empty before entering the VM if an exception happened
__ empty_expression_stack();
@@ -110,7 +110,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
if (message != NULL) {
__ lea(rbx, ExternalAddress((address)message));
} else {
- __ movl(rbx, NULL_WORD);
+ __ movptr(rbx, (int32_t)NULL_WORD);
}
__ call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::create_exception), rax, rbx);
}
@@ -123,7 +123,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(const ch
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
// NULL last_sp until next java call
- __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ dispatch_next(state);
return entry;
}
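A recurring pattern in the rest of this file: stores and compares of NULL_WORD gain an explicit (int32_t) cast. The likely reason is that the merged assembler exposes both register and 32-bit-immediate forms of the pointer helpers, while NULL_WORD is a pointer-width zero, so the cast makes the shared source pick the immediate form unambiguously on both ports. A toy overload illustration (invented signatures, not the HotSpot API):

  #include <cstdint>
  #include <cstdio>

  struct Register { int encoding; };

  static void movptr(Register, Register) { std::puts("register form"); }
  static void movptr(Register, int32_t)  { std::puts("imm32 form"); }

  int main() {
    Register rbx{3};
    movptr(rbx, (int32_t)0);   // the explicit cast selects the 32-bit immediate form
    return 0;
  }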
@@ -160,32 +160,32 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
// In SSE mode, interpreter returns FP results in xmm0 but they need
// to end up back on the FPU so it can operate on them.
if (state == ftos && UseSSE >= 1) {
- __ subl(rsp, wordSize);
+ __ subptr(rsp, wordSize);
__ movflt(Address(rsp, 0), xmm0);
__ fld_s(Address(rsp, 0));
- __ addl(rsp, wordSize);
+ __ addptr(rsp, wordSize);
} else if (state == dtos && UseSSE >= 2) {
- __ subl(rsp, 2*wordSize);
+ __ subptr(rsp, 2*wordSize);
__ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0));
- __ addl(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
}
__ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_return_entry_for in interpreter");
// Restore stack bottom in case i2c adjusted stack
- __ movl(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that rsp is now tos until next java call
- __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp();
__ restore_locals();
__ get_cache_and_index_at_bcp(rbx, rcx, 1);
__ movl(rbx, Address(rbx, rcx,
- Address::times_4, constantPoolCacheOopDesc::base_offset() +
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() +
ConstantPoolCacheEntry::flags_offset()));
- __ andl(rbx, 0xFF);
- __ leal(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
+ __ andptr(rbx, 0xFF);
+ __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale()));
__ dispatch_next(state, step);
return entry;
}
@@ -196,29 +196,29 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state, i
// In SSE mode, FP results are in xmm0
if (state == ftos && UseSSE > 0) {
- __ subl(rsp, wordSize);
+ __ subptr(rsp, wordSize);
__ movflt(Address(rsp, 0), xmm0);
__ fld_s(Address(rsp, 0));
- __ addl(rsp, wordSize);
+ __ addptr(rsp, wordSize);
} else if (state == dtos && UseSSE >= 2) {
- __ subl(rsp, 2*wordSize);
+ __ subptr(rsp, 2*wordSize);
__ movdbl(Address(rsp, 0), xmm0);
__ fld_d(Address(rsp, 0));
- __ addl(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
}
__ MacroAssembler::verify_FPU(state == ftos || state == dtos ? 1 : 0, "generate_deopt_entry_for in interpreter");
// The stack is not extended by deopt but we must NULL last_sp as this
// entry is like a "return".
- __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp();
__ restore_locals();
// handle exceptions
{ Label L;
const Register thread = rcx;
__ get_thread(thread);
- __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::zero, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_pending_exception));
__ should_not_reach_here();
@@ -254,14 +254,14 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
address entry = __ pc();
switch (type) {
case T_BOOLEAN: __ c2bool(rax); break;
- case T_CHAR : __ andl(rax, 0xFFFF); break;
+ case T_CHAR : __ andptr(rax, 0xFFFF); break;
case T_BYTE : __ sign_extend_byte (rax); break;
case T_SHORT : __ sign_extend_short(rax); break;
case T_INT : /* nothing to do */ break;
case T_DOUBLE :
case T_FLOAT :
{ const Register t = InterpreterRuntime::SignatureHandlerGenerator::temp();
- __ popl(t); // remove return address first
+ __ pop(t); // remove return address first
__ pop_dtos_to_rsp();
// Must return a result for interpreter or compiler. In SSE
// mode, results are returned in xmm0 and the FPU stack must
@@ -280,13 +280,13 @@ address TemplateInterpreterGenerator::generate_result_handler_for(BasicType type
__ fld_d(Address(rsp, 0));
}
// and pop the temp
- __ addl(rsp, 2 * wordSize);
- __ pushl(t); // restore return address
+ __ addptr(rsp, 2 * wordSize);
+ __ push(t); // restore return address
}
break;
case T_OBJECT :
// retrieve result from frame
- __ movl(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
+ __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
// and verify it
__ verify_oop(rax);
break;
@@ -322,12 +322,12 @@ void InterpreterGenerator::generate_counter_incr(Label* overflow, Label* profile
const Address backedge_counter (rbx, methodOopDesc::backedge_counter_offset() + InvocationCounter::counter_offset());
if (ProfileInterpreter) { // %%% Merge this into methodDataOop
- __ increment(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
+ __ incrementl(Address(rbx,methodOopDesc::interpreter_invocation_counter_offset()));
}
// Update standard invocation counters
__ movl(rax, backedge_counter); // load backedge counter
- __ increment(rcx, InvocationCounter::count_increment);
+ __ incrementl(rcx, InvocationCounter::count_increment);
__ andl(rax, InvocationCounter::count_mask_value); // mask out the status bits
__ movl(invocation_counter, rcx); // save invocation count
@@ -382,10 +382,10 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
// indicating if the counter overflow occurs at a backwards branch (non-NULL bcp).
// The call returns the address of the verified entry point for the method or NULL
// if the compilation did not complete (either went background or bailed out).
- __ movl(rax, (int)false);
+ __ movptr(rax, (int32_t)false);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rax);
- __ movl(rbx, Address(rbp, method_offset)); // restore methodOop
+ __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
// Preserve invariant that rsi/rdi contain bcp/locals of sender frame
// and jump to the interpreted entry.
@@ -433,7 +433,7 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
Label after_frame_check_pop;
- __ pushl(rsi);
+ __ push(rsi);
const Register thread = rsi;
@@ -443,43 +443,43 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
const Address stack_size(thread, Thread::stack_size_offset());
// locals + overhead, in bytes
- __ leal(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));
+ __ lea(rax, Address(noreg, rdx, Interpreter::stackElementScale(), overhead_size));
#ifdef ASSERT
Label stack_base_okay, stack_size_okay;
// verify that thread stack base is non-zero
- __ cmpl(stack_base, 0);
+ __ cmpptr(stack_base, (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, stack_base_okay);
__ stop("stack base is zero");
__ bind(stack_base_okay);
// verify that thread stack size is non-zero
- __ cmpl(stack_size, 0);
+ __ cmpptr(stack_size, 0);
__ jcc(Assembler::notEqual, stack_size_okay);
__ stop("stack size is zero");
__ bind(stack_size_okay);
#endif
// Add stack base to locals and subtract stack size
- __ addl(rax, stack_base);
- __ subl(rax, stack_size);
+ __ addptr(rax, stack_base);
+ __ subptr(rax, stack_size);
// Use the maximum number of pages we might bang.
const int max_pages = StackShadowPages > (StackRedPages+StackYellowPages) ? StackShadowPages :
(StackRedPages+StackYellowPages);
- __ addl(rax, max_pages * page_size);
+ __ addptr(rax, max_pages * page_size);
// check against the current stack bottom
- __ cmpl(rsp, rax);
+ __ cmpptr(rsp, rax);
__ jcc(Assembler::above, after_frame_check_pop);
- __ popl(rsi); // get saved bcp / (c++ prev state ).
+ __ pop(rsi); // get saved bcp / (c++ prev state ).
- __ popl(rax); // get return address
+ __ pop(rax); // get return address
__ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
// all done with frame size check
__ bind(after_frame_check_pop);
- __ popl(rsi);
+ __ pop(rsi);
__ bind(after_frame_check);
}
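The arithmetic in this stack-bang hunk reads more easily as pseudo-C: the entry refuses to build a frame if locals plus the fixed overhead plus the guard/shadow pages would put rsp at or below the thread's usable stack bottom. A standalone sketch (invented names, illustration only):

  #include <cstdint>
  #include <cstdio>

  static bool frame_fits(uintptr_t rsp, uintptr_t stack_base, uintptr_t stack_size,
                         uintptr_t frame_bytes, uintptr_t guard_bytes) {
    // rax = locals + overhead; rax += stack_base; rax -= stack_size; rax += guard pages
    uintptr_t limit = frame_bytes + stack_base - stack_size + guard_bytes;
    return rsp > limit;                 // mirrors "cmpptr(rsp, rax); jcc(above, after_frame_check)"
  }

  int main() {
    uintptr_t base = 0x70100000, size = 0x100000;   // stack occupies [base - size, base)
    std::printf("%d\n", frame_fits(0x700f0000, base, size, 4096, 16 * 4096));  // 1: plenty of room
    return 0;
  }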
@@ -507,18 +507,18 @@ void InterpreterGenerator::lock_method(void) {
const int mirror_offset = klassOopDesc::klass_part_offset_in_bytes() + Klass::java_mirror_offset_in_bytes();
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
- __ movl(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
+ __ movptr(rax, Address(rdi, Interpreter::local_offset_in_bytes(0))); // get receiver (assume this is frequent case)
__ jcc(Assembler::zero, done);
- __ movl(rax, Address(rbx, methodOopDesc::constants_offset()));
- __ movl(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
- __ movl(rax, Address(rax, mirror_offset));
+ __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rax, Address(rax, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rax, mirror_offset));
__ bind(done);
}
// add space for monitor & lock
- __ subl(rsp, entry_size); // add space for a monitor entry
- __ movl(monitor_block_top, rsp); // set new monitor block top
- __ movl(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
- __ movl(rdx, rsp); // object address
+ __ subptr(rsp, entry_size); // add space for a monitor entry
+ __ movptr(monitor_block_top, rsp); // set new monitor block top
+ __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
+ __ mov(rdx, rsp); // object address
__ lock_object(rdx);
}
@@ -528,38 +528,38 @@ void InterpreterGenerator::lock_method(void) {
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// initialize fixed part of activation frame
- __ pushl(rax); // save return address
+ __ push(rax); // save return address
__ enter(); // save old & set new rbp,
- __ pushl(rsi); // set sender sp
- __ pushl(NULL_WORD); // leave last_sp as null
- __ movl(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop
- __ leal(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
- __ pushl(rbx); // save methodOop
+ __ push(rsi); // set sender sp
+ __ push((int32_t)NULL_WORD); // leave last_sp as null
+ __ movptr(rsi, Address(rbx,methodOopDesc::const_offset())); // get constMethodOop
+ __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
+ __ push(rbx); // save methodOop
if (ProfileInterpreter) {
Label method_data_continue;
- __ movl(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- __ testl(rdx, rdx);
+ __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ __ testptr(rdx, rdx);
__ jcc(Assembler::zero, method_data_continue);
- __ addl(rdx, in_bytes(methodDataOopDesc::data_offset()));
+ __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
__ bind(method_data_continue);
- __ pushl(rdx); // set the mdp (method data pointer)
+ __ push(rdx); // set the mdp (method data pointer)
} else {
- __ pushl(0);
+ __ push(0);
}
- __ movl(rdx, Address(rbx, methodOopDesc::constants_offset()));
- __ movl(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
- __ pushl(rdx); // set constant pool cache
- __ pushl(rdi); // set locals pointer
+ __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ push(rdx); // set constant pool cache
+ __ push(rdi); // set locals pointer
if (native_call) {
- __ pushl(0); // no bcp
+ __ push(0); // no bcp
} else {
- __ pushl(rsi); // set bcp
+ __ push(rsi); // set bcp
}
- __ pushl(0); // reserve word for pointer to expression stack bottom
- __ movl(Address(rsp, 0), rsp); // set expression stack bottom
+ __ push(0); // reserve word for pointer to expression stack bottom
+ __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
// End of helpers
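Reading the push sequence in generate_fixed_frame above, the interpreter frame below the saved rbp ends up with the slots listed in the sketch below. This is a reading aid only; the authoritative values are the frame::interpreter_frame_*_offset constants.

  #include <cstdio>

  // Slots pushed by generate_fixed_frame, in order, counted in words below rbp.
  enum InterpFrameSlot {
    slot_sender_sp = 1,        // saved caller sp (rsi / r13)
    slot_last_sp,              // NULL until a call is in progress
    slot_method,               // methodOop
    slot_mdx,                  // method data pointer, or 0
    slot_cpool_cache,          // constant pool cache
    slot_locals,               // locals pointer (rdi / r14)
    slot_bcp,                  // bytecode pointer, or 0 for native frames
    slot_expr_stack_bottom     // points at itself while the expression stack is empty
  };

  int main() {
    std::printf("expression stack bottom sits %d words below rbp\n", (int)slot_expr_stack_bottom);
    return 0;
  }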
@@ -598,21 +598,21 @@ address InterpreterGenerator::generate_accessor_entry(void) {
// these conditions first and use slow path if necessary.
// rbx,: method
// rcx: receiver
- __ movl(rax, Address(rsp, wordSize));
+ __ movptr(rax, Address(rsp, wordSize));
// check if local 0 != NULL and read field
- __ testl(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, slow_path);
- __ movl(rdi, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
// read first instruction word and extract bytecode @ 1 and index @ 2
- __ movl(rdx, Address(rbx, methodOopDesc::const_offset()));
+ __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
__ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
// Shift codes right to get the index on the right.
// The bytecode fetched looks like <index><0xb4><0x2a>
__ shrl(rdx, 2*BitsPerByte);
__ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
- __ movl(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
// rax,: local 0
// rbx,: method
@@ -629,21 +629,21 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ movl(rcx,
Address(rdi,
rdx,
- Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(rcx, 2*BitsPerByte);
__ andl(rcx, 0xFF);
__ cmpl(rcx, Bytecodes::_getfield);
__ jcc(Assembler::notEqual, slow_path);
// Note: constant pool entry is not valid before bytecode is resolved
- __ movl(rcx,
- Address(rdi,
- rdx,
- Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
+ __ movptr(rcx,
+ Address(rdi,
+ rdx,
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset()));
__ movl(rdx,
Address(rdi,
rdx,
- Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
+ Address::times_ptr, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::flags_offset()));
Label notByte, notShort, notChar;
const Address field_address (rax, rcx, Address::times_1);
@@ -682,13 +682,14 @@ address InterpreterGenerator::generate_accessor_entry(void) {
__ bind(okay);
#endif // ASSERT
// All the rest are a 32 bit wordsize
- __ movl(rax, field_address);
+  // This is ok for now, since fast accessors should be going away
+ __ movptr(rax, field_address);
__ bind(xreturn_path);
// _ireturn/_areturn
- __ popl(rdi); // get return address
- __ movl(rsp, rsi); // set sp to sender sp
+ __ pop(rdi); // get return address
+ __ mov(rsp, rsi); // set sp to sender sp
__ jmp(rdi);
// generate a vanilla interpreter entry as the slow path
@@ -732,18 +733,18 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// rcx: size of parameters
// rsi: sender sp
- __ popl(rax); // get return address
+ __ pop(rax); // get return address
// for natives the size of locals is zero
// compute beginning of parameters (rdi)
- __ leal(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
+ __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
// add 2 zero-initialized slots for native calls
// NULL result handler
- __ pushl(NULL_WORD);
+ __ push((int32_t)NULL_WORD);
// NULL oop temp (mirror or jni oop result)
- __ pushl(NULL_WORD);
+ __ push((int32_t)NULL_WORD);
if (inc_counter) __ movl(rcx, invocation_counter); // (pre-)fetch invocation count
// initialize fixed part of activation frame
@@ -818,8 +819,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
{ Label L;
const Address monitor_block_top (rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize);
- __ movl(rax, monitor_block_top);
- __ cmpl(rax, rsp);
+ __ movptr(rax, monitor_block_top);
+ __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter");
__ bind(L);
@@ -838,19 +839,19 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ get_method(method);
__ verify_oop(method);
__ load_unsigned_word(t, Address(method, methodOopDesc::size_of_parameters_offset()));
- __ shll(t, Interpreter::logStackElementSize());
- __ addl(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
- __ subl(rsp, t);
- __ andl(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
+ __ shlptr(t, Interpreter::logStackElementSize());
+ __ addptr(t, 2*wordSize); // allocate two more slots for JNIEnv and possible mirror
+ __ subptr(rsp, t);
+ __ andptr(rsp, -(StackAlignmentInBytes)); // gcc needs 16 byte aligned stacks to do XMM intrinsics
// get signature handler
{ Label L;
- __ movl(t, Address(method, methodOopDesc::signature_handler_offset()));
- __ testl(t, t);
+ __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ testptr(t, t);
__ jcc(Assembler::notZero, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
__ get_method(method);
- __ movl(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
__ bind(L);
}
@@ -867,7 +868,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// result handler is in rax,
// set result handler
- __ movl(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);
+ __ movptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize), rax);
// pass mirror handle if static call
{ Label L;
@@ -876,34 +877,34 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);
// get mirror
- __ movl(t, Address(method, methodOopDesc:: constants_offset()));
- __ movl(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
- __ movl(t, Address(t, mirror_offset));
+ __ movptr(t, Address(method, methodOopDesc:: constants_offset()));
+ __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(t, Address(t, mirror_offset));
// copy mirror into activation frame
- __ movl(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
+ __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize), t);
// pass handle to mirror
- __ leal(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
- __ movl(Address(rsp, wordSize), t);
+ __ lea(t, Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+ __ movptr(Address(rsp, wordSize), t);
__ bind(L);
}
// get native function entry point
{ Label L;
- __ movl(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
- __ cmp32(rax, unsatisfied.addr());
+ __ cmpptr(rax, unsatisfied.addr());
__ jcc(Assembler::notEqual, L);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::prepare_native_call), method);
__ get_method(method);
__ verify_oop(method);
- __ movl(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
__ bind(L);
}
// pass JNIEnv
__ get_thread(thread);
- __ leal(t, Address(thread, JavaThread::jni_environment_offset()));
- __ movl(Address(rsp, 0), t);
+ __ lea(t, Address(thread, JavaThread::jni_environment_offset()));
+ __ movptr(Address(rsp, 0), t);
// set_last_Java_frame_before_call
// It is enough that the pc()
@@ -934,14 +935,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ ldmxcsr(ExternalAddress(StubRoutines::addr_mxcsr_std()));
}
else if (CheckJNICalls ) {
- __ call(RuntimeAddress(StubRoutines::i486::verify_mxcsr_entry()));
+ __ call(RuntimeAddress(StubRoutines::x86::verify_mxcsr_entry()));
}
}
// Either restore the x87 floating pointer control word after returning
// from the JNI call or verify that it wasn't changed.
if (CheckJNICalls) {
- __ call(RuntimeAddress(StubRoutines::i486::verify_fpu_cntrl_wrd_entry()));
+ __ call(RuntimeAddress(StubRoutines::x86::verify_fpu_cntrl_wrd_entry()));
}
// save potential result in ST(0) & rdx:rax
@@ -975,7 +976,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ movl(Address(thread, JavaThread::thread_state_offset()), _thread_in_native_trans);
if(os::is_MP()) {
if (UseMembar) {
- __ membar(); // Force this write out before the read below
+ // Force this write out before the read below
+ __ membar(Assembler::Membar_mask_bits(
+ Assembler::LoadLoad | Assembler::LoadStore |
+ Assembler::StoreLoad | Assembler::StoreStore));
} else {
// Write serialization page so VM thread can do a pseudo remote membar.
// We use the current thread pointer to calculate a thread specific
@@ -1008,7 +1012,7 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// preserved and correspond to the bcp/locals pointers. So we do a runtime call
// by hand.
//
- __ pushl(thread);
+ __ push(thread);
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address,
JavaThread::check_special_condition_for_native_trans)));
__ increment(rsp, wordSize);
@@ -1023,8 +1027,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ reset_last_Java_frame(thread, true, true);
// reset handle block
- __ movl(t, Address(thread, JavaThread::active_handles_offset()));
- __ movl(Address(t, JNIHandleBlock::top_offset_in_bytes()), 0);
+ __ movptr(t, Address(thread, JavaThread::active_handles_offset()));
+ __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
// If result was an oop then unbox and save it in the frame
{ Label L;
@@ -1033,14 +1037,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmpptr(Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize),
handler.addr());
__ jcc(Assembler::notEqual, no_oop);
- __ cmpl(Address(rsp, 0), NULL_WORD);
+ __ cmpptr(Address(rsp, 0), (int32_t)NULL_WORD);
__ pop(ltos);
- __ testl(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, store_result);
// unbox
- __ movl(rax, Address(rax, 0));
+ __ movptr(rax, Address(rax, 0));
__ bind(store_result);
- __ movl(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
+ __ movptr(Address(rbp, (frame::interpreter_frame_oop_temp_offset)*wordSize), rax);
// keep stack depth as expected by pushing oop which will eventually be discarded
__ push(ltos);
__ bind(no_oop);
@@ -1051,9 +1055,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ cmpl(Address(thread, JavaThread::stack_guard_state_offset()), JavaThread::stack_guard_yellow_disabled);
__ jcc(Assembler::notEqual, no_reguard);
- __ pushad();
+ __ pusha();
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
- __ popad();
+ __ popa();
__ bind(no_reguard);
}
@@ -1063,12 +1067,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// Can't call_VM until bcp is within reasonable.
__ get_method(method); // method is junk from thread_in_native to now.
__ verify_oop(method);
- __ movl(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop
- __ leal(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
+ __ movptr(rsi, Address(method,methodOopDesc::const_offset())); // get constMethodOop
+ __ lea(rsi, Address(rsi,constMethodOopDesc::codes_offset())); // get codebase
// handle exceptions (exception handling will handle unlocking!)
{ Label L;
- __ cmpl(Address(thread, Thread::pending_exception_offset()), NULL_WORD);
+ __ cmpptr(Address(thread, Thread::pending_exception_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::zero, L);
// Note: At some point we may want to unify this with the code used in call_VM_base();
// i.e., we should use the StubRoutines::forward_exception code. For now this
@@ -1089,10 +1093,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// to check that the object has not been unlocked by an explicit monitorexit bytecode.
const Address monitor(rbp, frame::interpreter_frame_initial_sp_offset * wordSize - (int)sizeof(BasicObjectLock));
- __ leal(rdx, monitor); // address of first monitor
+ __ lea(rdx, monitor); // address of first monitor
- __ movl(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
- __ testl(t, t);
+ __ movptr(t, Address(rdx, BasicObjectLock::obj_offset_in_bytes()));
+ __ testptr(t, t);
__ jcc(Assembler::notZero, unlock);
// Entry already unlocked, need to throw exception
@@ -1114,14 +1118,14 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// restore potential result in rdx:rax, call result handler to restore potential result in ST0 & handle result
__ pop(ltos);
- __ movl(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
+ __ movptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
__ call(t);
// remove activation
- __ movl(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
+ __ movptr(t, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
__ leave(); // remove frame anchor
- __ popl(rdi); // get return address
- __ movl(rsp, t); // set sp to sender sp
+ __ pop(rdi); // get return address
+ __ mov(rsp, t); // set sp to sender sp
__ jmp(rdi);
if (inc_counter) {
@@ -1165,10 +1169,10 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
generate_stack_overflow_check();
// get return address
- __ popl(rax);
+ __ pop(rax);
// compute beginning of parameters (rdi)
- __ leal(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
+ __ lea(rdi, Address(rsp, rcx, Interpreter::stackElementScale(), -wordSize));
// rdx - # of additional locals
// allocate space for locals
@@ -1178,8 +1182,10 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop);
- if (TaggedStackInterpreter) __ pushl(NULL_WORD); // push tag
- __ pushl(NULL_WORD); // initialize local variables
+ if (TaggedStackInterpreter) {
+ __ push((int32_t)NULL_WORD); // push tag
+ }
+ __ push((int32_t)NULL_WORD); // initialize local variables
__ decrement(rdx); // until everything initialized
__ jcc(Assembler::greater, loop);
__ bind(exit);
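The loop rewritten in this hunk zero-initializes the additional locals that have no incoming argument, pushing an extra tag word per slot when TaggedStackInterpreter is enabled. The same idea in plain C++ (illustrative only, invented names):

  #include <vector>
  #include <cstdint>

  static void zero_additional_locals(std::vector<intptr_t>& stack, int additional, bool tagged) {
    for (int i = additional; i > 0; --i) {   // "decrement(rdx) ... jcc(greater, loop)"
      if (tagged) stack.push_back(0);        // tag slot
      stack.push_back(0);                    // the local itself
    }
  }

  int main() {
    std::vector<intptr_t> stack;
    zero_additional_locals(stack, 3, false);
    return (int)stack.size();                // 3
  }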
@@ -1262,8 +1268,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
{ Label L;
const Address monitor_block_top (rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize);
- __ movl(rax, monitor_block_top);
- __ cmpl(rax, rsp);
+ __ movptr(rax, monitor_block_top);
+ __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter");
__ bind(L);
@@ -1283,12 +1289,12 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi, true);
- __ movl(rbx, Address(rbp, method_offset)); // restore methodOop
- __ movl(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
+ __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
+ __ movptr(rax, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
__ test_method_data_pointer(rax, profile_method_continue);
- __ addl(rax, in_bytes(methodDataOopDesc::data_offset()));
- __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
+ __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
+ __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rax);
__ jmp(profile_method_continue);
}
// Handle overflow of counter and compile method
@@ -1482,7 +1488,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// Restore sp to interpreter_frame_last_sp even though we are going
// to empty the expression stack for the exception processing.
- __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
// rax,: exception
// rdx: return address/pc that threw exception
__ restore_bcp(); // rsi points to call/send
@@ -1544,7 +1550,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// deoptimization blob's unpack entry because of the presence of
// adapter frames in C2.
Label caller_not_deoptimized;
- __ movl(rdx, Address(rbp, frame::return_addr_offset * wordSize));
+ __ movptr(rdx, Address(rbp, frame::return_addr_offset * wordSize));
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::interpreter_contains), rdx);
__ testl(rax, rax);
__ jcc(Assembler::notZero, caller_not_deoptimized);
@@ -1553,10 +1559,10 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_method(rax);
__ verify_oop(rax);
__ load_unsigned_word(rax, Address(rax, in_bytes(methodOopDesc::size_of_parameters_offset())));
- __ shll(rax, Interpreter::logStackElementSize());
+ __ shlptr(rax, Interpreter::logStackElementSize());
__ restore_locals();
- __ subl(rdi, rax);
- __ addl(rdi, wordSize);
+ __ subptr(rdi, rax);
+ __ addptr(rdi, wordSize);
// Save these arguments
__ get_thread(rcx);
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, Deoptimization::popframe_preserve_args), rcx, rax, rdi);
@@ -1592,8 +1598,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// maintain this kind of invariant all the time we call a small
// fixup routine to move the mutated arguments onto the top of our
// expression stack if necessary.
- __ movl(rax, rsp);
- __ movl(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ mov(rax, rsp);
+ __ movptr(rbx, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
__ get_thread(rcx);
// PC must point into interpreter here
__ set_last_Java_frame(rcx, noreg, rbp, __ pc());
@@ -1601,8 +1607,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
__ get_thread(rcx);
__ reset_last_Java_frame(rcx, true, true);
// Restore the last_sp and null it out
- __ movl(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
- __ movl(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp();
__ restore_locals();
@@ -1624,13 +1630,13 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// preserve exception over this code sequence
__ pop_ptr(rax);
__ get_thread(rcx);
- __ movl(Address(rcx, JavaThread::vm_result_offset()), rax);
+ __ movptr(Address(rcx, JavaThread::vm_result_offset()), rax);
// remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false);
// restore exception
__ get_thread(rcx);
- __ movl(rax, Address(rcx, JavaThread::vm_result_offset()));
- __ movl(Address(rcx, JavaThread::vm_result_offset()), NULL_WORD);
+ __ movptr(rax, Address(rcx, JavaThread::vm_result_offset()));
+ __ movptr(Address(rcx, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
__ verify_oop(rax);
// Inbetween activations - previous activation type unknown yet
@@ -1641,12 +1647,12 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// rdx: return address/pc that threw exception
// rsp: expression stack of caller
// rbp,: rbp, of caller
- __ pushl(rax); // save exception
- __ pushl(rdx); // save return address
+ __ push(rax); // save exception
+ __ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::exception_handler_for_return_address), rdx);
- __ movl(rbx, rax); // save exception handler
- __ popl(rdx); // restore return address
- __ popl(rax); // restore exception
+ __ mov(rbx, rax); // save exception handler
+ __ pop(rdx); // restore return address
+ __ pop(rax); // restore exception
// Note that an "issuing PC" is actually the next PC after the call
__ jmp(rbx); // jump to exception handler of caller
}
@@ -1665,7 +1671,7 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ load_earlyret_value(state);
__ get_thread(rcx);
- __ movl(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
+ __ movptr(rcx, Address(rcx, JavaThread::jvmti_thread_state_offset()));
const Address cond_addr(rcx, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state
@@ -1716,12 +1722,12 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
address entry = __ pc();
// prepare expression stack
- __ popl(rcx); // pop return address so expression stack is 'pure'
+ __ pop(rcx); // pop return address so expression stack is 'pure'
__ push(state); // save tosca
// pass tosca registers as arguments & call tracer
__ call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode), rcx, rax, rdx);
- __ movl(rcx, rax); // make sure return address is not destroyed by pop(state)
+ __ mov(rcx, rax); // make sure return address is not destroyed by pop(state)
__ pop(state); // restore tosca
// return
@@ -1732,12 +1738,12 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
void TemplateInterpreterGenerator::count_bytecode() {
- __ increment(ExternalAddress((address) &BytecodeCounter::_counter_value));
+ __ incrementl(ExternalAddress((address) &BytecodeCounter::_counter_value));
}
void TemplateInterpreterGenerator::histogram_bytecode(Template* t) {
- __ increment(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
+ __ incrementl(ExternalAddress((address) &BytecodeHistogram::_counters[t->bytecode()]));
}
@@ -1747,7 +1753,7 @@ void TemplateInterpreterGenerator::histogram_bytecode_pair(Template* t) {
__ orl(rbx, ((int)t->bytecode()) << BytecodePairHistogram::log2_number_of_codes);
ExternalAddress table((address) BytecodePairHistogram::_counters);
Address index(noreg, rbx, Address::times_4);
- __ increment(ArrayAddress(table, index));
+ __ incrementl(ArrayAddress(table, index));
}
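The bulk of the templateInterpreter_x86_32.cpp changes above follow one pattern: explicitly sized instructions (movl/addl/subl/leal/pushl/popl) operating on pointer-sized values become the word-size-neutral MacroAssembler names (movptr/addptr/subptr/lea/push/pop), which the now-shared assembler expands to the 32- or 64-bit encoding as needed, while genuinely 32-bit operations keep their l suffix. A toy model of the idea (not the HotSpot API):

  #include <cstdint>
  #include <cstdio>

  #ifdef _LP64
  typedef uint64_t word_t;       // movptr would emit movq here
  #else
  typedef uint32_t word_t;       // ...and movl here
  #endif

  static void movptr(word_t& dst, word_t src) { dst = src; }
  static void addptr(word_t& dst, word_t imm) { dst += imm; }

  int main() {
    word_t rsp = 0;
    movptr(rsp, 0x1000);                             // like "__ movptr(...)"
    addptr(rsp, sizeof(word_t));                     // like "__ addptr(rsp, wordSize)"
    std::printf("%u-bit word, rsp=0x%llx\n",
                (unsigned)(sizeof(word_t) * 8), (unsigned long long)rsp);
    return 0;
  }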
diff --git a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
index 3f7d4719b..9caf33f6b 100644
--- a/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
+++ b/src/cpu/x86/vm/templateInterpreter_x86_64.cpp
@@ -27,6 +27,8 @@
#define __ _masm->
+#ifndef CC_INTERP
+
const int method_offset = frame::interpreter_frame_method_offset * wordSize;
const int bci_offset = frame::interpreter_frame_bcx_offset * wordSize;
const int locals_offset = frame::interpreter_frame_locals_offset * wordSize;
@@ -39,11 +41,11 @@ address TemplateInterpreterGenerator::generate_StackOverflowError_handler() {
#ifdef ASSERT
{
Label L;
- __ leaq(rax, Address(rbp,
- frame::interpreter_frame_monitor_block_top_offset *
- wordSize));
- __ cmpq(rax, rsp); // rax = maximal rsp for current rbp (stack
- // grows negative)
+ __ lea(rax, Address(rbp,
+ frame::interpreter_frame_monitor_block_top_offset *
+ wordSize));
+ __ cmpptr(rax, rsp); // rax = maximal rsp for current rbp (stack
+ // grows negative)
__ jcc(Assembler::aboveEqual, L); // check if frame is complete
__ stop ("interpreter frame not set up");
__ bind(L);
@@ -84,7 +86,7 @@ address TemplateInterpreterGenerator::generate_ClassCastException_handler() {
address entry = __ pc();
// object is at TOS
- __ popq(c_rarg1);
+ __ pop(c_rarg1);
// expression stack must be empty before entering the VM if an
// exception happened
@@ -104,7 +106,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
address entry = __ pc();
if (pass_oop) {
// object is at TOS
- __ popq(c_rarg2);
+ __ pop(c_rarg2);
}
// expression stack must be empty before entering the VM if an
// exception happened
@@ -137,7 +139,7 @@ address TemplateInterpreterGenerator::generate_exception_handler_common(
address TemplateInterpreterGenerator::generate_continuation_for(TosState state) {
address entry = __ pc();
// NULL last_sp until next java call
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ dispatch_next(state);
return entry;
}
@@ -153,12 +155,13 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
address entry = __ pc();
// Restore stack bottom in case i2c adjusted stack
- __ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// and NULL it as marker that esp is now tos until next java call
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp();
__ restore_locals();
+
__ get_cache_and_index_at_bcp(rbx, rcx, 1);
__ movl(rbx, Address(rbx, rcx,
Address::times_8,
@@ -166,7 +169,7 @@ address TemplateInterpreterGenerator::generate_return_entry_for(TosState state,
3 * wordSize));
__ andl(rbx, 0xFF);
if (TaggedStackInterpreter) __ shll(rbx, 1); // 2 slots per parameter.
- __ leaq(rsp, Address(rsp, rbx, Address::times_8));
+ __ lea(rsp, Address(rsp, rbx, Address::times_8));
__ dispatch_next(state, step);
return entry;
}
@@ -176,13 +179,13 @@ address TemplateInterpreterGenerator::generate_deopt_entry_for(TosState state,
int step) {
address entry = __ pc();
// NULL last_sp until next java call
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp();
__ restore_locals();
// handle exceptions
{
Label L;
- __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::zero, L);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
@@ -231,7 +234,7 @@ address TemplateInterpreterGenerator::generate_result_handler_for(
case T_DOUBLE : /* nothing to do */ break;
case T_OBJECT :
// retrieve result from frame
- __ movq(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
+ __ movptr(rax, Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize));
// and verify it
__ verify_oop(rax);
break;
@@ -336,7 +339,7 @@ void InterpreterGenerator::generate_counter_overflow(Label* do_continue) {
InterpreterRuntime::frequency_counter_overflow),
c_rarg1);
- __ movq(rbx, Address(rbp, method_offset)); // restore methodOop
+ __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
// Preserve invariant that r13/r14 contain bcp/locals of sender frame
// and jump to the interpreted entry.
__ jmp(*do_continue, relocInfo::none);
@@ -385,36 +388,36 @@ void InterpreterGenerator::generate_stack_overflow_check(void) {
const Address stack_size(r15_thread, Thread::stack_size_offset());
// locals + overhead, in bytes
- __ movq(rax, rdx);
- __ shll(rax, Interpreter::logStackElementSize()); // 2 slots per parameter.
- __ addq(rax, overhead_size);
+ __ mov(rax, rdx);
+ __ shlptr(rax, Interpreter::logStackElementSize()); // 2 slots per parameter.
+ __ addptr(rax, overhead_size);
#ifdef ASSERT
Label stack_base_okay, stack_size_okay;
// verify that thread stack base is non-zero
- __ cmpq(stack_base, 0);
+ __ cmpptr(stack_base, (int32_t)NULL_WORD);
__ jcc(Assembler::notEqual, stack_base_okay);
__ stop("stack base is zero");
__ bind(stack_base_okay);
// verify that thread stack size is non-zero
- __ cmpq(stack_size, 0);
+ __ cmpptr(stack_size, 0);
__ jcc(Assembler::notEqual, stack_size_okay);
__ stop("stack size is zero");
__ bind(stack_size_okay);
#endif
// Add stack base to locals and subtract stack size
- __ addq(rax, stack_base);
- __ subq(rax, stack_size);
+ __ addptr(rax, stack_base);
+ __ subptr(rax, stack_size);
// add in the red and yellow zone sizes
- __ addq(rax, (StackRedPages + StackYellowPages) * page_size);
+ __ addptr(rax, (StackRedPages + StackYellowPages) * page_size);
// check against the current stack bottom
- __ cmpq(rsp, rax);
+ __ cmpptr(rsp, rax);
__ jcc(Assembler::above, after_frame_check);
- __ popq(rax); // get return address
+ __ pop(rax); // get return address
__ jump(ExternalAddress(Interpreter::throw_StackOverflowError_entry()));
// all done with frame size check
@@ -458,17 +461,17 @@ void InterpreterGenerator::lock_method(void) {
__ movl(rax, access_flags);
__ testl(rax, JVM_ACC_STATIC);
// get receiver (assume this is frequent case)
- __ movq(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
+ __ movptr(rax, Address(r14, Interpreter::local_offset_in_bytes(0)));
__ jcc(Assembler::zero, done);
- __ movq(rax, Address(rbx, methodOopDesc::constants_offset()));
- __ movq(rax, Address(rax,
- constantPoolOopDesc::pool_holder_offset_in_bytes()));
- __ movq(rax, Address(rax, mirror_offset));
+ __ movptr(rax, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rax, Address(rax,
+ constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(rax, Address(rax, mirror_offset));
#ifdef ASSERT
{
Label L;
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::notZero, L);
__ stop("synchronization object is NULL");
__ bind(L);
@@ -479,11 +482,11 @@ void InterpreterGenerator::lock_method(void) {
}
// add space for monitor & lock
- __ subq(rsp, entry_size); // add space for a monitor entry
- __ movq(monitor_block_top, rsp); // set new monitor block top
+ __ subptr(rsp, entry_size); // add space for a monitor entry
+ __ movptr(monitor_block_top, rsp); // set new monitor block top
// store object
- __ movq(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
- __ movq(c_rarg1, rsp); // object address
+ __ movptr(Address(rsp, BasicObjectLock::obj_offset_in_bytes()), rax);
+ __ movptr(c_rarg1, rsp); // object address
__ lock_object(c_rarg1);
}
@@ -498,40 +501,187 @@ void InterpreterGenerator::lock_method(void) {
// rdx: cp cache
void TemplateInterpreterGenerator::generate_fixed_frame(bool native_call) {
// initialize fixed part of activation frame
- __ pushq(rax); // save return address
+ __ push(rax); // save return address
__ enter(); // save old & set new rbp
- __ pushq(r13); // set sender sp
- __ pushq((int)NULL_WORD); // leave last_sp as null
- __ movq(r13, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop
- __ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
- __ pushq(rbx); // save methodOop
+ __ push(r13); // set sender sp
+ __ push((int)NULL_WORD); // leave last_sp as null
+ __ movptr(r13, Address(rbx, methodOopDesc::const_offset())); // get constMethodOop
+ __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
+ __ push(rbx); // save methodOop
if (ProfileInterpreter) {
Label method_data_continue;
- __ movq(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
- __ testq(rdx, rdx);
+ __ movptr(rdx, Address(rbx, in_bytes(methodOopDesc::method_data_offset())));
+ __ testptr(rdx, rdx);
__ jcc(Assembler::zero, method_data_continue);
- __ addq(rdx, in_bytes(methodDataOopDesc::data_offset()));
+ __ addptr(rdx, in_bytes(methodDataOopDesc::data_offset()));
__ bind(method_data_continue);
- __ pushq(rdx); // set the mdp (method data pointer)
+ __ push(rdx); // set the mdp (method data pointer)
} else {
- __ pushq(0);
+ __ push(0);
}
- __ movq(rdx, Address(rbx, methodOopDesc::constants_offset()));
- __ movq(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
- __ pushq(rdx); // set constant pool cache
- __ pushq(r14); // set locals pointer
+ __ movptr(rdx, Address(rbx, methodOopDesc::constants_offset()));
+ __ movptr(rdx, Address(rdx, constantPoolOopDesc::cache_offset_in_bytes()));
+ __ push(rdx); // set constant pool cache
+ __ push(r14); // set locals pointer
if (native_call) {
- __ pushq(0); // no bcp
+ __ push(0); // no bcp
} else {
- __ pushq(r13); // set bcp
+ __ push(r13); // set bcp
}
- __ pushq(0); // reserve word for pointer to expression stack bottom
- __ movq(Address(rsp, 0), rsp); // set expression stack bottom
+ __ push(0); // reserve word for pointer to expression stack bottom
+ __ movptr(Address(rsp, 0), rsp); // set expression stack bottom
}
// End of helpers
+// Various method entries
+//------------------------------------------------------------------------------------------------------------------------
+//
+//
+
+// Call an accessor method (assuming it is resolved, otherwise drop
+// into vanilla (slow path) entry
+address InterpreterGenerator::generate_accessor_entry(void) {
+ // rbx: methodOop
+
+  // r13: senderSP must be preserved for slow path, set SP to it on fast path
+
+ address entry_point = __ pc();
+ Label xreturn_path;
+
+ // do fastpath for resolved accessor methods
+ if (UseFastAccessorMethods) {
+ // Code: _aload_0, _(i|a)getfield, _(i|a)return or any rewrites
+ // thereof; parameter size = 1
+ // Note: We can only use this code if the getfield has been resolved
+ // and if we don't have a null-pointer exception => check for
+ // these conditions first and use slow path if necessary.
+ Label slow_path;
+ // If we need a safepoint check, generate full interpreter entry.
+ __ cmp32(ExternalAddress(SafepointSynchronize::address_of_state()),
+ SafepointSynchronize::_not_synchronized);
+
+ __ jcc(Assembler::notEqual, slow_path);
+ // rbx: method
+ __ movptr(rax, Address(rsp, wordSize));
+
+ // check if local 0 != NULL and read field
+ __ testptr(rax, rax);
+ __ jcc(Assembler::zero, slow_path);
+
+ __ movptr(rdi, Address(rbx, methodOopDesc::constants_offset()));
+ // read first instruction word and extract bytecode @ 1 and index @ 2
+ __ movptr(rdx, Address(rbx, methodOopDesc::const_offset()));
+ __ movl(rdx, Address(rdx, constMethodOopDesc::codes_offset()));
+ // Shift codes right to get the index on the right.
+ // The bytecode fetched looks like <index><0xb4><0x2a>
+ __ shrl(rdx, 2 * BitsPerByte);
+ __ shll(rdx, exact_log2(in_words(ConstantPoolCacheEntry::size())));
+ __ movptr(rdi, Address(rdi, constantPoolOopDesc::cache_offset_in_bytes()));
+
+ // rax: local 0
+ // rbx: method
+ // rdx: constant pool cache index
+ // rdi: constant pool cache
+
+ // check if getfield has been resolved and read constant pool cache entry
+ // check the validity of the cache entry by testing whether _indices field
+ // contains Bytecode::_getfield in b1 byte.
+ assert(in_words(ConstantPoolCacheEntry::size()) == 4,
+ "adjust shift below");
+ __ movl(rcx,
+ Address(rdi,
+ rdx,
+ Address::times_8,
+ constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCacheEntry::indices_offset()));
+ __ shrl(rcx, 2 * BitsPerByte);
+ __ andl(rcx, 0xFF);
+ __ cmpl(rcx, Bytecodes::_getfield);
+ __ jcc(Assembler::notEqual, slow_path);
+
+ // Note: constant pool entry is not valid before bytecode is resolved
+ __ movptr(rcx,
+ Address(rdi,
+ rdx,
+ Address::times_8,
+ constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCacheEntry::f2_offset()));
+ // edx: flags
+ __ movl(rdx,
+ Address(rdi,
+ rdx,
+ Address::times_8,
+ constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCacheEntry::flags_offset()));
+
+ Label notObj, notInt, notByte, notShort;
+ const Address field_address(rax, rcx, Address::times_1);
+
+ // Need to differentiate between igetfield, agetfield, bgetfield etc.
+ // because they are different sizes.
+ // Use the type from the constant pool cache
+ __ shrl(rdx, ConstantPoolCacheEntry::tosBits);
+ // Make sure we don't need to mask edx for tosBits after the above shift
+ ConstantPoolCacheEntry::verify_tosBits();
+
+ __ cmpl(rdx, atos);
+ __ jcc(Assembler::notEqual, notObj);
+ // atos
+ __ load_heap_oop(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notObj);
+ __ cmpl(rdx, itos);
+ __ jcc(Assembler::notEqual, notInt);
+ // itos
+ __ movl(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notInt);
+ __ cmpl(rdx, btos);
+ __ jcc(Assembler::notEqual, notByte);
+ // btos
+ __ load_signed_byte(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notByte);
+ __ cmpl(rdx, stos);
+ __ jcc(Assembler::notEqual, notShort);
+ // stos
+ __ load_signed_word(rax, field_address);
+ __ jmp(xreturn_path);
+
+ __ bind(notShort);
+#ifdef ASSERT
+ Label okay;
+ __ cmpl(rdx, ctos);
+ __ jcc(Assembler::equal, okay);
+ __ stop("what type is this?");
+ __ bind(okay);
+#endif
+ // ctos
+ __ load_unsigned_word(rax, field_address);
+
+ __ bind(xreturn_path);
+
+ // _ireturn/_areturn
+ __ pop(rdi);
+ __ mov(rsp, r13);
+ __ jmp(rdi);
+ __ ret(0);
+
+ // generate a vanilla interpreter entry as the slow path
+ __ bind(slow_path);
+ (void) generate_normal_entry(false);
+ } else {
+ (void) generate_normal_entry(false);
+ }
+
+ return entry_point;
+}
+
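The new 64-bit generate_accessor_entry above mirrors the 32-bit fast path: verify the getfield has been resolved in the constant-pool cache, pull the field offset and type out of the cache entry, load the field with a type-appropriate instruction, and fall back to the normal entry otherwise. A toy sketch of the final type dispatch (placeholder constants, not the real tosBits/type encodings):

  #include <cstdint>
  #include <cstdio>

  enum Tos { btos, ctos, stos, itos, atos };
  static const int tosBits = 28;                    // placeholder; the real value is ConstantPoolCacheEntry::tosBits

  static const char* load_for(uint32_t flags) {
    switch (flags >> tosBits) {                     // "__ shrl(rdx, ConstantPoolCacheEntry::tosBits)"
      case atos: return "load_heap_oop";
      case itos: return "movl";
      case btos: return "load_signed_byte";
      case stos: return "load_signed_word";
      case ctos: return "load_unsigned_word";
      default:   return "take the slow path";
    }
  }

  int main() {
    std::printf("%s\n", load_for((uint32_t)itos << tosBits));   // movl
    return 0;
  }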
// Interpreter stub for calling a native method. (asm interpreter)
// This sets up a somewhat different looking stack for calling the
// native method than the typical interpreter frame setup.
@@ -561,20 +711,20 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// rbx: methodOop
// rcx: size of parameters
// r13: sender sp
- __ popq(rax); // get return address
+ __ pop(rax); // get return address
// for natives the size of locals is zero
// compute beginning of parameters (r14)
if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
- __ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize));
+ __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
// add 2 zero-initialized slots for native calls
// initialize result_handler slot
- __ pushq((int) NULL);
+ __ push((int) NULL_WORD);
// slot for oop temp
// (static native method holder mirror/jni oop result)
- __ pushq((int) NULL);
+ __ push((int) NULL_WORD);
if (inc_counter) {
__ movl(rcx, invocation_counter); // (pre-)fetch invocation count
@@ -651,8 +801,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
Label L;
const Address monitor_block_top(rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize);
- __ movq(rax, monitor_block_top);
- __ cmpq(rax, rsp);
+ __ movptr(rax, monitor_block_top);
+ __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter");
__ bind(L);
@@ -674,22 +824,22 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
methodOopDesc::size_of_parameters_offset()));
__ shll(t, Interpreter::logStackElementSize());
- __ subq(rsp, t);
- __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
- __ andq(rsp, -16); // must be 16 byte boundry (see amd64 ABI)
+ __ subptr(rsp, t);
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+  __ andptr(rsp, -16); // must be 16 byte boundary (see amd64 ABI)
// get signature handler
{
Label L;
- __ movq(t, Address(method, methodOopDesc::signature_handler_offset()));
- __ testq(t, t);
+ __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ testptr(t, t);
__ jcc(Assembler::notZero, L);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::prepare_native_call),
method);
__ get_method(method);
- __ movq(t, Address(method, methodOopDesc::signature_handler_offset()));
+ __ movptr(t, Address(method, methodOopDesc::signature_handler_offset()));
__ bind(L);
}
@@ -711,9 +861,9 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// result handler is in rax
// set result handler
- __ movq(Address(rbp,
- (frame::interpreter_frame_result_handler_offset) * wordSize),
- rax);
+ __ movptr(Address(rbp,
+ (frame::interpreter_frame_result_handler_offset) * wordSize),
+ rax);
// pass mirror handle if static call
{
@@ -724,25 +874,25 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ testl(t, JVM_ACC_STATIC);
__ jcc(Assembler::zero, L);
// get mirror
- __ movq(t, Address(method, methodOopDesc::constants_offset()));
- __ movq(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
- __ movq(t, Address(t, mirror_offset));
+ __ movptr(t, Address(method, methodOopDesc::constants_offset()));
+ __ movptr(t, Address(t, constantPoolOopDesc::pool_holder_offset_in_bytes()));
+ __ movptr(t, Address(t, mirror_offset));
// copy mirror into activation frame
- __ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
+ __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize),
t);
// pass handle to mirror
- __ leaq(c_rarg1,
- Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
+ __ lea(c_rarg1,
+ Address(rbp, frame::interpreter_frame_oop_temp_offset * wordSize));
__ bind(L);
}
// get native function entry point
{
Label L;
- __ movq(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
ExternalAddress unsatisfied(SharedRuntime::native_method_throw_unsatisfied_link_error_entry());
__ movptr(rscratch2, unsatisfied.addr());
- __ cmpq(rax, rscratch2);
+ __ cmpptr(rax, rscratch2);
__ jcc(Assembler::notEqual, L);
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
@@ -750,12 +900,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
method);
__ get_method(method);
__ verify_oop(method);
- __ movq(rax, Address(method, methodOopDesc::native_function_offset()));
+ __ movptr(rax, Address(method, methodOopDesc::native_function_offset()));
__ bind(L);
}
// pass JNIEnv
- __ leaq(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
+ __ lea(c_rarg0, Address(r15_thread, JavaThread::jni_environment_offset()));
// It is enough that the pc() points into the right code
// segment. It does not have to be the correct return pc.
@@ -786,10 +936,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// register after returning from the JNI Call or verify that
// it wasn't changed during -Xcheck:jni.
if (RestoreMXCSROnJNICalls) {
- __ ldmxcsr(ExternalAddress(StubRoutines::amd64::mxcsr_std()));
+ __ ldmxcsr(ExternalAddress(StubRoutines::x86::mxcsr_std()));
}
else if (CheckJNICalls) {
- __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::amd64::verify_mxcsr_entry())));
+ __ call(RuntimeAddress(CAST_FROM_FN_PTR(address, StubRoutines::x86::verify_mxcsr_entry())));
}
// NOTE: The order of these pushes is known to frame::interpreter_frame_result
@@ -838,12 +988,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// preserved and correspond to the bcp/locals pointers. So we do a
// runtime call by hand.
//
- __ movq(c_rarg0, r15_thread);
- __ movq(r12, rsp); // remember sp
- __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
- __ andq(rsp, -16); // align stack as required by ABI
+ __ mov(c_rarg0, r15_thread);
+ __ mov(r12, rsp); // remember sp
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, JavaThread::check_special_condition_for_native_trans)));
- __ movq(rsp, r12); // restore sp
+ __ mov(rsp, r12); // restore sp
__ reinit_heapbase();
__ bind(Continue);
}
@@ -855,8 +1005,8 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ reset_last_Java_frame(true, true);
// reset handle block
- __ movq(t, Address(r15_thread, JavaThread::active_handles_offset()));
- __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), NULL_WORD);
+ __ movptr(t, Address(r15_thread, JavaThread::active_handles_offset()));
+ __ movptr(Address(t, JNIHandleBlock::top_offset_in_bytes()), (int32_t)NULL_WORD);
// If result is an oop unbox and store it in frame where gc will see it
// and result handler will pick it up
@@ -864,15 +1014,15 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
{
Label no_oop, store_result;
__ lea(t, ExternalAddress(AbstractInterpreter::result_handler(T_OBJECT)));
- __ cmpq(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
+ __ cmpptr(t, Address(rbp, frame::interpreter_frame_result_handler_offset*wordSize));
__ jcc(Assembler::notEqual, no_oop);
// retrieve result
__ pop(ltos);
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, store_result);
- __ movq(rax, Address(rax, 0));
+ __ movptr(rax, Address(rax, 0));
__ bind(store_result);
- __ movq(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
+ __ movptr(Address(rbp, frame::interpreter_frame_oop_temp_offset*wordSize), rax);
// keep stack depth as expected by pushing oop which will eventually be discarded
__ push(ltos);
__ bind(no_oop);
@@ -885,13 +1035,13 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
JavaThread::stack_guard_yellow_disabled);
__ jcc(Assembler::notEqual, no_reguard);
- __ pushaq(); // XXX only save smashed registers
- __ movq(r12, rsp); // remember sp
- __ subq(rsp, frame::arg_reg_save_area_bytes); // windows
- __ andq(rsp, -16); // align stack as required by ABI
+ __ pusha(); // XXX only save smashed registers
+ __ mov(r12, rsp); // remember sp
+ __ subptr(rsp, frame::arg_reg_save_area_bytes); // windows
+ __ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, SharedRuntime::reguard_yellow_pages)));
- __ movq(rsp, r12); // restore sp
- __ popaq(); // XXX only restore smashed registers
+ __ mov(rsp, r12); // restore sp
+ __ popa(); // XXX only restore smashed registers
__ reinit_heapbase();
__ bind(no_reguard);
@@ -906,12 +1056,12 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
// restore r13 to have legal interpreter frame, i.e., bci == 0 <=>
// r13 == code_base()
- __ movq(r13, Address(method, methodOopDesc::const_offset())); // get constMethodOop
- __ leaq(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
+ __ movptr(r13, Address(method, methodOopDesc::const_offset())); // get constMethodOop
+ __ lea(r13, Address(r13, constMethodOopDesc::codes_offset())); // get codebase
// handle exceptions (exception handling will handle unlocking!)
{
Label L;
- __ cmpq(Address(r15_thread, Thread::pending_exception_offset()), (int) NULL);
+ __ cmpptr(Address(r15_thread, Thread::pending_exception_offset()), (int32_t) NULL_WORD);
__ jcc(Assembler::zero, L);
// Note: At some point we may want to unify this with the code
// used in call_VM_base(); i.e., we should use the
@@ -942,10 +1092,10 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
wordSize - sizeof(BasicObjectLock)));
// monitor expected in c_rarg1 for slow unlock path
- __ leaq(c_rarg1, monitor); // address of first monitor
+ __ lea(c_rarg1, monitor); // address of first monitor
- __ movq(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
- __ testq(t, t);
+ __ movptr(t, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+ __ testptr(t, t);
__ jcc(Assembler::notZero, unlock);
// Entry already unlocked, need to throw exception
@@ -973,17 +1123,17 @@ address InterpreterGenerator::generate_native_entry(bool synchronized) {
__ pop(ltos);
__ pop(dtos);
- __ movq(t, Address(rbp,
- (frame::interpreter_frame_result_handler_offset) * wordSize));
+ __ movptr(t, Address(rbp,
+ (frame::interpreter_frame_result_handler_offset) * wordSize));
__ call(t);
// remove activation
- __ movq(t, Address(rbp,
- frame::interpreter_frame_sender_sp_offset *
- wordSize)); // get sender sp
+ __ movptr(t, Address(rbp,
+ frame::interpreter_frame_sender_sp_offset *
+ wordSize)); // get sender sp
__ leave(); // remove frame anchor
- __ popq(rdi); // get return address
- __ movq(rsp, t); // set sp to sender sp
+ __ pop(rdi); // get return address
+ __ mov(rsp, t); // set sp to sender sp
__ jmp(rdi);
if (inc_counter) {
@@ -1032,11 +1182,11 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
generate_stack_overflow_check();
// get return address
- __ popq(rax);
+ __ pop(rax);
// compute beginning of parameters (r14)
if (TaggedStackInterpreter) __ shll(rcx, 1); // 2 slots per parameter.
- __ leaq(r14, Address(rsp, rcx, Address::times_8, -wordSize));
+ __ lea(r14, Address(rsp, rcx, Address::times_8, -wordSize));
// rdx - # of additional locals
// allocate space for locals
@@ -1046,8 +1196,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
__ testl(rdx, rdx);
__ jcc(Assembler::lessEqual, exit); // do nothing if rdx <= 0
__ bind(loop);
- if (TaggedStackInterpreter) __ pushq((int) NULL); // push tag
- __ pushq((int) NULL); // initialize local variables
+ if (TaggedStackInterpreter) __ push((int) NULL_WORD); // push tag
+ __ push((int) NULL_WORD); // initialize local variables
__ decrementl(rdx); // until everything initialized
__ jcc(Assembler::greater, loop);
__ bind(exit);
@@ -1137,8 +1287,8 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
Label L;
const Address monitor_block_top (rbp,
frame::interpreter_frame_monitor_block_top_offset * wordSize);
- __ movq(rax, monitor_block_top);
- __ cmpq(rax, rsp);
+ __ movptr(rax, monitor_block_top);
+ __ cmpptr(rax, rsp);
__ jcc(Assembler::equal, L);
__ stop("broken stack frame setup in interpreter");
__ bind(L);
@@ -1160,14 +1310,14 @@ address InterpreterGenerator::generate_normal_entry(bool synchronized) {
CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method),
r13, true);
- __ movq(rbx, Address(rbp, method_offset)); // restore methodOop
- __ movq(rax, Address(rbx,
- in_bytes(methodOopDesc::method_data_offset())));
- __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
- rax);
+ __ movptr(rbx, Address(rbp, method_offset)); // restore methodOop
+ __ movptr(rax, Address(rbx,
+ in_bytes(methodOopDesc::method_data_offset())));
+ __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
+ rax);
__ test_method_data_pointer(rax, profile_method_continue);
- __ addq(rax, in_bytes(methodDataOopDesc::data_offset()));
- __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
+ __ addptr(rax, in_bytes(methodDataOopDesc::data_offset()));
+ __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
rax);
__ jmp(profile_method_continue);
}
@@ -1357,7 +1507,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
Interpreter::_rethrow_exception_entry = __ pc();
// Restore sp to interpreter_frame_last_sp even though we are going
// to empty the expression stack for the exception processing.
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
// rax: exception
// rdx: return address/pc that threw exception
__ restore_bcp(); // r13 points to call/send
@@ -1369,7 +1519,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// rax: exception
// r13: exception bcp
__ verify_oop(rax);
- __ movq(c_rarg1, rax);
+ __ mov(c_rarg1, rax);
// expression stack must be empty before entering the VM in case of
// an exception
@@ -1424,7 +1574,7 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// deoptimization blob's unpack entry because of the presence of
// adapter frames in C2.
Label caller_not_deoptimized;
- __ movq(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
+ __ movptr(c_rarg1, Address(rbp, frame::return_addr_offset * wordSize));
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
InterpreterRuntime::interpreter_contains), c_rarg1);
__ testl(rax, rax);
@@ -1437,8 +1587,8 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
size_of_parameters_offset())));
__ shll(rax, Interpreter::logStackElementSize());
__ restore_locals(); // XXX do we need this?
- __ subq(r14, rax);
- __ addq(r14, wordSize);
+ __ subptr(r14, rax);
+ __ addptr(r14, wordSize);
// Save these arguments
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
Deoptimization::
@@ -1477,15 +1627,15 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// maintain this kind of invariant all the time we call a small
// fixup routine to move the mutated arguments onto the top of our
// expression stack if necessary.
- __ movq(c_rarg1, rsp);
- __ movq(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ mov(c_rarg1, rsp);
+ __ movptr(c_rarg2, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
// PC must point into interpreter here
__ set_last_Java_frame(noreg, rbp, __ pc());
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address, InterpreterRuntime::popframe_move_outgoing_args), r15_thread, c_rarg1, c_rarg2);
__ reset_last_Java_frame(true, true);
// Restore the last_sp and null it out
- __ movq(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
- __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), NULL_WORD);
+ __ movptr(rsp, Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize));
+ __ movptr(Address(rbp, frame::interpreter_frame_last_sp_offset * wordSize), (int32_t)NULL_WORD);
__ restore_bcp(); // XXX do we need this?
__ restore_locals(); // XXX do we need this?
@@ -1506,12 +1656,12 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// preserve exception over this code sequence
__ pop_ptr(rax);
- __ movq(Address(r15_thread, JavaThread::vm_result_offset()), rax);
+ __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), rax);
// remove the activation (without doing throws on illegalMonitorExceptions)
__ remove_activation(vtos, rdx, false, true, false);
// restore exception
- __ movq(rax, Address(r15_thread, JavaThread::vm_result_offset()));
- __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), NULL_WORD);
+ __ movptr(rax, Address(r15_thread, JavaThread::vm_result_offset()));
+ __ movptr(Address(r15_thread, JavaThread::vm_result_offset()), (int32_t)NULL_WORD);
__ verify_oop(rax);
// In between activations - previous activation type unknown yet
@@ -1522,14 +1672,14 @@ void TemplateInterpreterGenerator::generate_throw_exception() {
// rdx: return address/pc that threw exception
// rsp: expression stack of caller
// rbp: ebp of caller
- __ pushq(rax); // save exception
- __ pushq(rdx); // save return address
+ __ push(rax); // save exception
+ __ push(rdx); // save return address
__ super_call_VM_leaf(CAST_FROM_FN_PTR(address,
SharedRuntime::exception_handler_for_return_address),
rdx);
- __ movq(rbx, rax); // save exception handler
- __ popq(rdx); // restore return address
- __ popq(rax); // restore exception
+ __ mov(rbx, rax); // save exception handler
+ __ pop(rdx); // restore return address
+ __ pop(rax); // restore exception
// Note that an "issuing PC" is actually the next PC after the call
__ jmp(rbx); // jump to exception
// handler of caller
@@ -1547,7 +1697,7 @@ address TemplateInterpreterGenerator::generate_earlyret_entry_for(TosState state
__ empty_expression_stack();
__ load_earlyret_value(state);
- __ movq(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
+ __ movptr(rdx, Address(r15_thread, JavaThread::jvmti_thread_state_offset()));
Address cond_addr(rdx, JvmtiThreadState::earlyret_state_offset());
// Clear the earlyret state
@@ -1609,21 +1759,21 @@ address TemplateInterpreterGenerator::generate_trace_code(TosState state) {
address entry = __ pc();
__ push(state);
- __ pushq(c_rarg0);
- __ pushq(c_rarg1);
- __ pushq(c_rarg2);
- __ pushq(c_rarg3);
- __ movq(c_rarg2, rax); // Pass itos
+ __ push(c_rarg0);
+ __ push(c_rarg1);
+ __ push(c_rarg2);
+ __ push(c_rarg3);
+ __ mov(c_rarg2, rax); // Pass itos
#ifdef _WIN64
__ movflt(xmm3, xmm0); // Pass ftos
#endif
__ call_VM(noreg,
CAST_FROM_FN_PTR(address, SharedRuntime::trace_bytecode),
c_rarg1, c_rarg2, c_rarg3);
- __ popq(c_rarg3);
- __ popq(c_rarg2);
- __ popq(c_rarg1);
- __ popq(c_rarg0);
+ __ pop(c_rarg3);
+ __ pop(c_rarg2);
+ __ pop(c_rarg1);
+ __ pop(c_rarg0);
__ pop(state);
__ ret(0); // return from result handler
@@ -1657,10 +1807,10 @@ void TemplateInterpreterGenerator::trace_bytecode(Template* t) {
assert(Interpreter::trace_code(t->tos_in()) != NULL,
"entry must have been generated");
- __ movq(r12, rsp); // remember sp
- __ andq(rsp, -16); // align stack as required by ABI
+ __ mov(r12, rsp); // remember sp
+ __ andptr(rsp, -16); // align stack as required by ABI
__ call(RuntimeAddress(Interpreter::trace_code(t->tos_in())));
- __ movq(rsp, r12); // restore sp
+ __ mov(rsp, r12); // restore sp
__ reinit_heapbase();
}
@@ -1674,3 +1824,4 @@ void TemplateInterpreterGenerator::stop_interpreter_at() {
__ bind(L);
}
#endif // !PRODUCT
+#endif // ! CC_INTERP
diff --git a/src/cpu/x86/vm/templateTable_x86_32.cpp b/src/cpu/x86/vm/templateTable_x86_32.cpp
index 24e7e12fc..31c6975ec 100644
--- a/src/cpu/x86/vm/templateTable_x86_32.cpp
+++ b/src/cpu/x86/vm/templateTable_x86_32.cpp
@@ -119,12 +119,14 @@ void TemplateTable::patch_bytecode(Bytecodes::Code bytecode, Register bc,
if (!RewriteBytecodes) return;
// the pair bytecodes have already done the load.
- if (load_bc_into_scratch) __ movl(bc, bytecode);
+ if (load_bc_into_scratch) {
+ __ movl(bc, bytecode);
+ }
Label patch_done;
if (JvmtiExport::can_post_breakpoint()) {
Label fast_patch;
// if a breakpoint is present we can't rewrite the stream directly
- __ movzxb(scratch, at_bcp(0));
+ __ movzbl(scratch, at_bcp(0));
__ cmpl(scratch, Bytecodes::_breakpoint);
__ jcc(Assembler::notEqual, fast_patch);
__ get_method(scratch);
@@ -169,16 +171,16 @@ void TemplateTable::shouldnotreachhere() {
void TemplateTable::aconst_null() {
transition(vtos, atos);
- __ xorl(rax, rax);
+ __ xorptr(rax, rax);
}
void TemplateTable::iconst(int value) {
transition(vtos, itos);
if (value == 0) {
- __ xorl(rax, rax);
+ __ xorptr(rax, rax);
} else {
- __ movl(rax, value);
+ __ movptr(rax, value);
}
}
@@ -186,12 +188,12 @@ void TemplateTable::iconst(int value) {
void TemplateTable::lconst(int value) {
transition(vtos, ltos);
if (value == 0) {
- __ xorl(rax, rax);
+ __ xorptr(rax, rax);
} else {
- __ movl(rax, value);
+ __ movptr(rax, value);
}
assert(value >= 0, "check this code");
- __ xorl(rdx, rdx);
+ __ xorptr(rdx, rdx);
}
@@ -223,7 +225,7 @@ void TemplateTable::bipush() {
void TemplateTable::sipush() {
transition(vtos, itos);
__ load_unsigned_word(rax, at_bcp(1));
- __ bswap(rax);
+ __ bswapl(rax);
__ sarl(rax, 16);
}
@@ -241,7 +243,7 @@ void TemplateTable::ldc(bool wide) {
const int tags_offset = typeArrayOopDesc::header_size(T_BYTE) * wordSize;
// get type
- __ xorl(rdx, rdx);
+ __ xorptr(rdx, rdx);
__ movb(rdx, Address(rax, rbx, Address::times_1, tags_offset));
// unresolved string - get the resolved string
@@ -271,7 +273,7 @@ void TemplateTable::ldc(bool wide) {
__ cmpl(rdx, JVM_CONSTANT_Float);
__ jccb(Assembler::notEqual, notFloat);
// ftos
- __ fld_s( Address(rcx, rbx, Address::times_4, base_offset));
+ __ fld_s( Address(rcx, rbx, Address::times_ptr, base_offset));
__ push(ftos);
__ jmp(Done);
@@ -288,13 +290,14 @@ void TemplateTable::ldc(bool wide) {
#endif
Label isOop;
// atos and itos
- __ movl(rax, Address(rcx, rbx, Address::times_4, base_offset));
// String is only oop type we will see here
__ cmpl(rdx, JVM_CONSTANT_String);
__ jccb(Assembler::equal, isOop);
+ __ movl(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
__ push(itos);
__ jmp(Done);
__ bind(isOop);
+ __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset));
__ push(atos);
if (VerifyOops) {
@@ -316,14 +319,14 @@ void TemplateTable::ldc2_w() {
__ cmpb(Address(rax, rbx, Address::times_1, tags_offset), JVM_CONSTANT_Double);
__ jccb(Assembler::notEqual, Long);
// dtos
- __ fld_d( Address(rcx, rbx, Address::times_4, base_offset));
+ __ fld_d( Address(rcx, rbx, Address::times_ptr, base_offset));
__ push(dtos);
__ jmpb(Done);
__ bind(Long);
// ltos
- __ movl(rax, Address(rcx, rbx, Address::times_4, base_offset + 0 * wordSize));
- __ movl(rdx, Address(rcx, rbx, Address::times_4, base_offset + 1 * wordSize));
+ __ movptr(rax, Address(rcx, rbx, Address::times_ptr, base_offset + 0 * wordSize));
+ NOT_LP64(__ movptr(rdx, Address(rcx, rbx, Address::times_ptr, base_offset + 1 * wordSize)));
__ push(ltos);
@@ -333,7 +336,7 @@ void TemplateTable::ldc2_w() {
void TemplateTable::locals_index(Register reg, int offset) {
__ load_unsigned_byte(reg, at_bcp(offset));
- __ negl(reg);
+ __ negptr(reg);
}
@@ -399,8 +402,8 @@ void TemplateTable::fast_iload() {
void TemplateTable::lload() {
transition(vtos, ltos);
locals_index(rbx);
- __ movl(rax, laddress(rbx));
- __ movl(rdx, haddress(rbx));
+ __ movptr(rax, laddress(rbx));
+ NOT_LP64(__ movl(rdx, haddress(rbx)));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}
@@ -421,10 +424,10 @@ void TemplateTable::dload() {
// float instruction into ST0
__ movl(rax, laddress(rbx));
__ movl(rdx, haddress(rbx));
- __ pushl(rdx); // push hi first
- __ pushl(rax);
+ __ push(rdx); // push hi first
+ __ push(rax);
__ fld_d(Address(rsp, 0));
- __ addl(rsp, 2*wordSize);
+ __ addptr(rsp, 2*wordSize);
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
} else {
__ fld_d(daddress(rbx));
@@ -435,16 +438,16 @@ void TemplateTable::dload() {
void TemplateTable::aload() {
transition(vtos, atos);
locals_index(rbx);
- __ movl(rax, iaddress(rbx));
+ __ movptr(rax, aaddress(rbx));
debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
void TemplateTable::locals_index_wide(Register reg) {
__ movl(reg, at_bcp(2));
- __ bswap(reg);
+ __ bswapl(reg);
__ shrl(reg, 16);
- __ negl(reg);
+ __ negptr(reg);
}
@@ -459,8 +462,8 @@ void TemplateTable::wide_iload() {
void TemplateTable::wide_lload() {
transition(vtos, ltos);
locals_index_wide(rbx);
- __ movl(rax, laddress(rbx));
- __ movl(rdx, haddress(rbx));
+ __ movptr(rax, laddress(rbx));
+ NOT_LP64(__ movl(rdx, haddress(rbx)));
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
}
@@ -481,8 +484,8 @@ void TemplateTable::wide_dload() {
// float instruction into ST0
__ movl(rax, laddress(rbx));
__ movl(rdx, haddress(rbx));
- __ pushl(rdx); // push hi first
- __ pushl(rax);
+ __ push(rdx); // push hi first
+ __ push(rax);
__ fld_d(Address(rsp, 0));
__ addl(rsp, 2*wordSize);
debug_only(__ verify_local_tag(frame::TagCategory2, rbx));
@@ -495,7 +498,7 @@ void TemplateTable::wide_dload() {
void TemplateTable::wide_aload() {
transition(vtos, atos);
locals_index_wide(rbx);
- __ movl(rax, iaddress(rbx));
+ __ movptr(rax, aaddress(rbx));
debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
@@ -509,12 +512,13 @@ void TemplateTable::index_check_without_pop(Register array, Register index) {
// destroys rbx,
// check array
__ null_check(array, arrayOopDesc::length_offset_in_bytes());
+ LP64_ONLY(__ movslq(index, index));
// check index
__ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
if (index != rbx) {
// ??? convention: move aberrant index into rbx, for exception message
assert(rbx != array, "different registers");
- __ movl(rbx, index);
+ __ mov(rbx, index);
}
__ jump_cc(Assembler::aboveEqual,
ExternalAddress(Interpreter::_throw_ArrayIndexOutOfBoundsException_entry));
@@ -535,10 +539,10 @@ void TemplateTable::laload() {
// rax,: index
// rdx: array
index_check(rdx, rax);
- __ movl(rbx, rax);
+ __ mov(rbx, rax);
// rbx,: index
- __ movl(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
- __ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize));
+ __ movptr(rax, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize));
+ NOT_LP64(__ movl(rdx, Address(rdx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize)));
}
@@ -565,7 +569,7 @@ void TemplateTable::aaload() {
// rdx: array
index_check(rdx, rax); // kills rbx,
// rax,: index
- __ movl(rax, Address(rdx, rax, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+ __ movptr(rax, Address(rdx, rax, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
}
@@ -576,7 +580,7 @@ void TemplateTable::baload() {
// rax,: index
// can do better code for P5 - fix this at some point
__ load_signed_byte(rbx, Address(rdx, rax, Address::times_1, arrayOopDesc::base_offset_in_bytes(T_BYTE)));
- __ movl(rax, rbx);
+ __ mov(rax, rbx);
}
@@ -587,7 +591,7 @@ void TemplateTable::caload() {
// rax,: index
// can do better code for P5 - may want to improve this at some point
__ load_unsigned_word(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
- __ movl(rax, rbx);
+ __ mov(rax, rbx);
}
// iload followed by caload frequent pair
@@ -602,7 +606,7 @@ void TemplateTable::fast_icaload() {
index_check(rdx, rax);
// rax,: index
__ load_unsigned_word(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_CHAR)));
- __ movl(rax, rbx);
+ __ mov(rax, rbx);
}
void TemplateTable::saload() {
@@ -612,7 +616,7 @@ void TemplateTable::saload() {
// rax,: index
// can do better code for P5 - may want to improve this at some point
__ load_signed_word(rbx, Address(rdx, rax, Address::times_2, arrayOopDesc::base_offset_in_bytes(T_SHORT)));
- __ movl(rax, rbx);
+ __ mov(rax, rbx);
}
@@ -625,8 +629,8 @@ void TemplateTable::iload(int n) {
void TemplateTable::lload(int n) {
transition(vtos, ltos);
- __ movl(rax, laddress(n));
- __ movl(rdx, haddress(n));
+ __ movptr(rax, laddress(n));
+ NOT_LP64(__ movptr(rdx, haddress(n)));
debug_only(__ verify_local_tag(frame::TagCategory2, n));
}
@@ -645,10 +649,10 @@ void TemplateTable::dload(int n) {
// float instruction into ST0
__ movl(rax, laddress(n));
__ movl(rdx, haddress(n));
- __ pushl(rdx); // push hi first
- __ pushl(rax);
+ __ push(rdx); // push hi first
+ __ push(rax);
__ fld_d(Address(rsp, 0));
- __ addl(rsp, 2*wordSize); // reset rsp
+ __ addptr(rsp, 2*wordSize); // reset rsp
debug_only(__ verify_local_tag(frame::TagCategory2, n));
} else {
__ fld_d(daddress(n));
@@ -658,7 +662,7 @@ void TemplateTable::dload(int n) {
void TemplateTable::aload(int n) {
transition(vtos, atos);
- __ movl(rax, aaddress(n));
+ __ movptr(rax, aaddress(n));
debug_only(__ verify_local_tag(frame::TagReference, n));
}
@@ -740,8 +744,8 @@ void TemplateTable::istore() {
void TemplateTable::lstore() {
transition(ltos, vtos);
locals_index(rbx);
- __ movl(laddress(rbx), rax);
- __ movl(haddress(rbx), rdx);
+ __ movptr(laddress(rbx), rax);
+ NOT_LP64(__ movptr(haddress(rbx), rdx));
__ tag_local(frame::TagCategory2, rbx);
}
@@ -759,12 +763,12 @@ void TemplateTable::dstore() {
locals_index(rbx);
if (TaggedStackInterpreter) {
// Store double on stack and reload into locals nonadjacently
- __ subl(rsp, 2 * wordSize);
+ __ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
- __ popl(rax);
- __ popl(rdx);
- __ movl(laddress(rbx), rax);
- __ movl(haddress(rbx), rdx);
+ __ pop(rax);
+ __ pop(rdx);
+ __ movptr(laddress(rbx), rax);
+ __ movptr(haddress(rbx), rdx);
__ tag_local(frame::TagCategory2, rbx);
} else {
__ fstp_d(daddress(rbx));
@@ -776,7 +780,7 @@ void TemplateTable::astore() {
transition(vtos, vtos);
__ pop_ptr(rax, rdx); // will need to pop tag too
locals_index(rbx);
- __ movl(aaddress(rbx), rax);
+ __ movptr(aaddress(rbx), rax);
__ tag_local(rdx, rbx); // need to store same tag in local may be returnAddr
}
@@ -794,8 +798,8 @@ void TemplateTable::wide_lstore() {
transition(vtos, vtos);
__ pop_l(rax, rdx);
locals_index_wide(rbx);
- __ movl(laddress(rbx), rax);
- __ movl(haddress(rbx), rdx);
+ __ movptr(laddress(rbx), rax);
+ NOT_LP64(__ movl(haddress(rbx), rdx));
__ tag_local(frame::TagCategory2, rbx);
}
@@ -814,7 +818,7 @@ void TemplateTable::wide_astore() {
transition(vtos, vtos);
__ pop_ptr(rax, rdx);
locals_index_wide(rbx);
- __ movl(aaddress(rbx), rax);
+ __ movptr(aaddress(rbx), rax);
__ tag_local(rdx, rbx);
}
@@ -838,8 +842,8 @@ void TemplateTable::lastore() {
// rdx: high(value)
index_check(rcx, rbx); // prefer index in rbx,
// rbx,: index
- __ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
- __ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx);
+ __ movptr(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 0 * wordSize), rax);
+ NOT_LP64(__ movl(Address(rcx, rbx, Address::times_8, arrayOopDesc::base_offset_in_bytes(T_LONG) + 1 * wordSize), rdx));
}
@@ -869,21 +873,21 @@ void TemplateTable::aastore() {
Label is_null, ok_is_subtype, done;
transition(vtos, vtos);
// stack: ..., array, index, value
- __ movl(rax, at_tos()); // Value
+ __ movptr(rax, at_tos()); // Value
__ movl(rcx, at_tos_p1()); // Index
- __ movl(rdx, at_tos_p2()); // Array
+ __ movptr(rdx, at_tos_p2()); // Array
index_check_without_pop(rdx, rcx); // kills rbx,
// do array store check - check for NULL value first
- __ testl(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, is_null);
// Move subklass into EBX
- __ movl(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rbx, Address(rax, oopDesc::klass_offset_in_bytes()));
// Move superklass into EAX
- __ movl(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
- __ movl(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
- // Compress array+index*4+12 into a single register. Frees ECX.
- __ leal(rdx, Address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+ __ movptr(rax, Address(rdx, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rax, Address(rax, sizeof(oopDesc) + objArrayKlass::element_klass_offset_in_bytes()));
+ // Compress array+index*wordSize+12 into a single register. Frees ECX.
+ __ lea(rdx, Address(rdx, rcx, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
// Generate subtype check. Blows ECX. Resets EDI to locals.
// Superklass in EAX. Subklass in EBX.
@@ -895,19 +899,19 @@ void TemplateTable::aastore() {
// Come here on success
__ bind(ok_is_subtype);
- __ movl(rax, at_rsp()); // Value
- __ movl(Address(rdx, 0), rax);
+ __ movptr(rax, at_rsp()); // Value
+ __ movptr(Address(rdx, 0), rax);
__ store_check(rdx);
__ jmpb(done);
// Have a NULL in EAX, EDX=array, ECX=index. Store NULL at ary[idx]
__ bind(is_null);
__ profile_null_seen(rbx);
- __ movl(Address(rdx, rcx, Address::times_4, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), rax);
+ __ movptr(Address(rdx, rcx, Address::times_ptr, arrayOopDesc::base_offset_in_bytes(T_OBJECT)), rax);
// Pop stack arguments
__ bind(done);
- __ addl(rsp, 3 * Interpreter::stackElementSize());
+ __ addptr(rsp, 3 * Interpreter::stackElementSize());
}
@@ -947,8 +951,8 @@ void TemplateTable::istore(int n) {
void TemplateTable::lstore(int n) {
transition(ltos, vtos);
- __ movl(laddress(n), rax);
- __ movl(haddress(n), rdx);
+ __ movptr(laddress(n), rax);
+ NOT_LP64(__ movptr(haddress(n), rdx));
__ tag_local(frame::TagCategory2, n);
}
@@ -963,10 +967,10 @@ void TemplateTable::fstore(int n) {
void TemplateTable::dstore(int n) {
transition(dtos, vtos);
if (TaggedStackInterpreter) {
- __ subl(rsp, 2 * wordSize);
+ __ subptr(rsp, 2 * wordSize);
__ fstp_d(Address(rsp, 0));
- __ popl(rax);
- __ popl(rdx);
+ __ pop(rax);
+ __ pop(rdx);
__ movl(laddress(n), rax);
__ movl(haddress(n), rdx);
__ tag_local(frame::TagCategory2, n);
@@ -979,20 +983,20 @@ void TemplateTable::dstore(int n) {
void TemplateTable::astore(int n) {
transition(vtos, vtos);
__ pop_ptr(rax, rdx);
- __ movl(aaddress(n), rax);
+ __ movptr(aaddress(n), rax);
__ tag_local(rdx, n);
}
void TemplateTable::pop() {
transition(vtos, vtos);
- __ addl(rsp, Interpreter::stackElementSize());
+ __ addptr(rsp, Interpreter::stackElementSize());
}
void TemplateTable::pop2() {
transition(vtos, vtos);
- __ addl(rsp, 2*Interpreter::stackElementSize());
+ __ addptr(rsp, 2*Interpreter::stackElementSize());
}
@@ -1099,14 +1103,14 @@ void TemplateTable::iop2(Operation op) {
transition(itos, itos);
switch (op) {
case add : __ pop_i(rdx); __ addl (rax, rdx); break;
- case sub : __ movl(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
+ case sub : __ mov(rdx, rax); __ pop_i(rax); __ subl (rax, rdx); break;
case mul : __ pop_i(rdx); __ imull(rax, rdx); break;
case _and : __ pop_i(rdx); __ andl (rax, rdx); break;
case _or : __ pop_i(rdx); __ orl (rax, rdx); break;
case _xor : __ pop_i(rdx); __ xorl (rax, rdx); break;
- case shl : __ movl(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
- case shr : __ movl(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
- case ushr : __ movl(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
+ case shl : __ mov(rcx, rax); __ pop_i(rax); __ shll (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
+ case shr : __ mov(rcx, rax); __ pop_i(rax); __ sarl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
+ case ushr : __ mov(rcx, rax); __ pop_i(rax); __ shrl (rax); break; // implicit masking of lower 5 bits by Intel shift instr.
default : ShouldNotReachHere();
}
}
@@ -1118,7 +1122,7 @@ void TemplateTable::lop2(Operation op) {
switch (op) {
case add : __ addl(rax, rbx); __ adcl(rdx, rcx); break;
case sub : __ subl(rbx, rax); __ sbbl(rcx, rdx);
- __ movl(rax, rbx); __ movl(rdx, rcx); break;
+ __ mov(rax, rbx); __ mov(rdx, rcx); break;
case _and: __ andl(rax, rbx); __ andl(rdx, rcx); break;
case _or : __ orl (rax, rbx); __ orl (rdx, rcx); break;
case _xor: __ xorl(rax, rbx); __ xorl(rdx, rcx); break;
@@ -1129,7 +1133,7 @@ void TemplateTable::lop2(Operation op) {
void TemplateTable::idiv() {
transition(itos, itos);
- __ movl(rcx, rax);
+ __ mov(rcx, rax);
__ pop_i(rax);
// Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
// they are not equal, one could do a normal division (no correction
@@ -1141,52 +1145,52 @@ void TemplateTable::idiv() {
void TemplateTable::irem() {
transition(itos, itos);
- __ movl(rcx, rax);
+ __ mov(rcx, rax);
__ pop_i(rax);
// Note: could xor rax, and rcx and compare with (-1 ^ min_int). If
// they are not equal, one could do a normal division (no correction
// needed), which may speed up this implementation for the common case.
// (see also JVM spec., p.243 & p.271)
__ corrected_idivl(rcx);
- __ movl(rax, rdx);
+ __ mov(rax, rdx);
}
void TemplateTable::lmul() {
transition(ltos, ltos);
__ pop_l(rbx, rcx);
- __ pushl(rcx); __ pushl(rbx);
- __ pushl(rdx); __ pushl(rax);
+ __ push(rcx); __ push(rbx);
+ __ push(rdx); __ push(rax);
__ lmul(2 * wordSize, 0);
- __ addl(rsp, 4 * wordSize); // take off temporaries
+ __ addptr(rsp, 4 * wordSize); // take off temporaries
}
void TemplateTable::ldiv() {
transition(ltos, ltos);
__ pop_l(rbx, rcx);
- __ pushl(rcx); __ pushl(rbx);
- __ pushl(rdx); __ pushl(rax);
+ __ push(rcx); __ push(rbx);
+ __ push(rdx); __ push(rax);
// check if y = 0
__ orl(rax, rdx);
__ jump_cc(Assembler::zero,
ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::ldiv));
- __ addl(rsp, 4 * wordSize); // take off temporaries
+ __ addptr(rsp, 4 * wordSize); // take off temporaries
}
void TemplateTable::lrem() {
transition(ltos, ltos);
__ pop_l(rbx, rcx);
- __ pushl(rcx); __ pushl(rbx);
- __ pushl(rdx); __ pushl(rax);
+ __ push(rcx); __ push(rbx);
+ __ push(rdx); __ push(rax);
// check if y = 0
__ orl(rax, rdx);
__ jump_cc(Assembler::zero,
ExternalAddress(Interpreter::_throw_ArithmeticException_entry));
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::lrem));
- __ addl(rsp, 4 * wordSize);
+ __ addptr(rsp, 4 * wordSize);
}
@@ -1200,7 +1204,7 @@ void TemplateTable::lshl() {
void TemplateTable::lshr() {
transition(itos, ltos);
- __ movl(rcx, rax); // get shift count
+ __ mov(rcx, rax); // get shift count
__ pop_l(rax, rdx); // get shift value
__ lshr(rdx, rax, true);
}
@@ -1208,7 +1212,7 @@ void TemplateTable::lshr() {
void TemplateTable::lushr() {
transition(itos, ltos);
- __ movl(rcx, rax); // get shift count
+ __ mov(rcx, rax); // get shift count
__ pop_l(rax, rdx); // get shift value
__ lshr(rdx, rax);
}
@@ -1226,7 +1230,7 @@ void TemplateTable::fop2(Operation op) {
default : ShouldNotReachHere();
}
__ f2ieee();
- __ popl(rax); // pop float thing off
+ __ pop(rax); // pop float thing off
}
@@ -1280,8 +1284,8 @@ void TemplateTable::dop2(Operation op) {
}
__ d2ieee();
// Pop double precision number from rsp.
- __ popl(rax);
- __ popl(rdx);
+ __ pop(rax);
+ __ pop(rdx);
}
@@ -1321,7 +1325,7 @@ void TemplateTable::wide_iinc() {
transition(vtos, vtos);
__ movl(rdx, at_bcp(4)); // get constant
locals_index_wide(rbx);
- __ bswap(rdx); // swap bytes & sign-extend constant
+ __ bswapl(rdx); // swap bytes & sign-extend constant
__ sarl(rdx, 16);
__ addl(iaddress(rbx), rdx);
// Note: should probably use only one movl to get both
@@ -1375,62 +1379,65 @@ void TemplateTable::convert() {
#endif // ASSERT
// Conversion
- // (Note: use pushl(rcx)/popl(rcx) for 1/2-word stack-ptr manipulation)
+ // (Note: use push(rcx)/pop(rcx) for 1/2-word stack-ptr manipulation)
switch (bytecode()) {
case Bytecodes::_i2l:
__ extend_sign(rdx, rax);
break;
case Bytecodes::_i2f:
- __ pushl(rax); // store int on tos
+ __ push(rax); // store int on tos
__ fild_s(at_rsp()); // load int to ST0
__ f2ieee(); // truncate to float size
- __ popl(rcx); // adjust rsp
+ __ pop(rcx); // adjust rsp
break;
case Bytecodes::_i2d:
- __ pushl(rax); // add one slot for d2ieee()
- __ pushl(rax); // store int on tos
+ __ push(rax); // add one slot for d2ieee()
+ __ push(rax); // store int on tos
__ fild_s(at_rsp()); // load int to ST0
__ d2ieee(); // truncate to double size
- __ popl(rcx); // adjust rsp
- __ popl(rcx);
+ __ pop(rcx); // adjust rsp
+ __ pop(rcx);
break;
case Bytecodes::_i2b:
__ shll(rax, 24); // truncate upper 24 bits
__ sarl(rax, 24); // and sign-extend byte
+ LP64_ONLY(__ movsbl(rax, rax));
break;
case Bytecodes::_i2c:
__ andl(rax, 0xFFFF); // truncate upper 16 bits
+ LP64_ONLY(__ movzwl(rax, rax));
break;
case Bytecodes::_i2s:
__ shll(rax, 16); // truncate upper 16 bits
__ sarl(rax, 16); // and sign-extend short
+ LP64_ONLY(__ movswl(rax, rax));
break;
case Bytecodes::_l2i:
/* nothing to do */
break;
case Bytecodes::_l2f:
- __ pushl(rdx); // store long on tos
- __ pushl(rax);
+ __ push(rdx); // store long on tos
+ __ push(rax);
__ fild_d(at_rsp()); // load long to ST0
__ f2ieee(); // truncate to float size
- __ popl(rcx); // adjust rsp
- __ popl(rcx);
+ __ pop(rcx); // adjust rsp
+ __ pop(rcx);
break;
case Bytecodes::_l2d:
- __ pushl(rdx); // store long on tos
- __ pushl(rax);
+ __ push(rdx); // store long on tos
+ __ push(rax);
__ fild_d(at_rsp()); // load long to ST0
__ d2ieee(); // truncate to double size
- __ popl(rcx); // adjust rsp
- __ popl(rcx);
+ __ pop(rcx); // adjust rsp
+ __ pop(rcx);
break;
case Bytecodes::_f2i:
- __ pushl(rcx); // reserve space for argument
+ __ push(rcx); // reserve space for argument
__ fstp_s(at_rsp()); // pass float argument on stack
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2i), 1);
break;
case Bytecodes::_f2l:
- __ pushl(rcx); // reserve space for argument
+ __ push(rcx); // reserve space for argument
__ fstp_s(at_rsp()); // pass float argument on stack
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::f2l), 1);
break;
@@ -1438,21 +1445,21 @@ void TemplateTable::convert() {
/* nothing to do */
break;
case Bytecodes::_d2i:
- __ pushl(rcx); // reserve space for argument
- __ pushl(rcx);
+ __ push(rcx); // reserve space for argument
+ __ push(rcx);
__ fstp_d(at_rsp()); // pass double argument on stack
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2i), 2);
break;
case Bytecodes::_d2l:
- __ pushl(rcx); // reserve space for argument
- __ pushl(rcx);
+ __ push(rcx); // reserve space for argument
+ __ push(rcx);
__ fstp_d(at_rsp()); // pass double argument on stack
__ call_VM_leaf(CAST_FROM_FN_PTR(address, SharedRuntime::d2l), 2);
break;
case Bytecodes::_d2f:
- __ pushl(rcx); // reserve space for f2ieee()
+ __ push(rcx); // reserve space for f2ieee()
__ f2ieee(); // truncate to float size
- __ popl(rcx); // adjust rsp
+ __ pop(rcx); // adjust rsp
break;
default :
ShouldNotReachHere();
@@ -1465,7 +1472,7 @@ void TemplateTable::lcmp() {
// y = rdx:rax
__ pop_l(rbx, rcx); // get x = rcx:rbx
__ lcmp2int(rcx, rbx, rdx, rax);// rcx := cmp(x, y)
- __ movl(rax, rcx);
+ __ mov(rax, rcx);
}
@@ -1476,9 +1483,9 @@ void TemplateTable::float_cmp(bool is_float, int unordered_result) {
} else {
__ pop_dtos_to_rsp();
__ fld_d(at_rsp());
- __ popl(rdx);
+ __ pop(rdx);
}
- __ popl(rcx);
+ __ pop(rcx);
__ fcmp2int(rax, unordered_result < 0);
}
@@ -1493,8 +1500,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// Load up EDX with the branch displacement
__ movl(rdx, at_bcp(1));
- __ bswap(rdx);
+ __ bswapl(rdx);
if (!is_wide) __ sarl(rdx, 16);
+ LP64_ONLY(__ movslq(rdx, rdx));
+
// Handle all the JSR stuff here, then exit.
// It's much shorter and cleaner than intermingling with the
@@ -1504,10 +1513,10 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1, 0));
// compute return address as bci in rax,
- __ leal(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
- __ subl(rax, Address(rcx, methodOopDesc::const_offset()));
+ __ lea(rax, at_bcp((is_wide ? 5 : 3) - in_bytes(constMethodOopDesc::codes_offset())));
+ __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
// Adjust the bcp in ESI by the displacement in EDX
- __ addl(rsi, rdx);
+ __ addptr(rsi, rdx);
// Push return address
__ push_i(rax);
// jsr returns vtos
@@ -1518,7 +1527,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// Normal (non-jsr) branch handling
// Adjust the bcp in ESI by the displacement in EDX
- __ addl(rsi, rdx);
+ __ addptr(rsi, rdx);
assert(UseLoopCounter || !UseOnStackReplacement, "on-stack-replacement requires loop counters");
Label backedge_counter_overflow;
@@ -1537,7 +1546,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// increment counter
__ movl(rax, Address(rcx, be_offset)); // load backedge counter
- __ increment(rax, InvocationCounter::count_increment); // increment counter
+ __ incrementl(rax, InvocationCounter::count_increment); // increment counter
__ movl(Address(rcx, be_offset), rax); // store counter
__ movl(rax, Address(rcx, inv_offset)); // load invocation counter
@@ -1565,7 +1574,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// routine while the method is being compiled, add a second test to make
// sure the overflow function is called only once every overflow_frequency.
const int overflow_frequency = 1024;
- __ andl(rbx, overflow_frequency-1);
+ __ andptr(rbx, overflow_frequency-1);
__ jcc(Assembler::zero, backedge_counter_overflow);
}
@@ -1596,14 +1605,14 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ bind(profile_method);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::profile_method), rsi);
__ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
- __ movl(rcx, Address(rbp, method_offset));
- __ movl(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
- __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
+ __ movptr(rcx, Address(rbp, method_offset));
+ __ movptr(rcx, Address(rcx, in_bytes(methodOopDesc::method_data_offset())));
+ __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
__ test_method_data_pointer(rcx, dispatch);
// offset non-null mdp by MDO::data_offset() + IR::profile_method()
- __ addl(rcx, in_bytes(methodDataOopDesc::data_offset()));
- __ addl(rcx, rax);
- __ movl(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
+ __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
+ __ addptr(rcx, rax);
+ __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize), rcx);
__ jmp(dispatch);
}
@@ -1611,8 +1620,8 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// invocation counter overflow
__ bind(backedge_counter_overflow);
- __ negl(rdx);
- __ addl(rdx, rsi); // branch bcp
+ __ negptr(rdx);
+ __ addptr(rdx, rsi); // branch bcp
call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::frequency_counter_overflow), rdx);
__ load_unsigned_byte(rbx, Address(rsi, 0)); // restore target bytecode
@@ -1621,7 +1630,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// rdx: scratch
// rdi: locals pointer
// rsi: bcp
- __ testl(rax, rax); // test result
+ __ testptr(rax, rax); // test result
__ jcc(Assembler::zero, dispatch); // no osr if null
// nmethod may have been invalidated (VM may block upon call_VM return)
__ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
@@ -1632,19 +1641,19 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// We need to prepare to execute the OSR method. First we must
// migrate the locals and monitors off of the stack.
- __ movl(rbx, rax); // save the nmethod
+ __ mov(rbx, rax); // save the nmethod
const Register thread = rcx;
__ get_thread(thread);
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
// rax, is OSR buffer, move it to expected parameter location
- __ movl(rcx, rax);
+ __ mov(rcx, rax);
// pop the interpreter frame
- __ movl(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
+ __ movptr(rdx, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
__ leave(); // remove frame anchor
- __ popl(rdi); // get return address
- __ movl(rsp, rdx); // set sp to sender sp
+ __ pop(rdi); // get return address
+ __ mov(rsp, rdx); // set sp to sender sp
Label skip;
@@ -1663,29 +1672,29 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ jcc(Assembler::notEqual, chkint);
// yes adjust to the specialized call stub return.
- assert(StubRoutines::i486::get_call_stub_compiled_return() != NULL, "must be set");
- __ lea(rdi, ExternalAddress(StubRoutines::i486::get_call_stub_compiled_return()));
+ assert(StubRoutines::x86::get_call_stub_compiled_return() != NULL, "must be set");
+ __ lea(rdi, ExternalAddress(StubRoutines::x86::get_call_stub_compiled_return()));
__ jmp(skip);
__ bind(chkint);
// Are we returning to the interpreter? Look for sentinel
- __ cmpl(Address(rdi, -8), Interpreter::return_sentinel);
+ __ cmpl(Address(rdi, -2*wordSize), Interpreter::return_sentinel);
__ jcc(Assembler::notEqual, skip);
// Adjust to compiled return back to interpreter
- __ movl(rdi, Address(rdi, -4));
+ __ movptr(rdi, Address(rdi, -wordSize));
__ bind(skip);
// Align stack pointer for compiled code (note that caller is
// responsible for undoing this fixup by remembering the old SP
// in an rbp,-relative location)
- __ andl(rsp, -(StackAlignmentInBytes));
+ __ andptr(rsp, -(StackAlignmentInBytes));
// push the (possibly adjusted) return address
- __ pushl(rdi);
+ __ push(rdi);
// and begin the OSR nmethod
__ jmp(Address(rbx, nmethod::osr_entry_point_offset()));
@@ -1723,7 +1732,7 @@ void TemplateTable::if_nullcmp(Condition cc) {
transition(atos, vtos);
// assume branch is more often taken than not (loops use backward branches)
Label not_taken;
- __ testl(rax, rax);
+ __ testptr(rax, rax);
__ jcc(j_not(cc), not_taken);
branch(false, false);
__ bind(not_taken);
@@ -1736,7 +1745,7 @@ void TemplateTable::if_acmp(Condition cc) {
// assume branch is more often taken than not (loops use backward branches)
Label not_taken;
__ pop_ptr(rdx);
- __ cmpl(rdx, rax);
+ __ cmpptr(rdx, rax);
__ jcc(j_not(cc), not_taken);
branch(false, false);
__ bind(not_taken);
@@ -1747,12 +1756,12 @@ void TemplateTable::if_acmp(Condition cc) {
void TemplateTable::ret() {
transition(vtos, vtos);
locals_index(rbx);
- __ movl(rbx, iaddress(rbx)); // get return bci, compute return bcp
+ __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
__ profile_ret(rbx, rcx);
__ get_method(rax);
- __ movl(rsi, Address(rax, methodOopDesc::const_offset()));
- __ leal(rsi, Address(rsi, rbx, Address::times_1,
- constMethodOopDesc::codes_offset()));
+ __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
+ __ lea(rsi, Address(rsi, rbx, Address::times_1,
+ constMethodOopDesc::codes_offset()));
__ dispatch_next(vtos);
}
@@ -1760,11 +1769,11 @@ void TemplateTable::ret() {
void TemplateTable::wide_ret() {
transition(vtos, vtos);
locals_index_wide(rbx);
- __ movl(rbx, iaddress(rbx)); // get return bci, compute return bcp
+ __ movptr(rbx, iaddress(rbx)); // get return bci, compute return bcp
__ profile_ret(rbx, rcx);
__ get_method(rax);
- __ movl(rsi, Address(rax, methodOopDesc::const_offset()));
- __ leal(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
+ __ movptr(rsi, Address(rax, methodOopDesc::const_offset()));
+ __ lea(rsi, Address(rsi, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
__ dispatch_next(vtos);
}
@@ -1773,13 +1782,13 @@ void TemplateTable::tableswitch() {
Label default_case, continue_execution;
transition(itos, vtos);
// align rsi
- __ leal(rbx, at_bcp(wordSize));
- __ andl(rbx, -wordSize);
+ __ lea(rbx, at_bcp(wordSize));
+ __ andptr(rbx, -wordSize);
// load lo & hi
__ movl(rcx, Address(rbx, 1 * wordSize));
__ movl(rdx, Address(rbx, 2 * wordSize));
- __ bswap(rcx);
- __ bswap(rdx);
+ __ bswapl(rcx);
+ __ bswapl(rdx);
// check against lo & hi
__ cmpl(rax, rcx);
__ jccb(Assembler::less, default_case);
@@ -1787,13 +1796,13 @@ void TemplateTable::tableswitch() {
__ jccb(Assembler::greater, default_case);
// lookup dispatch offset
__ subl(rax, rcx);
- __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * wordSize));
+ __ movl(rdx, Address(rbx, rax, Address::times_4, 3 * BytesPerInt));
__ profile_switch_case(rax, rbx, rcx);
// continue execution
__ bind(continue_execution);
- __ bswap(rdx);
+ __ bswapl(rdx);
__ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
- __ addl(rsi, rdx);
+ __ addptr(rsi, rdx);
__ dispatch_only(vtos);
// handle default
__ bind(default_case);
@@ -1812,21 +1821,21 @@ void TemplateTable::lookupswitch() {
void TemplateTable::fast_linearswitch() {
transition(itos, vtos);
Label loop_entry, loop, found, continue_execution;
- // bswap rax, so we can avoid bswapping the table entries
- __ bswap(rax);
+ // bswapl rax, so we can avoid bswapping the table entries
+ __ bswapl(rax);
// align rsi
- __ leal(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
- __ andl(rbx, -wordSize);
+ __ lea(rbx, at_bcp(wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
+ __ andptr(rbx, -wordSize);
// set counter
__ movl(rcx, Address(rbx, wordSize));
- __ bswap(rcx);
+ __ bswapl(rcx);
__ jmpb(loop_entry);
// table search
__ bind(loop);
__ cmpl(rax, Address(rbx, rcx, Address::times_8, 2 * wordSize));
__ jccb(Assembler::equal, found);
__ bind(loop_entry);
- __ decrement(rcx);
+ __ decrementl(rcx);
__ jcc(Assembler::greaterEqual, loop);
// default case
__ profile_switch_default(rax);
@@ -1838,9 +1847,9 @@ void TemplateTable::fast_linearswitch() {
__ profile_switch_case(rcx, rax, rbx);
// continue execution
__ bind(continue_execution);
- __ bswap(rdx);
+ __ bswapl(rdx);
__ load_unsigned_byte(rbx, Address(rsi, rdx, Address::times_1));
- __ addl(rsi, rdx);
+ __ addptr(rsi, rdx);
__ dispatch_only(vtos);
}
@@ -1882,13 +1891,13 @@ void TemplateTable::fast_binaryswitch() {
// setup array
__ save_bcp();
- __ leal(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
- __ andl(array, -wordSize);
+ __ lea(array, at_bcp(3*wordSize)); // btw: should be able to get rid of this instruction (change offsets below)
+ __ andptr(array, -wordSize);
// initialize i & j
__ xorl(i, i); // i = 0;
__ movl(j, Address(array, -wordSize)); // j = length(array);
// Convert j into native byteordering
- __ bswap(j);
+ __ bswapl(j);
// and start
Label entry;
__ jmp(entry);
@@ -1906,19 +1915,19 @@ void TemplateTable::fast_binaryswitch() {
// }
// Convert array[h].match to native byte-ordering before compare
__ movl(temp, Address(array, h, Address::times_8, 0*wordSize));
- __ bswap(temp);
+ __ bswapl(temp);
__ cmpl(key, temp);
if (VM_Version::supports_cmov()) {
__ cmovl(Assembler::less , j, h); // j = h if (key < array[h].fast_match())
__ cmovl(Assembler::greaterEqual, i, h); // i = h if (key >= array[h].fast_match())
} else {
Label set_i, end_of_if;
- __ jccb(Assembler::greaterEqual, set_i); // {
- __ movl(j, h); // j = h;
- __ jmp(end_of_if); // }
- __ bind(set_i); // else {
- __ movl(i, h); // i = h;
- __ bind(end_of_if); // }
+ __ jccb(Assembler::greaterEqual, set_i); // {
+ __ mov(j, h); // j = h;
+ __ jmp(end_of_if); // }
+ __ bind(set_i); // else {
+ __ mov(i, h); // i = h;
+ __ bind(end_of_if); // }
}
// while (i+1 < j)
__ bind(entry);
@@ -1931,30 +1940,32 @@ void TemplateTable::fast_binaryswitch() {
Label default_case;
// Convert array[i].match to native byte-ordering before compare
__ movl(temp, Address(array, i, Address::times_8, 0*wordSize));
- __ bswap(temp);
+ __ bswapl(temp);
__ cmpl(key, temp);
__ jcc(Assembler::notEqual, default_case);
// entry found -> j = offset
__ movl(j , Address(array, i, Address::times_8, 1*wordSize));
__ profile_switch_case(i, key, array);
- __ bswap(j);
+ __ bswapl(j);
+ LP64_ONLY(__ movslq(j, j));
__ restore_bcp();
__ restore_locals(); // restore rdi
__ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
- __ addl(rsi, j);
+ __ addptr(rsi, j);
__ dispatch_only(vtos);
// default case -> j = default offset
__ bind(default_case);
__ profile_switch_default(i);
__ movl(j, Address(array, -2*wordSize));
- __ bswap(j);
+ __ bswapl(j);
+ LP64_ONLY(__ movslq(j, j));
__ restore_bcp();
__ restore_locals(); // restore rdi
__ load_unsigned_byte(rbx, Address(rsi, j, Address::times_1));
- __ addl(rsi, j);
+ __ addptr(rsi, j);
__ dispatch_only(vtos);
}
@@ -1965,8 +1976,8 @@ void TemplateTable::_return(TosState state) {
if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
assert(state == vtos, "only valid state");
- __ movl(rax, aaddress(0));
- __ movl(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rax, aaddress(0));
+ __ movptr(rdi, Address(rax, oopDesc::klass_offset_in_bytes()));
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
Label skip_register_finalizer;
@@ -2007,10 +2018,10 @@ void TemplateTable::_return(TosState state) {
// requirement (1) but miss the volatile-store-volatile-load case. This final
// case is placed after volatile-stores although it could just as well go
// before volatile-loads.
-void TemplateTable::volatile_barrier( ) {
+void TemplateTable::volatile_barrier(Assembler::Membar_mask_bits order_constraint ) {
// Helper function to insert a is-volatile test and memory barrier
if( !os::is_MP() ) return; // Not needed on single CPU
- __ membar();
+ __ membar(order_constraint);
}
void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Register index) {
@@ -2023,10 +2034,13 @@ void TemplateTable::resolve_cache_and_index(int byte_no, Register Rcache, Regist
const int shift_count = (1 + byte_no)*BitsPerByte;
Label resolved;
__ get_cache_and_index_at_bcp(Rcache, index, 1);
- __ movl(temp, Address(Rcache, index, Address::times_4, constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
+ __ movl(temp, Address(Rcache,
+ index,
+ Address::times_ptr,
+ constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::indices_offset()));
__ shrl(temp, shift_count);
// have we resolved this bytecode?
- __ andl(temp, 0xFF);
+ __ andptr(temp, 0xFF);
__ cmpl(temp, (int)bytecode());
__ jcc(Assembler::equal, resolved);
@@ -2062,16 +2076,16 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
// Field offset
- __ movl(off, Address(cache, index, Address::times_4,
- in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
+ __ movptr(off, Address(cache, index, Address::times_ptr,
+ in_bytes(cp_base_offset + ConstantPoolCacheEntry::f2_offset())));
// Flags
- __ movl(flags, Address(cache, index, Address::times_4,
+ __ movl(flags, Address(cache, index, Address::times_ptr,
in_bytes(cp_base_offset + ConstantPoolCacheEntry::flags_offset())));
// klass overwrite register
if (is_static) {
- __ movl(obj, Address(cache, index, Address::times_4,
- in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
+ __ movptr(obj, Address(cache, index, Address::times_ptr,
+ in_bytes(cp_base_offset + ConstantPoolCacheEntry::f1_offset())));
}
}
@@ -2104,12 +2118,11 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
resolve_cache_and_index(byte_no, cache, index);
- assert(wordSize == 4, "adjust code below");
- __ movl(method, Address(cache, index, Address::times_4, method_offset));
+ __ movptr(method, Address(cache, index, Address::times_ptr, method_offset));
if (itable_index != noreg) {
- __ movl(itable_index, Address(cache, index, Address::times_4, index_offset));
+ __ movptr(itable_index, Address(cache, index, Address::times_ptr, index_offset));
}
- __ movl(flags , Address(cache, index, Address::times_4, flags_offset ));
+ __ movl(flags , Address(cache, index, Address::times_ptr, flags_offset ));
}
@@ -2129,11 +2142,11 @@ void TemplateTable::jvmti_post_field_access(Register cache,
__ jcc(Assembler::zero, L1);
// cache entry pointer
- __ addl(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
+ __ addptr(cache, in_bytes(constantPoolCacheOopDesc::base_offset()));
__ shll(index, LogBytesPerWord);
- __ addl(cache, index);
+ __ addptr(cache, index);
if (is_static) {
- __ movl(rax, 0); // NULL object reference
+ __ xorptr(rax, rax); // NULL object reference
} else {
__ pop(atos); // Get the object
__ verify_oop(rax);
@@ -2177,7 +2190,7 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
__ shrl(flags, ConstantPoolCacheEntry::tosBits);
assert(btos == 0, "change code, btos != 0");
// btos
- __ andl(flags, 0x0f);
+ __ andptr(flags, 0x0f);
__ jcc(Assembler::notZero, notByte);
__ load_signed_byte(rax, lo );
@@ -2245,10 +2258,10 @@ void TemplateTable::getfield_or_static(int byte_no, bool is_static) {
// Generate code as if volatile. There just aren't enough registers to
// save that information and this code is faster than the test.
__ fild_d(lo); // Must load atomically
- __ subl(rsp,2*wordSize); // Make space for store
+ __ subptr(rsp,2*wordSize); // Make space for store
__ fistp_d(Address(rsp,0));
- __ popl(rax);
- __ popl(rdx);
+ __ pop(rax);
+ __ pop(rdx);
__ push(ltos);
// Don't rewrite to _fast_lgetfield for potential volatile case.
@@ -2319,16 +2332,16 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
if (is_static) {
// Life is simple. Null out the object pointer.
- __ xorl(rbx, rbx);
+ __ xorptr(rbx, rbx);
} else {
// Life is harder. The stack holds the value on top, followed by the object.
// We don't know the size of the value, though; it could be one or two words
// depending on its type. As a result, we must find the type to determine where
// the object is.
Label two_word, valsize_known;
- __ movl(rcx, Address(rax, rdx, Address::times_4, in_bytes(cp_base_offset +
+ __ movl(rcx, Address(rax, rdx, Address::times_ptr, in_bytes(cp_base_offset +
ConstantPoolCacheEntry::flags_offset())));
- __ movl(rbx, rsp);
+ __ mov(rbx, rsp);
__ shrl(rcx, ConstantPoolCacheEntry::tosBits);
// Make sure we don't need to mask rcx for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
@@ -2336,22 +2349,22 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
__ jccb(Assembler::equal, two_word);
__ cmpl(rcx, dtos);
__ jccb(Assembler::equal, two_word);
- __ addl(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
+ __ addptr(rbx, Interpreter::expr_offset_in_bytes(1)); // one word jvalue (not ltos, dtos)
__ jmpb(valsize_known);
__ bind(two_word);
- __ addl(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
+ __ addptr(rbx, Interpreter::expr_offset_in_bytes(2)); // two words jvalue
__ bind(valsize_known);
// setup object pointer
- __ movl(rbx, Address(rbx, 0));
+ __ movptr(rbx, Address(rbx, 0));
}
// cache entry pointer
- __ addl(rax, in_bytes(cp_base_offset));
+ __ addptr(rax, in_bytes(cp_base_offset));
__ shll(rdx, LogBytesPerWord);
- __ addl(rax, rdx);
+ __ addptr(rax, rdx);
// object (tos)
- __ movl(rcx, rsp);
+ __ mov(rcx, rsp);
// rbx,: object pointer set up above (NULL if static)
// rax,: cache entry pointer
// rcx: jvalue object on the stack
@@ -2426,7 +2439,7 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(atos);
if (!is_static) pop_and_check_object(obj);
- __ movl(lo, rax );
+ __ movptr(lo, rax );
__ store_check(obj, lo); // Need to mark card
if (!is_static) {
patch_bytecode(Bytecodes::_fast_aputfield, rcx, rbx);
@@ -2472,12 +2485,14 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
if (!is_static) pop_and_check_object(obj);
// Replace with real volatile test
- __ pushl(rdx);
- __ pushl(rax); // Must update atomically with FIST
+ __ push(rdx);
+ __ push(rax); // Must update atomically with FIST
__ fild_d(Address(rsp,0)); // So load into FPU register
__ fistp_d(lo); // and put into memory atomically
- __ addl(rsp,2*wordSize);
- volatile_barrier();
+ __ addptr(rsp, 2*wordSize);
+ // volatile_barrier();
+ volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
+ Assembler::StoreStore));
// Don't rewrite volatile version
__ jmp(notVolatile);
@@ -2485,8 +2500,8 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
__ pop(ltos); // overwrites rdx
if (!is_static) pop_and_check_object(obj);
- __ movl(hi, rdx);
- __ movl(lo, rax);
+ NOT_LP64(__ movptr(hi, rdx));
+ __ movptr(lo, rax);
if (!is_static) {
patch_bytecode(Bytecodes::_fast_lputfield, rcx, rbx);
}
@@ -2527,7 +2542,8 @@ void TemplateTable::putfield_or_static(int byte_no, bool is_static) {
// Check for volatile store
__ testl(rdx, rdx);
__ jcc(Assembler::zero, notVolatile);
- volatile_barrier( );
+ volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
+ Assembler::StoreStore));
__ bind(notVolatile);
}
@@ -2552,10 +2568,10 @@ void TemplateTable::jvmti_post_fast_field_mod() {
__ pop_ptr(rbx); // copy the object pointer from tos
__ verify_oop(rbx);
__ push_ptr(rbx); // put the object pointer back on tos
- __ subl(rsp, sizeof(jvalue)); // add space for a jvalue object
- __ movl(rcx, rsp);
+ __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
+ __ mov(rcx, rsp);
__ push_ptr(rbx); // save object pointer so we can steal rbx,
- __ movl(rbx, 0);
+ __ xorptr(rbx, rbx);
const Address lo_value(rcx, rbx, Address::times_1, 0*wordSize);
const Address hi_value(rcx, rbx, Address::times_1, 1*wordSize);
switch (bytecode()) { // load values into the jvalue object
@@ -2563,21 +2579,28 @@ void TemplateTable::jvmti_post_fast_field_mod() {
case Bytecodes::_fast_sputfield: __ movw(lo_value, rax); break;
case Bytecodes::_fast_cputfield: __ movw(lo_value, rax); break;
case Bytecodes::_fast_iputfield: __ movl(lo_value, rax); break;
- case Bytecodes::_fast_lputfield: __ movl(hi_value, rdx); __ movl(lo_value, rax); break;
+ case Bytecodes::_fast_lputfield:
+ NOT_LP64(__ movptr(hi_value, rdx));
+ __ movptr(lo_value, rax);
+ break;
+
// need to call fld_s() after fstp_s() to restore the value for below
case Bytecodes::_fast_fputfield: __ fstp_s(lo_value); __ fld_s(lo_value); break;
+
// need to call fld_d() after fstp_d() to restore the value for below
case Bytecodes::_fast_dputfield: __ fstp_d(lo_value); __ fld_d(lo_value); break;
+
// since rcx is not an object we don't call store_check() here
- case Bytecodes::_fast_aputfield: __ movl(lo_value, rax); break;
+ case Bytecodes::_fast_aputfield: __ movptr(lo_value, rax); break;
+
default: ShouldNotReachHere();
}
__ pop_ptr(rbx); // restore copy of object pointer
// Save rax, and sometimes rdx because call_VM() will clobber them,
// then use them for JVM/DI purposes
- __ pushl(rax);
- if (bytecode() == Bytecodes::_fast_lputfield) __ pushl(rdx);
+ __ push(rax);
+ if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
// access constant pool cache entry
__ get_cache_entry_pointer_at_bcp(rax, rdx, 1);
__ verify_oop(rbx);
@@ -2585,9 +2608,9 @@ void TemplateTable::jvmti_post_fast_field_mod() {
// rax,: cache entry pointer
// rcx: jvalue object on the stack
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::post_field_modification), rbx, rax, rcx);
- if (bytecode() == Bytecodes::_fast_lputfield) __ popl(rdx); // restore high value
- __ popl(rax); // restore lower value
- __ addl(rsp, sizeof(jvalue)); // release jvalue object space
+ if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx); // restore high value
+ __ pop(rax); // restore lower value
+ __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
__ bind(L2);
}
}
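
The long/double put paths above wrap the high-word store in NOT_LP64(...). The working assumption is the usual HotSpot convention: the macro keeps its argument on 32-bit builds and drops it on 64-bit builds, where the single movptr already writes all 64 bits of the long. A sketch of that convention (illustrative, not the actual definition):

    #ifdef _LP64
    #define NOT_LP64(code)            /* 64-bit: high-word store disappears */
    #else
    #define NOT_LP64(code) code       /* 32-bit: keep the rdx (high word) store */
    #endif
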
@@ -2603,12 +2626,12 @@ void TemplateTable::fast_storefield(TosState state) {
__ get_cache_and_index_at_bcp(rcx, rbx, 1);
// test for volatile with rdx but rdx is tos register for lputfield.
- if (bytecode() == Bytecodes::_fast_lputfield) __ pushl(rdx);
- __ movl(rdx, Address(rcx, rbx, Address::times_4, in_bytes(base +
+ if (bytecode() == Bytecodes::_fast_lputfield) __ push(rdx);
+ __ movl(rdx, Address(rcx, rbx, Address::times_ptr, in_bytes(base +
ConstantPoolCacheEntry::flags_offset())));
// replace index with field offset from cache entry
- __ movl(rbx, Address(rcx, rbx, Address::times_4, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
+ __ movptr(rbx, Address(rcx, rbx, Address::times_ptr, in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
// Doug Lea believes this is not needed with current Sparcs (TSO) and Intel (PSO).
// volatile_barrier( );
@@ -2620,7 +2643,7 @@ void TemplateTable::fast_storefield(TosState state) {
__ testl(rdx, rdx);
__ jcc(Assembler::zero, notVolatile);
- if (bytecode() == Bytecodes::_fast_lputfield) __ popl(rdx);
+ if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
// Get object from stack
pop_and_check_object(rcx);
@@ -2635,22 +2658,26 @@ void TemplateTable::fast_storefield(TosState state) {
case Bytecodes::_fast_sputfield: // fall through
case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
- case Bytecodes::_fast_lputfield: __ movl(hi, rdx); __ movl(lo, rax); break;
+ case Bytecodes::_fast_lputfield:
+ NOT_LP64(__ movptr(hi, rdx));
+ __ movptr(lo, rax);
+ break;
case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
- case Bytecodes::_fast_aputfield: __ movl(lo, rax); __ store_check(rcx, lo); break;
+ case Bytecodes::_fast_aputfield: __ movptr(lo, rax); __ store_check(rcx, lo); break;
default:
ShouldNotReachHere();
}
Label done;
- volatile_barrier( );
+ volatile_barrier(Assembler::Membar_mask_bits(Assembler::StoreLoad |
+ Assembler::StoreStore));
__ jmpb(done);
// Same code as above, but don't need rdx to test for volatile.
__ bind(notVolatile);
- if (bytecode() == Bytecodes::_fast_lputfield) __ popl(rdx);
+ if (bytecode() == Bytecodes::_fast_lputfield) __ pop(rdx);
// Get object from stack
pop_and_check_object(rcx);
@@ -2661,10 +2688,13 @@ void TemplateTable::fast_storefield(TosState state) {
case Bytecodes::_fast_sputfield: // fall through
case Bytecodes::_fast_cputfield: __ movw(lo, rax); break;
case Bytecodes::_fast_iputfield: __ movl(lo, rax); break;
- case Bytecodes::_fast_lputfield: __ movl(hi, rdx); __ movl(lo, rax); break;
+ case Bytecodes::_fast_lputfield:
+ NOT_LP64(__ movptr(hi, rdx));
+ __ movptr(lo, rax);
+ break;
case Bytecodes::_fast_fputfield: __ fstp_s(lo); break;
case Bytecodes::_fast_dputfield: __ fstp_d(lo); break;
- case Bytecodes::_fast_aputfield: __ movl(lo, rax); __ store_check(rcx, lo); break;
+ case Bytecodes::_fast_aputfield: __ movptr(lo, rax); __ store_check(rcx, lo); break;
default:
ShouldNotReachHere();
}
@@ -2697,7 +2727,10 @@ void TemplateTable::fast_accessfield(TosState state) {
// access constant pool cache
__ get_cache_and_index_at_bcp(rcx, rbx, 1);
// replace index with field offset from cache entry
- __ movl(rbx, Address(rcx, rbx, Address::times_4, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
+ __ movptr(rbx, Address(rcx,
+ rbx,
+ Address::times_ptr,
+ in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
// rax,: object
@@ -2709,14 +2742,14 @@ void TemplateTable::fast_accessfield(TosState state) {
// access field
switch (bytecode()) {
- case Bytecodes::_fast_bgetfield: __ movsxb(rax, lo ); break;
+ case Bytecodes::_fast_bgetfield: __ movsbl(rax, lo ); break;
case Bytecodes::_fast_sgetfield: __ load_signed_word(rax, lo ); break;
case Bytecodes::_fast_cgetfield: __ load_unsigned_word(rax, lo ); break;
case Bytecodes::_fast_igetfield: __ movl(rax, lo); break;
case Bytecodes::_fast_lgetfield: __ stop("should not be rewritten"); break;
case Bytecodes::_fast_fgetfield: __ fld_s(lo); break;
case Bytecodes::_fast_dgetfield: __ fld_d(lo); break;
- case Bytecodes::_fast_agetfield: __ movl(rax, lo); __ verify_oop(rax); break;
+ case Bytecodes::_fast_agetfield: __ movptr(rax, lo); __ verify_oop(rax); break;
default:
ShouldNotReachHere();
}
@@ -2728,11 +2761,14 @@ void TemplateTable::fast_accessfield(TosState state) {
void TemplateTable::fast_xaccess(TosState state) {
transition(vtos, state);
// get receiver
- __ movl(rax, aaddress(0));
+ __ movptr(rax, aaddress(0));
debug_only(__ verify_local_tag(frame::TagReference, 0));
// access constant pool cache
__ get_cache_and_index_at_bcp(rcx, rdx, 2);
- __ movl(rbx, Address(rcx, rdx, Address::times_4, in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
+ __ movptr(rbx, Address(rcx,
+ rdx,
+ Address::times_ptr,
+ in_bytes(constantPoolCacheOopDesc::base_offset() + ConstantPoolCacheEntry::f2_offset())));
// make sure exception is reported in correct bcp range (getfield is next instruction)
__ increment(rsi);
__ null_check(rax);
@@ -2740,7 +2776,7 @@ void TemplateTable::fast_xaccess(TosState state) {
if (state == itos) {
__ movl(rax, lo);
} else if (state == atos) {
- __ movl(rax, lo);
+ __ movptr(rax, lo);
__ verify_oop(rax);
} else if (state == ftos) {
__ fld_s(lo);
@@ -2784,7 +2820,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no,
__ movl(recv, flags);
__ andl(recv, 0xFF);
// recv count is 0 based?
- __ movl(recv, Address(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1)));
+ __ movptr(recv, Address(rsp, recv, Interpreter::stackElementScale(), -Interpreter::expr_offset_in_bytes(1)));
__ verify_oop(recv);
}
@@ -2794,7 +2830,7 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no,
}
if (save_flags) {
- __ movl(rsi, flags);
+ __ mov(rsi, flags);
}
// compute return type
@@ -2802,20 +2838,19 @@ void TemplateTable::prepare_invoke(Register method, Register index, int byte_no,
// Make sure we don't need to mask flags for tosBits after the above shift
ConstantPoolCacheEntry::verify_tosBits();
// load return address
- { const int table =
- is_invokeinterface
- ? (int)Interpreter::return_5_addrs_by_index_table()
- : (int)Interpreter::return_3_addrs_by_index_table();
- __ movl(flags, Address(noreg, flags, Address::times_4, table));
+ {
+ ExternalAddress table(is_invokeinterface ? (address)Interpreter::return_5_addrs_by_index_table() :
+ (address)Interpreter::return_3_addrs_by_index_table());
+ __ movptr(flags, ArrayAddress(table, Address(noreg, flags, Address::times_ptr)));
}
// push return address
- __ pushl(flags);
+ __ push(flags);
// Restore flag value from the constant pool cache, and restore rsi
// for later null checks. rsi is the bytecode pointer
if (save_flags) {
- __ movl(flags, rsi);
+ __ mov(flags, rsi);
__ restore_bcp();
}
}
@@ -2852,7 +2887,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
// get receiver klass
__ null_check(recv, oopDesc::klass_offset_in_bytes());
// Keep recv in rcx for callee expects it there
- __ movl(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rax, Address(recv, oopDesc::klass_offset_in_bytes()));
__ verify_oop(rax);
// profile this call
@@ -2861,7 +2896,7 @@ void TemplateTable::invokevirtual_helper(Register index, Register recv,
// get target methodOop & entry point
const int base = instanceKlass::vtable_start_offset() * wordSize;
assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
- __ movl(method, Address(rax, index, Address::times_4, base + vtableEntry::method_offset_in_bytes()));
+ __ movptr(method, Address(rax, index, Address::times_ptr, base + vtableEntry::method_offset_in_bytes()));
__ jump_from_interpreted(method, rdx);
}
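
For the non-final case the two loads above perform the standard vtable dispatch: fetch the receiver's klass, then index into its embedded vtable. Restated with raw pointer arithmetic that mirrors the generated addressing mode (a sketch, not HotSpot accessor API):

    static methodOop vtable_method(klassOop k, int index) {
      const int base = instanceKlass::vtable_start_offset() * wordSize;
      address slot = (address) k + base
                   + index * vtableEntry::size() * wordSize
                   + vtableEntry::method_offset_in_bytes();
      return *(methodOop*) slot;      // methodOop to invoke
    }
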
@@ -2927,19 +2962,19 @@ void TemplateTable::invokeinterface(int byte_no) {
// Get receiver klass into rdx - also a null check
__ restore_locals(); // restore rdi
- __ movl(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rdx, Address(rcx, oopDesc::klass_offset_in_bytes()));
__ verify_oop(rdx);
// profile this call
__ profile_virtual_call(rdx, rsi, rdi);
- __ movl(rdi, rdx); // Save klassOop in rdi
+ __ mov(rdi, rdx); // Save klassOop in rdi
// Compute start of first itableOffsetEntry (which is at the end of the vtable)
const int base = instanceKlass::vtable_start_offset() * wordSize;
- assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
+ assert(vtableEntry::size() * wordSize == (1 << (int)Address::times_ptr), "adjust the scaling in the code below");
__ movl(rsi, Address(rdx, instanceKlass::vtable_length_offset() * wordSize)); // Get length of vtable
- __ leal(rdx, Address(rdx, rsi, Address::times_4, base));
+ __ lea(rdx, Address(rdx, rsi, Address::times_4, base));
if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary
__ round_to(rdx, BytesPerLong);
@@ -2949,20 +2984,20 @@ void TemplateTable::invokeinterface(int byte_no) {
__ jmpb(entry);
__ bind(search);
- __ addl(rdx, itableOffsetEntry::size() * wordSize);
+ __ addptr(rdx, itableOffsetEntry::size() * wordSize);
__ bind(entry);
// Check that the entry is non-null. A null entry means that the receiver
// class doesn't implement the interface, and wasn't the same as the
// receiver class checked when the interface was resolved.
- __ pushl(rdx);
- __ movl(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
- __ testl(rdx, rdx);
+ __ push(rdx);
+ __ movptr(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
+ __ testptr(rdx, rdx);
__ jcc(Assembler::notZero, interface_ok);
// throw exception
- __ popl(rdx); // pop saved register first.
- __ popl(rbx); // pop return address (pushed by prepare_invoke)
+ __ pop(rdx); // pop saved register first.
+ __ pop(rbx); // pop return address (pushed by prepare_invoke)
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
__ call_VM(noreg, CAST_FROM_FN_PTR(address,
@@ -2971,15 +3006,15 @@ void TemplateTable::invokeinterface(int byte_no) {
__ should_not_reach_here();
__ bind(interface_ok);
- __ popl(rdx);
+ __ pop(rdx);
- __ cmpl(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
+ __ cmpptr(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
__ jcc(Assembler::notEqual, search);
__ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes()));
- __ addl(rdx, rdi); // Add offset to klassOop
- assert(itableMethodEntry::size() * wordSize == 4, "adjust the scaling in the code below");
- __ movl(rbx, Address(rdx, rbx, Address::times_4));
+ __ addptr(rdx, rdi); // Add offset to klassOop
+ assert(itableMethodEntry::size() * wordSize == (1 << (int)Address::times_ptr), "adjust the scaling in the code below");
+ __ movptr(rbx, Address(rdx, rbx, Address::times_ptr));
// rbx,: methodOop to call
// rcx: receiver
// Check for abstract method error
@@ -2987,12 +3022,12 @@ void TemplateTable::invokeinterface(int byte_no) {
// interpreter entry point and a conditional jump to it in case of a null
// method.
{ Label L;
- __ testl(rbx, rbx);
+ __ testptr(rbx, rbx);
__ jcc(Assembler::notZero, L);
// throw exception
// note: must restore interpreter registers to canonical
// state for exception handling to work correctly!
- __ popl(rbx); // pop return address (pushed by prepare_invoke)
+ __ pop(rbx); // pop return address (pushed by prepare_invoke)
__ restore_bcp(); // rsi must be correct for exception handler (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as well (was destroyed)
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::throw_AbstractMethodError));
@@ -3023,8 +3058,8 @@ void TemplateTable::_new() {
__ get_cpool_and_tags(rcx, rax);
// get instanceKlass
- __ movl(rcx, Address(rcx, rdx, Address::times_4, sizeof(constantPoolOopDesc)));
- __ pushl(rcx); // save the contexts of klass for initializing the header
+ __ movptr(rcx, Address(rcx, rdx, Address::times_ptr, sizeof(constantPoolOopDesc)));
+ __ push(rcx); // save the contents of klass for initializing the header
// make sure the class we're about to instantiate has been resolved.
// Note: slow_case does a pop of stack, which is why we loaded class/pushed above
@@ -3057,11 +3092,11 @@ void TemplateTable::_new() {
const Register thread = rcx;
__ get_thread(thread);
- __ movl(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
- __ leal(rbx, Address(rax, rdx, Address::times_1));
- __ cmpl(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
+ __ movptr(rax, Address(thread, in_bytes(JavaThread::tlab_top_offset())));
+ __ lea(rbx, Address(rax, rdx, Address::times_1));
+ __ cmpptr(rbx, Address(thread, in_bytes(JavaThread::tlab_end_offset())));
__ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
- __ movl(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
+ __ movptr(Address(thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
if (ZeroTLAB) {
// the fields have been already cleared
__ jmp(initialize_header);
@@ -3079,9 +3114,9 @@ void TemplateTable::_new() {
Label retry;
__ bind(retry);
- __ mov32(rax, heap_top);
- __ leal(rbx, Address(rax, rdx, Address::times_1));
- __ cmp32(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
+ __ movptr(rax, heap_top);
+ __ lea(rbx, Address(rax, rdx, Address::times_1));
+ __ cmpptr(rbx, ExternalAddress((address)Universe::heap()->end_addr()));
__ jcc(Assembler::above, slow_case);
// Compare rax, with the top addr, and if still equal, store the new
@@ -3091,8 +3126,7 @@ void TemplateTable::_new() {
// rax,: object begin
// rbx,: object end
// rdx: instance size in bytes
- if (os::is_MP()) __ lock();
- __ cmpxchgptr(rbx, heap_top);
+ __ locked_cmpxchgptr(rbx, heap_top);
// if someone beat us on the allocation, try again, otherwise continue
__ jcc(Assembler::notEqual, retry);
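
Replacing the explicit lock()/cmpxchgptr pair with locked_cmpxchgptr keeps the same protocol: a compare-and-swap retry loop that bumps the shared eden top. A self-contained C++ sketch of that protocol (hypothetical helper, std::atomic standing in for the lock-prefixed cmpxchg on heap_top):

    #include <atomic>
    #include <cstddef>
    // Returns the start of the new object, or nullptr to fall into slow_case.
    static char* bump_allocate(std::atomic<char*>& top, size_t size, char* end) {
      char* obj = top.load();
      do {
        if (obj + size > end) return nullptr;                 // heap exhausted
      } while (!top.compare_exchange_weak(obj, obj + size));  // lost race: retry
      return obj;
    }
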
@@ -3124,8 +3158,8 @@ void TemplateTable::_new() {
// initialize remaining object fields: rdx was a multiple of 8
{ Label loop;
__ bind(loop);
- __ movl(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
- __ movl(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx);
+ __ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 1*oopSize), rcx);
+ NOT_LP64(__ movptr(Address(rax, rdx, Address::times_8, sizeof(oopDesc) - 2*oopSize), rcx));
__ decrement(rdx);
__ jcc(Assembler::notZero, loop);
}
@@ -3133,15 +3167,15 @@ void TemplateTable::_new() {
// initialize object header only.
__ bind(initialize_header);
if (UseBiasedLocking) {
- __ popl(rcx); // get saved klass back in the register.
- __ movl(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
- __ movl(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
+ __ pop(rcx); // get saved klass back in the register.
+ __ movptr(rbx, Address(rcx, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()), rbx);
} else {
- __ movl(Address(rax, oopDesc::mark_offset_in_bytes ()),
- (int)markOopDesc::prototype()); // header
- __ popl(rcx); // get saved klass back in the register.
+ __ movptr(Address(rax, oopDesc::mark_offset_in_bytes ()),
+ (int32_t)markOopDesc::prototype()); // header
+ __ pop(rcx); // get saved klass back in the register.
}
- __ movl(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
+ __ movptr(Address(rax, oopDesc::klass_offset_in_bytes()), rcx); // klass
{
SkipIfEqual skip_if(_masm, &DTraceAllocProbes, 0);
@@ -3157,7 +3191,7 @@ void TemplateTable::_new() {
// slow case
__ bind(slow_case);
- __ popl(rcx); // restore stack pointer to what it was when we came in.
+ __ pop(rcx); // restore stack pointer to what it was when we came in.
__ get_constant_pool(rax);
__ get_unsigned_2_byte_index_at_bcp(rdx, 1);
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::_new), rax, rdx);
@@ -3194,7 +3228,7 @@ void TemplateTable::arraylength() {
void TemplateTable::checkcast() {
transition(atos, atos);
Label done, is_null, ok_is_subtype, quicked, resolved;
- __ testl(rax, rax); // Object is in EAX
+ __ testptr(rax, rax); // Object is in EAX
__ jcc(Assembler::zero, is_null);
// Get cpool & tags index
@@ -3211,24 +3245,24 @@ void TemplateTable::checkcast() {
// Get superklass in EAX and subklass in EBX
__ bind(quicked);
- __ movl(rdx, rax); // Save object in EDX; EAX needed for subtype check
- __ movl(rax, Address(rcx, rbx, Address::times_4, sizeof(constantPoolOopDesc)));
+ __ mov(rdx, rax); // Save object in EDX; EAX needed for subtype check
+ __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
__ bind(resolved);
- __ movl(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rbx, Address(rdx, oopDesc::klass_offset_in_bytes()));
// Generate subtype check. Blows ECX. Resets EDI. Object in EDX.
// Superklass in EAX. Subklass in EBX.
__ gen_subtype_check( rbx, ok_is_subtype );
// Come here on failure
- __ pushl(rdx);
+ __ push(rdx);
// object is at TOS
__ jump(ExternalAddress(Interpreter::_throw_ClassCastException_entry));
// Come here on success
__ bind(ok_is_subtype);
- __ movl(rax,rdx); // Restore object in EDX
+ __ mov(rax,rdx); // Restore object in EDX
// Collect counts on whether this check-cast sees NULLs a lot or not.
if (ProfileInterpreter) {
@@ -3245,7 +3279,7 @@ void TemplateTable::checkcast() {
void TemplateTable::instanceof() {
transition(atos, itos);
Label done, is_null, ok_is_subtype, quicked, resolved;
- __ testl(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, is_null);
// Get cpool & tags index
@@ -3258,13 +3292,13 @@ void TemplateTable::instanceof() {
__ push(atos);
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc) );
__ pop_ptr(rdx);
- __ movl(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rdx, Address(rdx, oopDesc::klass_offset_in_bytes()));
__ jmp(resolved);
// Get superklass in EAX and subklass in EDX
__ bind(quicked);
- __ movl(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
- __ movl(rax, Address(rcx, rbx, Address::times_4, sizeof(constantPoolOopDesc)));
+ __ movptr(rdx, Address(rax, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rax, Address(rcx, rbx, Address::times_ptr, sizeof(constantPoolOopDesc)));
__ bind(resolved);
@@ -3306,7 +3340,7 @@ void TemplateTable::_breakpoint() {
// get the unpatched byte code
__ get_method(rcx);
__ call_VM(noreg, CAST_FROM_FN_PTR(address, InterpreterRuntime::get_original_bytecode_at), rcx, rsi);
- __ movl(rbx, rax);
+ __ mov(rbx, rax);
// post the breakpoint event
__ get_method(rcx);
@@ -3362,50 +3396,50 @@ void TemplateTable::monitorenter() {
// find a free slot in the monitor block (result in rdx)
{ Label entry, loop, exit;
- __ movl(rcx, monitor_block_top); // points to current entry, starting with top-most entry
- __ leal(rbx, monitor_block_bot); // points to word before bottom of monitor block
+ __ movptr(rcx, monitor_block_top); // points to current entry, starting with top-most entry
+ __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
__ jmpb(entry);
__ bind(loop);
- __ cmpl(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), NULL_WORD); // check if current entry is used
+ __ cmpptr(Address(rcx, BasicObjectLock::obj_offset_in_bytes()), (int32_t)NULL_WORD); // check if current entry is used
// TODO - need new func here - kbt
if (VM_Version::supports_cmov()) {
- __ cmovl(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
+ __ cmov(Assembler::equal, rdx, rcx); // if not used then remember entry in rdx
} else {
Label L;
__ jccb(Assembler::notEqual, L);
- __ movl(rdx, rcx); // if not used then remember entry in rdx
+ __ mov(rdx, rcx); // if not used then remember entry in rdx
__ bind(L);
}
- __ cmpl(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
- __ jccb(Assembler::equal, exit); // if same object then stop searching
- __ addl(rcx, entry_size); // otherwise advance to next entry
+ __ cmpptr(rax, Address(rcx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
+ __ jccb(Assembler::equal, exit); // if same object then stop searching
+ __ addptr(rcx, entry_size); // otherwise advance to next entry
__ bind(entry);
- __ cmpl(rcx, rbx); // check if bottom reached
+ __ cmpptr(rcx, rbx); // check if bottom reached
__ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
__ bind(exit);
}
- __ testl(rdx, rdx); // check if a slot has been found
- __ jccb(Assembler::notZero, allocated); // if found, continue with that one
+ __ testptr(rdx, rdx); // check if a slot has been found
+ __ jccb(Assembler::notZero, allocated); // if found, continue with that one
// allocate one if there's no free slot
{ Label entry, loop;
// 1. compute new pointers // rsp: old expression stack top
- __ movl(rdx, monitor_block_bot); // rdx: old expression stack bottom
- __ subl(rsp, entry_size); // move expression stack top
- __ subl(rdx, entry_size); // move expression stack bottom
- __ movl(rcx, rsp); // set start value for copy loop
- __ movl(monitor_block_bot, rdx); // set new monitor block top
+ __ movptr(rdx, monitor_block_bot); // rdx: old expression stack bottom
+ __ subptr(rsp, entry_size); // move expression stack top
+ __ subptr(rdx, entry_size); // move expression stack bottom
+ __ mov(rcx, rsp); // set start value for copy loop
+ __ movptr(monitor_block_bot, rdx); // set new monitor block top
__ jmp(entry);
// 2. move expression stack contents
__ bind(loop);
- __ movl(rbx, Address(rcx, entry_size)); // load expression stack word from old location
- __ movl(Address(rcx, 0), rbx); // and store it at new location
- __ addl(rcx, wordSize); // advance to next word
+ __ movptr(rbx, Address(rcx, entry_size)); // load expression stack word from old location
+ __ movptr(Address(rcx, 0), rbx); // and store it at new location
+ __ addptr(rcx, wordSize); // advance to next word
__ bind(entry);
- __ cmpl(rcx, rdx); // check if bottom reached
+ __ cmpptr(rcx, rdx); // check if bottom reached
__ jcc(Assembler::notEqual, loop); // if not at bottom then copy next word
}
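
Growing the monitor area slides every expression-stack word down by one BasicObjectLock so a fresh slot opens between the monitors and the stack. The copy loop above, restated as a small C++ sketch (hypothetical names; entry_words = entry_size / wordSize):

    static void slide_stack_down(intptr_t* new_top, intptr_t* new_bottom,
                                 size_t entry_words) {
      for (intptr_t* p = new_top; p != new_bottom; p++) {
        p[0] = p[entry_words];   // word moves down from its old location
      }
    }
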
@@ -3417,7 +3451,7 @@ void TemplateTable::monitorenter() {
// The object has already been popped from the stack, so the expression stack looks correct.
__ increment(rsi);
- __ movl(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
+ __ movptr(Address(rdx, BasicObjectLock::obj_offset_in_bytes()), rax); // store object
__ lock_object(rdx);
// check to make sure this monitor doesn't cause stack overflow after locking
@@ -3442,16 +3476,16 @@ void TemplateTable::monitorexit() {
// find matching slot
{ Label entry, loop;
- __ movl(rdx, monitor_block_top); // points to current entry, starting with top-most entry
- __ leal(rbx, monitor_block_bot); // points to word before bottom of monitor block
+ __ movptr(rdx, monitor_block_top); // points to current entry, starting with top-most entry
+ __ lea(rbx, monitor_block_bot); // points to word before bottom of monitor block
__ jmpb(entry);
__ bind(loop);
- __ cmpl(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
+ __ cmpptr(rax, Address(rdx, BasicObjectLock::obj_offset_in_bytes())); // check if current entry is for same object
__ jcc(Assembler::equal, found); // if same object then stop searching
- __ addl(rdx, entry_size); // otherwise advance to next entry
+ __ addptr(rdx, entry_size); // otherwise advance to next entry
__ bind(entry);
- __ cmpl(rdx, rbx); // check if bottom reached
+ __ cmpptr(rdx, rbx); // check if bottom reached
__ jcc(Assembler::notEqual, loop); // if not at bottom then check this entry
}
@@ -3476,7 +3510,8 @@ void TemplateTable::monitorexit() {
void TemplateTable::wide() {
transition(vtos, vtos);
__ load_unsigned_byte(rbx, at_bcp(1));
- __ jmp(Address(noreg, rbx, Address::times_4, int(Interpreter::_wentry_point)));
+ ExternalAddress wtable((address)Interpreter::_wentry_point);
+ __ jump(ArrayAddress(wtable, Address(noreg, rbx, Address::times_ptr)));
// Note: the rsi increment step is part of the individual wide bytecode implementations
}
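
The old form baked the table address into a 32-bit displacement; the ExternalAddress/ArrayAddress form keeps the base relocatable and scales the index by pointer size. What the jump computes, as a sketch (wtable/wide_target are illustrative names; _wentry_point is the interpreter's table of wide-bytecode entry points):

    static address wide_target(int wide_opcode) {
      address* wtable = (address*) Interpreter::_wentry_point;
      return wtable[wide_opcode];   // the generated code jumps to this entry
    }
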
@@ -3490,10 +3525,10 @@ void TemplateTable::multianewarray() {
// last dim is on top of stack; we want address of first one:
// first_addr = last_addr + (ndims - 1) * stackElementSize - 1*wordsize
// the latter wordSize to point to the beginning of the array.
- __ leal( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
+ __ lea( rax, Address(rsp, rax, Interpreter::stackElementScale(), -wordSize));
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray), rax); // pass in rax,
__ load_unsigned_byte(rbx, at_bcp(3));
- __ leal(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
+ __ lea(rsp, Address(rsp, rbx, Interpreter::stackElementScale())); // get rid of counts
}
#endif /* !CC_INTERP */
diff --git a/src/cpu/x86/vm/templateTable_x86_32.hpp b/src/cpu/x86/vm/templateTable_x86_32.hpp
index 58f092356..f02b9b1ea 100644
--- a/src/cpu/x86/vm/templateTable_x86_32.hpp
+++ b/src/cpu/x86/vm/templateTable_x86_32.hpp
@@ -26,7 +26,7 @@
Bytecodes::Code code);
static void invokevirtual_helper(Register index, Register recv,
Register flags);
- static void volatile_barrier( );
+ static void volatile_barrier(Assembler::Membar_mask_bits order_constraint );
// Helpers
static void index_check(Register array, Register index);
diff --git a/src/cpu/x86/vm/templateTable_x86_64.cpp b/src/cpu/x86/vm/templateTable_x86_64.cpp
index c831b0cdc..b239d635b 100644
--- a/src/cpu/x86/vm/templateTable_x86_64.cpp
+++ b/src/cpu/x86/vm/templateTable_x86_64.cpp
@@ -25,6 +25,8 @@
#include "incls/_precompiled.incl"
#include "incls/_templateTable_x86_64.cpp.incl"
+#ifndef CC_INTERP
+
#define __ _masm->
// Platform-dependent initialization
@@ -317,7 +319,7 @@ void TemplateTable::ldc(bool wide) {
__ jmp(Done);
__ bind(isOop);
- __ movq(rax, Address(rcx, rbx, Address::times_8, base_offset));
+ __ movptr(rax, Address(rcx, rbx, Address::times_8, base_offset));
__ push_ptr(rax);
if (VerifyOops) {
@@ -355,8 +357,8 @@ void TemplateTable::ldc2_w() {
void TemplateTable::locals_index(Register reg, int offset) {
__ load_unsigned_byte(reg, at_bcp(offset));
- __ negq(reg);
- if (TaggedStackInterpreter) __ shlq(reg, 1); // index = index*2
+ __ negptr(reg);
+ if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2
}
void TemplateTable::iload() {
@@ -443,7 +445,7 @@ void TemplateTable::dload() {
void TemplateTable::aload() {
transition(vtos, atos);
locals_index(rbx);
- __ movq(rax, aaddress(rbx));
+ __ movptr(rax, aaddress(rbx));
debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
@@ -451,8 +453,8 @@ void TemplateTable::locals_index_wide(Register reg) {
__ movl(reg, at_bcp(2));
__ bswapl(reg);
__ shrl(reg, 16);
- __ negq(reg);
- if (TaggedStackInterpreter) __ shlq(reg, 1); // index = index*2
+ __ negptr(reg);
+ if (TaggedStackInterpreter) __ shlptr(reg, 1); // index = index*2
}
void TemplateTable::wide_iload() {
@@ -486,7 +488,7 @@ void TemplateTable::wide_dload() {
void TemplateTable::wide_aload() {
transition(vtos, atos);
locals_index_wide(rbx);
- __ movq(rax, aaddress(rbx));
+ __ movptr(rax, aaddress(rbx));
debug_only(__ verify_local_tag(frame::TagReference, rbx));
}
@@ -495,7 +497,7 @@ void TemplateTable::index_check(Register array, Register index) {
// check array
__ null_check(array, arrayOopDesc::length_offset_in_bytes());
// sign extend index for use by indexed load
- __ movslq(index, index);
+ __ movl2ptr(index, index);
// check index
__ cmpl(index, Address(array, arrayOopDesc::length_offset_in_bytes()));
if (index != rbx) {
@@ -642,7 +644,7 @@ void TemplateTable::dload(int n) {
void TemplateTable::aload(int n) {
transition(vtos, atos);
- __ movq(rax, aaddress(n));
+ __ movptr(rax, aaddress(n));
debug_only(__ verify_local_tag(frame::TagReference, n));
}
@@ -757,7 +759,7 @@ void TemplateTable::astore() {
transition(vtos, vtos);
__ pop_ptr(rax, rdx); // will need to pop tag too
locals_index(rbx);
- __ movq(aaddress(rbx), rax);
+ __ movptr(aaddress(rbx), rax);
__ tag_local(rdx, rbx); // store tag from stack, might be returnAddr
}
@@ -797,7 +799,7 @@ void TemplateTable::wide_astore() {
transition(vtos, vtos);
__ pop_ptr(rax, rdx); // will need to pop tag too
locals_index_wide(rbx);
- __ movq(aaddress(rbx), rax);
+ __ movptr(aaddress(rbx), rax);
__ tag_local(rdx, rbx); // store tag from stack, might be returnAddr
}
@@ -861,25 +863,25 @@ void TemplateTable::aastore() {
Label is_null, ok_is_subtype, done;
transition(vtos, vtos);
// stack: ..., array, index, value
- __ movq(rax, at_tos()); // value
+ __ movptr(rax, at_tos()); // value
__ movl(rcx, at_tos_p1()); // index
- __ movq(rdx, at_tos_p2()); // array
+ __ movptr(rdx, at_tos_p2()); // array
index_check(rdx, rcx); // kills rbx
// do array store check - check for NULL value first
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, is_null);
// Move subklass into rbx
__ load_klass(rbx, rax);
// Move superklass into rax
__ load_klass(rax, rdx);
- __ movq(rax, Address(rax,
- sizeof(oopDesc) +
- objArrayKlass::element_klass_offset_in_bytes()));
+ __ movptr(rax, Address(rax,
+ sizeof(oopDesc) +
+ objArrayKlass::element_klass_offset_in_bytes()));
// Compress array + index*oopSize + 12 into a single register. Frees rcx.
- __ leaq(rdx, Address(rdx, rcx,
- UseCompressedOops ? Address::times_4 : Address::times_8,
- arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
+ __ lea(rdx, Address(rdx, rcx,
+ UseCompressedOops ? Address::times_4 : Address::times_8,
+ arrayOopDesc::base_offset_in_bytes(T_OBJECT)));
// Generate subtype check. Blows rcx, rdi
// Superklass in rax. Subklass in rbx.
@@ -891,7 +893,7 @@ void TemplateTable::aastore() {
// Come here on success
__ bind(ok_is_subtype);
- __ movq(rax, at_tos()); // Value
+ __ movptr(rax, at_tos()); // Value
__ store_heap_oop(Address(rdx, 0), rax);
__ store_check(rdx);
__ jmp(done);
@@ -906,7 +908,7 @@ void TemplateTable::aastore() {
// Pop stack arguments
__ bind(done);
- __ addq(rsp, 3 * Interpreter::stackElementSize());
+ __ addptr(rsp, 3 * Interpreter::stackElementSize());
}
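
The element-address computation picks its scale from UseCompressedOops: a heap array slot is a 4-byte narrow oop when compression is on and an 8-byte pointer otherwise, with store_heap_oop assumed to perform the matching encode before the store. Sketch of the scale choice (illustrative only):

    Address::ScaleFactor elem_scale =
        UseCompressedOops ? Address::times_4   // narrow (32-bit) heap oops
                          : Address::times_8;  // full 64-bit oops
    Address element(rdx, rcx, elem_scale,
                    arrayOopDesc::base_offset_in_bytes(T_OBJECT));
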
void TemplateTable::bastore() {
@@ -968,18 +970,18 @@ void TemplateTable::dstore(int n) {
void TemplateTable::astore(int n) {
transition(vtos, vtos);
__ pop_ptr(rax, rdx);
- __ movq(aaddress(n), rax);
+ __ movptr(aaddress(n), rax);
__ tag_local(rdx, n);
}
void TemplateTable::pop() {
transition(vtos, vtos);
- __ addq(rsp, Interpreter::stackElementSize());
+ __ addptr(rsp, Interpreter::stackElementSize());
}
void TemplateTable::pop2() {
transition(vtos, vtos);
- __ addq(rsp, 2 * Interpreter::stackElementSize());
+ __ addptr(rsp, 2 * Interpreter::stackElementSize());
}
void TemplateTable::dup() {
@@ -1090,11 +1092,11 @@ void TemplateTable::iop2(Operation op) {
void TemplateTable::lop2(Operation op) {
transition(ltos, ltos);
switch (op) {
- case add : __ pop_l(rdx); __ addq (rax, rdx); break;
- case sub : __ movq(rdx, rax); __ pop_l(rax); __ subq (rax, rdx); break;
- case _and : __ pop_l(rdx); __ andq (rax, rdx); break;
- case _or : __ pop_l(rdx); __ orq (rax, rdx); break;
- case _xor : __ pop_l(rdx); __ xorq (rax, rdx); break;
+ case add : __ pop_l(rdx); __ addptr (rax, rdx); break;
+ case sub : __ mov(rdx, rax); __ pop_l(rax); __ subptr (rax, rdx); break;
+ case _and : __ pop_l(rdx); __ andptr (rax, rdx); break;
+ case _or : __ pop_l(rdx); __ orptr (rax, rdx); break;
+ case _xor : __ pop_l(rdx); __ xorptr (rax, rdx); break;
default : ShouldNotReachHere();
}
}
@@ -1130,7 +1132,7 @@ void TemplateTable::lmul() {
void TemplateTable::ldiv() {
transition(ltos, ltos);
- __ movq(rcx, rax);
+ __ mov(rcx, rax);
__ pop_l(rax);
// generate explicit div0 check
__ testq(rcx, rcx);
@@ -1145,7 +1147,7 @@ void TemplateTable::ldiv() {
void TemplateTable::lrem() {
transition(ltos, ltos);
- __ movq(rcx, rax);
+ __ mov(rcx, rax);
__ pop_l(rax);
__ testq(rcx, rcx);
__ jump_cc(Assembler::zero,
@@ -1155,7 +1157,7 @@ void TemplateTable::lrem() {
// needed), which may speed up this implementation for the common case.
// (see also JVM spec., p.243 & p.271)
__ corrected_idivq(rcx); // kills rbx
- __ movq(rax, rdx);
+ __ mov(rax, rdx);
}
void TemplateTable::lshl() {
@@ -1184,7 +1186,7 @@ void TemplateTable::fop2(Operation op) {
switch (op) {
case add:
__ addss(xmm0, at_rsp());
- __ addq(rsp, Interpreter::stackElementSize());
+ __ addptr(rsp, Interpreter::stackElementSize());
break;
case sub:
__ movflt(xmm1, xmm0);
@@ -1193,7 +1195,7 @@ void TemplateTable::fop2(Operation op) {
break;
case mul:
__ mulss(xmm0, at_rsp());
- __ addq(rsp, Interpreter::stackElementSize());
+ __ addptr(rsp, Interpreter::stackElementSize());
break;
case div:
__ movflt(xmm1, xmm0);
@@ -1216,7 +1218,7 @@ void TemplateTable::dop2(Operation op) {
switch (op) {
case add:
__ addsd(xmm0, at_rsp());
- __ addq(rsp, 2 * Interpreter::stackElementSize());
+ __ addptr(rsp, 2 * Interpreter::stackElementSize());
break;
case sub:
__ movdbl(xmm1, xmm0);
@@ -1225,7 +1227,7 @@ void TemplateTable::dop2(Operation op) {
break;
case mul:
__ mulsd(xmm0, at_rsp());
- __ addq(rsp, 2 * Interpreter::stackElementSize());
+ __ addptr(rsp, 2 * Interpreter::stackElementSize());
break;
case div:
__ movdbl(xmm1, xmm0);
@@ -1486,7 +1488,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
if (!is_wide) {
__ sarl(rdx, 16);
}
- __ movslq(rdx, rdx);
+ __ movl2ptr(rdx, rdx);
// Handle all the JSR stuff here, then exit.
// It's much shorter and cleaner than intermingling with the non-JSR
@@ -1496,11 +1498,11 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
__ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1, 0));
// compute return address as bci in rax
- __ leaq(rax, at_bcp((is_wide ? 5 : 3) -
+ __ lea(rax, at_bcp((is_wide ? 5 : 3) -
in_bytes(constMethodOopDesc::codes_offset())));
- __ subq(rax, Address(rcx, methodOopDesc::const_offset()));
+ __ subptr(rax, Address(rcx, methodOopDesc::const_offset()));
// Adjust the bcp in r13 by the displacement in rdx
- __ addq(r13, rdx);
+ __ addptr(r13, rdx);
// jsr returns atos that is not an oop
__ push_i(rax);
__ dispatch_only(vtos);
@@ -1510,7 +1512,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// Normal (non-jsr) branch handling
// Adjust the bcp in r13 by the displacement in rdx
- __ addq(r13, rdx);
+ __ addptr(r13, rdx);
assert(UseLoopCounter || !UseOnStackReplacement,
"on-stack-replacement requires loop counters");
@@ -1594,25 +1596,25 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
CAST_FROM_FN_PTR(address,
InterpreterRuntime::profile_method), r13);
__ load_unsigned_byte(rbx, Address(r13, 0)); // restore target bytecode
- __ movq(rcx, Address(rbp, method_offset));
- __ movq(rcx, Address(rcx,
- in_bytes(methodOopDesc::method_data_offset())));
- __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
- rcx);
+ __ movptr(rcx, Address(rbp, method_offset));
+ __ movptr(rcx, Address(rcx,
+ in_bytes(methodOopDesc::method_data_offset())));
+ __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
+ rcx);
__ test_method_data_pointer(rcx, dispatch);
// offset non-null mdp by MDO::data_offset() + IR::profile_method()
- __ addq(rcx, in_bytes(methodDataOopDesc::data_offset()));
- __ addq(rcx, rax);
- __ movq(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
- rcx);
+ __ addptr(rcx, in_bytes(methodDataOopDesc::data_offset()));
+ __ addptr(rcx, rax);
+ __ movptr(Address(rbp, frame::interpreter_frame_mdx_offset * wordSize),
+ rcx);
__ jmp(dispatch);
}
if (UseOnStackReplacement) {
// invocation counter overflow
__ bind(backedge_counter_overflow);
- __ negq(rdx);
- __ addq(rdx, r13); // branch bcp
+ __ negptr(rdx);
+ __ addptr(rdx, r13); // branch bcp
// IcoResult frequency_counter_overflow([JavaThread*], address branch_bcp)
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
@@ -1625,7 +1627,7 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// rdx: scratch
// r14: locals pointer
// r13: bcp
- __ testq(rax, rax); // test result
+ __ testptr(rax, rax); // test result
__ jcc(Assembler::zero, dispatch); // no osr if null
// nmethod may have been invalidated (VM may block upon call_VM return)
__ movl(rcx, Address(rax, nmethod::entry_bci_offset()));
@@ -1636,12 +1638,12 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
// We need to prepare to execute the OSR method. First we must
// migrate the locals and monitors off of the stack.
- __ movq(r13, rax); // save the nmethod
+ __ mov(r13, rax); // save the nmethod
call_VM(noreg, CAST_FROM_FN_PTR(address, SharedRuntime::OSR_migration_begin));
// eax is OSR buffer, move it to expected parameter location
- __ movq(j_rarg0, rax);
+ __ mov(j_rarg0, rax);
// We use j_rarg definitions here so that registers don't conflict as parameter
// registers change across platforms as we are in the midst of a calling
@@ -1651,18 +1653,18 @@ void TemplateTable::branch(bool is_jsr, bool is_wide) {
const Register sender_sp = j_rarg1;
// pop the interpreter frame
- __ movq(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
+ __ movptr(sender_sp, Address(rbp, frame::interpreter_frame_sender_sp_offset * wordSize)); // get sender sp
__ leave(); // remove frame anchor
- __ popq(retaddr); // get return address
- __ movq(rsp, sender_sp); // set sp to sender sp
+ __ pop(retaddr); // get return address
+ __ mov(rsp, sender_sp); // set sp to sender sp
// Ensure compiled code always sees stack at proper alignment
- __ andq(rsp, -(StackAlignmentInBytes));
+ __ andptr(rsp, -(StackAlignmentInBytes));
// unlike x86 we need no specialized return from compiled code
// to the interpreter or the call stub.
// push the return address
- __ pushq(retaddr);
+ __ push(retaddr);
// and begin the OSR nmethod
__ jmp(Address(r13, nmethod::osr_entry_point_offset()));
@@ -1698,7 +1700,7 @@ void TemplateTable::if_nullcmp(Condition cc) {
transition(atos, vtos);
// assume branch is more often taken than not (loops use backward branches)
Label not_taken;
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(j_not(cc), not_taken);
branch(false, false);
__ bind(not_taken);
@@ -1710,7 +1712,7 @@ void TemplateTable::if_acmp(Condition cc) {
// assume branch is more often taken than not (loops use backward branches)
Label not_taken;
__ pop_ptr(rdx);
- __ cmpq(rdx, rax);
+ __ cmpptr(rdx, rax);
__ jcc(j_not(cc), not_taken);
branch(false, false);
__ bind(not_taken);
@@ -1720,23 +1722,23 @@ void TemplateTable::if_acmp(Condition cc) {
void TemplateTable::ret() {
transition(vtos, vtos);
locals_index(rbx);
- __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp
+ __ movslq(rbx, iaddress(rbx)); // get return bci, compute return bcp
__ profile_ret(rbx, rcx);
__ get_method(rax);
- __ movq(r13, Address(rax, methodOopDesc::const_offset()));
- __ leaq(r13, Address(r13, rbx, Address::times_1,
- constMethodOopDesc::codes_offset()));
+ __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
+ __ lea(r13, Address(r13, rbx, Address::times_1,
+ constMethodOopDesc::codes_offset()));
__ dispatch_next(vtos);
}
void TemplateTable::wide_ret() {
transition(vtos, vtos);
locals_index_wide(rbx);
- __ movq(rbx, aaddress(rbx)); // get return bci, compute return bcp
+ __ movptr(rbx, aaddress(rbx)); // get return bci, compute return bcp
__ profile_ret(rbx, rcx);
__ get_method(rax);
- __ movq(r13, Address(rax, methodOopDesc::const_offset()));
- __ leaq(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
+ __ movptr(r13, Address(rax, methodOopDesc::const_offset()));
+ __ lea(r13, Address(r13, rbx, Address::times_1, constMethodOopDesc::codes_offset()));
__ dispatch_next(vtos);
}
@@ -1744,8 +1746,8 @@ void TemplateTable::tableswitch() {
Label default_case, continue_execution;
transition(itos, vtos);
// align r13
- __ leaq(rbx, at_bcp(BytesPerInt));
- __ andq(rbx, -BytesPerInt);
+ __ lea(rbx, at_bcp(BytesPerInt));
+ __ andptr(rbx, -BytesPerInt);
// load lo & hi
__ movl(rcx, Address(rbx, BytesPerInt));
__ movl(rdx, Address(rbx, 2 * BytesPerInt));
@@ -1763,9 +1765,9 @@ void TemplateTable::tableswitch() {
// continue execution
__ bind(continue_execution);
__ bswapl(rdx);
- __ movslq(rdx, rdx);
+ __ movl2ptr(rdx, rdx);
__ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
- __ addq(r13, rdx);
+ __ addptr(r13, rdx);
__ dispatch_only(vtos);
// handle default
__ bind(default_case);
@@ -1785,10 +1787,10 @@ void TemplateTable::fast_linearswitch() {
// bswap rax so we can avoid bswapping the table entries
__ bswapl(rax);
// align r13
- __ leaq(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
- // this instruction (change offsets
- // below)
- __ andq(rbx, -BytesPerInt);
+ __ lea(rbx, at_bcp(BytesPerInt)); // btw: should be able to get rid of
+ // this instruction (change offsets
+ // below)
+ __ andptr(rbx, -BytesPerInt);
// set counter
__ movl(rcx, Address(rbx, BytesPerInt));
__ bswapl(rcx);
@@ -1811,9 +1813,9 @@ void TemplateTable::fast_linearswitch() {
// continue execution
__ bind(continue_execution);
__ bswapl(rdx);
- __ movslq(rdx, rdx);
+ __ movl2ptr(rdx, rdx);
__ load_unsigned_byte(rbx, Address(r13, rdx, Address::times_1));
- __ addq(r13, rdx);
+ __ addptr(r13, rdx);
__ dispatch_only(vtos);
}
@@ -1853,11 +1855,11 @@ void TemplateTable::fast_binaryswitch() {
const Register temp = rsi;
// Find array start
- __ leaq(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
- // get rid of this
- // instruction (change
- // offsets below)
- __ andq(array, -BytesPerInt);
+ __ lea(array, at_bcp(3 * BytesPerInt)); // btw: should be able to
+ // get rid of this
+ // instruction (change
+ // offsets below)
+ __ andptr(array, -BytesPerInt);
// Initialize i & j
__ xorl(i, i); // i = 0;
@@ -1909,9 +1911,9 @@ void TemplateTable::fast_binaryswitch() {
__ movl(j , Address(array, i, Address::times_8, BytesPerInt));
__ profile_switch_case(i, key, array);
__ bswapl(j);
- __ movslq(j, j);
+ __ movl2ptr(j, j);
__ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
- __ addq(r13, j);
+ __ addptr(r13, j);
__ dispatch_only(vtos);
// default case -> j = default offset
@@ -1919,9 +1921,9 @@ void TemplateTable::fast_binaryswitch() {
__ profile_switch_default(i);
__ movl(j, Address(array, -2 * BytesPerInt));
__ bswapl(j);
- __ movslq(j, j);
+ __ movl2ptr(j, j);
__ load_unsigned_byte(rbx, Address(r13, j, Address::times_1));
- __ addq(r13, j);
+ __ addptr(r13, j);
__ dispatch_only(vtos);
}
@@ -1933,7 +1935,7 @@ void TemplateTable::_return(TosState state) {
if (_desc->bytecode() == Bytecodes::_return_register_finalizer) {
assert(state == vtos, "only valid state");
- __ movq(c_rarg1, aaddress(0));
+ __ movptr(c_rarg1, aaddress(0));
__ load_klass(rdi, c_rarg1);
__ movl(rdi, Address(rdi, Klass::access_flags_offset_in_bytes() + sizeof(oopDesc)));
__ testl(rdi, JVM_ACC_HAS_FINALIZER);
@@ -2044,9 +2046,9 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
ByteSize cp_base_offset = constantPoolCacheOopDesc::base_offset();
// Field offset
- __ movq(off, Address(cache, index, Address::times_8,
- in_bytes(cp_base_offset +
- ConstantPoolCacheEntry::f2_offset())));
+ __ movptr(off, Address(cache, index, Address::times_8,
+ in_bytes(cp_base_offset +
+ ConstantPoolCacheEntry::f2_offset())));
// Flags
__ movl(flags, Address(cache, index, Address::times_8,
in_bytes(cp_base_offset +
@@ -2054,9 +2056,9 @@ void TemplateTable::load_field_cp_cache_entry(Register obj,
// klass overwrite register
if (is_static) {
- __ movq(obj, Address(cache, index, Address::times_8,
- in_bytes(cp_base_offset +
- ConstantPoolCacheEntry::f1_offset())));
+ __ movptr(obj, Address(cache, index, Address::times_8,
+ in_bytes(cp_base_offset +
+ ConstantPoolCacheEntry::f1_offset())));
}
}
@@ -2088,9 +2090,9 @@ void TemplateTable::load_invoke_cp_cache_entry(int byte_no,
resolve_cache_and_index(byte_no, cache, index);
assert(wordSize == 8, "adjust code below");
- __ movq(method, Address(cache, index, Address::times_8, method_offset));
+ __ movptr(method, Address(cache, index, Address::times_8, method_offset));
if (itable_index != noreg) {
- __ movq(itable_index,
+ __ movptr(itable_index,
Address(cache, index, Address::times_8, index_offset));
}
__ movl(flags , Address(cache, index, Address::times_8, flags_offset));
@@ -2116,13 +2118,13 @@ void TemplateTable::jvmti_post_field_access(Register cache, Register index,
__ get_cache_and_index_at_bcp(c_rarg2, c_rarg3, 1);
// cache entry pointer
- __ addq(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
+ __ addptr(c_rarg2, in_bytes(constantPoolCacheOopDesc::base_offset()));
__ shll(c_rarg3, LogBytesPerWord);
- __ addq(c_rarg2, c_rarg3);
+ __ addptr(c_rarg2, c_rarg3);
if (is_static) {
__ xorl(c_rarg1, c_rarg1); // NULL object reference
} else {
- __ movq(c_rarg1, at_tos()); // get object pointer without popping it
+ __ movptr(c_rarg1, at_tos()); // get object pointer without popping it
__ verify_oop(c_rarg1);
}
// c_rarg1: object pointer or NULL
@@ -2319,20 +2321,20 @@ void TemplateTable::jvmti_post_field_mod(Register cache, Register index, bool is
// Make sure we don't need to mask rcx for tosBits after the
// above shift
ConstantPoolCacheEntry::verify_tosBits();
- __ movq(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
+ __ movptr(c_rarg1, at_tos_p1()); // initially assume a one word jvalue
__ cmpl(c_rarg3, ltos);
- __ cmovq(Assembler::equal,
- c_rarg1, at_tos_p2()); // ltos (two word jvalue)
+ __ cmovptr(Assembler::equal,
+ c_rarg1, at_tos_p2()); // ltos (two word jvalue)
__ cmpl(c_rarg3, dtos);
- __ cmovq(Assembler::equal,
- c_rarg1, at_tos_p2()); // dtos (two word jvalue)
+ __ cmovptr(Assembler::equal,
+ c_rarg1, at_tos_p2()); // dtos (two word jvalue)
}
// cache entry pointer
- __ addq(c_rarg2, in_bytes(cp_base_offset));
+ __ addptr(c_rarg2, in_bytes(cp_base_offset));
__ shll(rscratch1, LogBytesPerWord);
- __ addq(c_rarg2, rscratch1);
+ __ addptr(c_rarg2, rscratch1);
// object (tos)
- __ movq(c_rarg3, rsp);
+ __ mov(c_rarg3, rsp);
// c_rarg1: object pointer set up above (NULL if static)
// c_rarg2: cache entry pointer
// c_rarg3: jvalue object on the stack
@@ -2510,8 +2512,8 @@ void TemplateTable::jvmti_post_fast_field_mod() {
__ pop_ptr(rbx); // copy the object pointer from tos
__ verify_oop(rbx);
__ push_ptr(rbx); // put the object pointer back on tos
- __ subq(rsp, sizeof(jvalue)); // add space for a jvalue object
- __ movq(c_rarg3, rsp);
+ __ subptr(rsp, sizeof(jvalue)); // add space for a jvalue object
+ __ mov(c_rarg3, rsp);
const Address field(c_rarg3, 0);
switch (bytecode()) { // load values into the jvalue object
@@ -2529,7 +2531,7 @@ void TemplateTable::jvmti_post_fast_field_mod() {
// Save rax because call_VM() will clobber it, then use it for
// JVMTI purposes
- __ pushq(rax);
+ __ push(rax);
// access constant pool cache entry
__ get_cache_entry_pointer_at_bcp(c_rarg2, rax, 1);
__ verify_oop(rbx);
@@ -2540,8 +2542,8 @@ void TemplateTable::jvmti_post_fast_field_mod() {
CAST_FROM_FN_PTR(address,
InterpreterRuntime::post_field_modification),
rbx, c_rarg2, c_rarg3);
- __ popq(rax); // restore lower value
- __ addq(rsp, sizeof(jvalue)); // release jvalue object space
+ __ pop(rax); // restore lower value
+ __ addptr(rsp, sizeof(jvalue)); // release jvalue object space
__ bind(L2);
}
}
@@ -2562,8 +2564,8 @@ void TemplateTable::fast_storefield(TosState state) {
ConstantPoolCacheEntry::flags_offset())));
// replace index with field offset from cache entry
- __ movq(rbx, Address(rcx, rbx, Address::times_8,
- in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
+ __ movptr(rbx, Address(rcx, rbx, Address::times_8,
+ in_bytes(base + ConstantPoolCacheEntry::f2_offset())));
// [jk] not needed currently
// volatile_barrier(Assembler::Membar_mask_bits(Assembler::LoadStore |
@@ -2632,15 +2634,15 @@ void TemplateTable::fast_accessfield(TosState state) {
// access constant pool cache entry
__ get_cache_entry_pointer_at_bcp(c_rarg2, rcx, 1);
__ verify_oop(rax);
- __ movq(r12, rax); // save object pointer before call_VM() clobbers it
- __ movq(c_rarg1, rax);
+ __ mov(r12, rax); // save object pointer before call_VM() clobbers it
+ __ mov(c_rarg1, rax);
// c_rarg1: object pointer copied above
// c_rarg2: cache entry pointer
__ call_VM(noreg,
CAST_FROM_FN_PTR(address,
InterpreterRuntime::post_field_access),
c_rarg1, c_rarg2);
- __ movq(rax, r12); // restore object pointer
+ __ mov(rax, r12); // restore object pointer
__ reinit_heapbase();
__ bind(L1);
}
@@ -2656,9 +2658,9 @@ void TemplateTable::fast_accessfield(TosState state) {
// __ shrl(rdx, ConstantPoolCacheEntry::volatileField);
// __ andl(rdx, 0x1);
// }
- __ movq(rbx, Address(rcx, rbx, Address::times_8,
- in_bytes(constantPoolCacheOopDesc::base_offset() +
- ConstantPoolCacheEntry::f2_offset())));
+ __ movptr(rbx, Address(rcx, rbx, Address::times_8,
+ in_bytes(constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCacheEntry::f2_offset())));
// rax: object
__ verify_oop(rax);
@@ -2709,17 +2711,17 @@ void TemplateTable::fast_xaccess(TosState state) {
transition(vtos, state);
// get receiver
- __ movq(rax, aaddress(0));
+ __ movptr(rax, aaddress(0));
debug_only(__ verify_local_tag(frame::TagReference, 0));
// access constant pool cache
__ get_cache_and_index_at_bcp(rcx, rdx, 2);
- __ movq(rbx,
- Address(rcx, rdx, Address::times_8,
- in_bytes(constantPoolCacheOopDesc::base_offset() +
- ConstantPoolCacheEntry::f2_offset())));
+ __ movptr(rbx,
+ Address(rcx, rdx, Address::times_8,
+ in_bytes(constantPoolCacheOopDesc::base_offset() +
+ ConstantPoolCacheEntry::f2_offset())));
// make sure exception is reported in correct bcp range (getfield is
// next instruction)
- __ incrementq(r13);
+ __ increment(r13);
__ null_check(rax);
switch (state) {
case itos:
@@ -2749,7 +2751,7 @@ void TemplateTable::fast_xaccess(TosState state) {
// __ bind(notVolatile);
// }
- __ decrementq(r13);
+ __ decrement(r13);
}
@@ -2788,7 +2790,7 @@ void TemplateTable::prepare_invoke(Register method,
__ movl(recv, flags);
__ andl(recv, 0xFF);
if (TaggedStackInterpreter) __ shll(recv, 1); // index*2
- __ movq(recv, Address(rsp, recv, Address::times_8,
+ __ movptr(recv, Address(rsp, recv, Address::times_8,
-Interpreter::expr_offset_in_bytes(1)));
__ verify_oop(recv);
}
@@ -2811,11 +2813,11 @@ void TemplateTable::prepare_invoke(Register method,
ExternalAddress return_5((address)Interpreter::return_5_addrs_by_index_table());
ExternalAddress return_3((address)Interpreter::return_3_addrs_by_index_table());
__ lea(rscratch1, (is_invokeinterface ? return_5 : return_3));
- __ movq(flags, Address(rscratch1, flags, Address::times_8));
+ __ movptr(flags, Address(rscratch1, flags, Address::times_8));
}
// push return address
- __ pushq(flags);
+ __ push(flags);
// Restore flag field from the constant pool cache, and restore esi
// for later null checks. r13 is the bytecode pointer
@@ -2867,10 +2869,10 @@ void TemplateTable::invokevirtual_helper(Register index,
const int base = instanceKlass::vtable_start_offset() * wordSize;
assert(vtableEntry::size() * wordSize == 8,
"adjust the scaling in the code below");
- __ movq(method, Address(rax, index,
+ __ movptr(method, Address(rax, index,
Address::times_8,
base + vtableEntry::method_offset_in_bytes()));
- __ movq(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
+ __ movptr(rdx, Address(method, methodOopDesc::interpreter_entry_offset()));
__ jump_from_interpreted(method, rdx);
}
@@ -2940,7 +2942,7 @@ void TemplateTable::invokeinterface(int byte_no) {
// profile this call
__ profile_virtual_call(rdx, r13, r14);
- __ movq(r14, rdx); // Save klassOop in r14
+ __ mov(r14, rdx); // Save klassOop in r14
// Compute start of first itableOffsetEntry (which is at the end of
// the vtable)
@@ -2950,18 +2952,18 @@ void TemplateTable::invokeinterface(int byte_no) {
"adjust the scaling in the code below");
__ movl(r13, Address(rdx,
instanceKlass::vtable_length_offset() * wordSize));
- __ leaq(rdx, Address(rdx, r13, Address::times_8, base));
+ __ lea(rdx, Address(rdx, r13, Address::times_8, base));
if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary
- __ round_to_q(rdx, BytesPerLong);
+ __ round_to(rdx, BytesPerLong);
}
Label entry, search, interface_ok;
__ jmpb(entry);
__ bind(search);
- __ addq(rdx, itableOffsetEntry::size() * wordSize);
+ __ addptr(rdx, itableOffsetEntry::size() * wordSize);
__ bind(entry);
@@ -2969,13 +2971,13 @@ void TemplateTable::invokeinterface(int byte_no) {
// receiver class doesn't implement the interface, and wasn't the
// same as the receiver class checked when the interface was
// resolved.
- __ pushq(rdx);
- __ movq(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
- __ testq(rdx, rdx);
+ __ push(rdx);
+ __ movptr(rdx, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
+ __ testptr(rdx, rdx);
__ jcc(Assembler::notZero, interface_ok);
// throw exception
- __ popq(rdx); // pop saved register first.
- __ popq(rbx); // pop return address (pushed by prepare_invoke)
+ __ pop(rdx); // pop saved register first.
+ __ pop(rbx); // pop return address (pushed by prepare_invoke)
__ restore_bcp(); // r13 must be correct for exception handler (was
// destroyed)
__ restore_locals(); // make sure locals pointer is correct as well
@@ -2986,17 +2988,17 @@ void TemplateTable::invokeinterface(int byte_no) {
__ should_not_reach_here();
__ bind(interface_ok);
- __ popq(rdx);
+ __ pop(rdx);
- __ cmpq(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
+ __ cmpptr(rax, Address(rdx, itableOffsetEntry::interface_offset_in_bytes()));
__ jcc(Assembler::notEqual, search);
__ movl(rdx, Address(rdx, itableOffsetEntry::offset_offset_in_bytes()));
- __ addq(rdx, r14); // Add offset to klassOop
+ __ addptr(rdx, r14); // Add offset to klassOop
assert(itableMethodEntry::size() * wordSize == 8,
"adjust the scaling in the code below");
- __ movq(rbx, Address(rdx, rbx, Address::times_8));
+ __ movptr(rbx, Address(rdx, rbx, Address::times_8));
// rbx: methodOop to call
// rcx: receiver
// Check for abstract method error
@@ -3005,12 +3007,12 @@ void TemplateTable::invokeinterface(int byte_no) {
// conditional jump to it in case of a null method.
{
Label L;
- __ testq(rbx, rbx);
+ __ testptr(rbx, rbx);
__ jcc(Assembler::notZero, L);
// throw exception
// note: must restore interpreter registers to canonical
// state for exception handling to work correctly!
- __ popq(rbx); // pop return address (pushed by prepare_invoke)
+ __ pop(rbx); // pop return address (pushed by prepare_invoke)
__ restore_bcp(); // r13 must be correct for exception handler
// (was destroyed)
__ restore_locals(); // make sure locals pointer is correct as
@@ -3023,7 +3025,7 @@ void TemplateTable::invokeinterface(int byte_no) {
__ bind(L);
}
- __ movq(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset()));
+ __ movptr(rcx, Address(rbx, methodOopDesc::interpreter_entry_offset()));
// do the call
// rcx: receiver
@@ -3047,8 +3049,8 @@ void TemplateTable::_new() {
__ get_cpool_and_tags(rsi, rax);
// get instanceKlass
- __ movq(rsi, Address(rsi, rdx,
- Address::times_8, sizeof(constantPoolOopDesc)));
+ __ movptr(rsi, Address(rsi, rdx,
+ Address::times_8, sizeof(constantPoolOopDesc)));
// make sure the class we're about to instantiate has been
// resolved. Note: slow_case does a pop of stack, which is why we
@@ -3084,11 +3086,11 @@ void TemplateTable::_new() {
Universe::heap()->supports_inline_contig_alloc() && !CMSIncrementalMode;
if (UseTLAB) {
- __ movq(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
- __ leaq(rbx, Address(rax, rdx, Address::times_1));
- __ cmpq(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
+ __ movptr(rax, Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())));
+ __ lea(rbx, Address(rax, rdx, Address::times_1));
+ __ cmpptr(rbx, Address(r15_thread, in_bytes(JavaThread::tlab_end_offset())));
__ jcc(Assembler::above, allow_shared_alloc ? allocate_shared : slow_case);
- __ movq(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
+ __ movptr(Address(r15_thread, in_bytes(JavaThread::tlab_top_offset())), rbx);
if (ZeroTLAB) {
// the fields have been already cleared
__ jmp(initialize_header);
@@ -3109,13 +3111,13 @@ void TemplateTable::_new() {
__ lea(RtopAddr, top);
__ lea(RendAddr, end);
- __ movq(rax, Address(RtopAddr, 0));
+ __ movptr(rax, Address(RtopAddr, 0));
// For retries rax gets set by cmpxchgq
Label retry;
__ bind(retry);
- __ leaq(rbx, Address(rax, rdx, Address::times_1));
- __ cmpq(rbx, Address(RendAddr, 0));
+ __ lea(rbx, Address(rax, rdx, Address::times_1));
+ __ cmpptr(rbx, Address(RendAddr, 0));
__ jcc(Assembler::above, slow_case);
// Compare rax with the top addr, and if still equal, store the new
@@ -3128,7 +3130,7 @@ void TemplateTable::_new() {
if (os::is_MP()) {
__ lock();
}
- __ cmpxchgq(rbx, Address(RtopAddr, 0));
+ __ cmpxchgptr(rbx, Address(RtopAddr, 0));
// if someone beat us on the allocation, try again, otherwise continue
__ jcc(Assembler::notEqual, retry);
@@ -3157,8 +3159,8 @@ void TemplateTable::_new() {
// initialize object header only.
__ bind(initialize_header);
if (UseBiasedLocking) {
- __ movq(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
- __ movq(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
+ __ movptr(rscratch1, Address(rsi, Klass::prototype_header_offset_in_bytes() + klassOopDesc::klass_part_offset_in_bytes()));
+ __ movptr(Address(rax, oopDesc::mark_offset_in_bytes()), rscratch1);
} else {
__ movptr(Address(rax, oopDesc::mark_offset_in_bytes()),
(intptr_t) markOopDesc::prototype()); // header (address 0x1)
@@ -3215,7 +3217,7 @@ void TemplateTable::arraylength() {
void TemplateTable::checkcast() {
transition(atos, atos);
Label done, is_null, ok_is_subtype, quicked, resolved;
- __ testq(rax, rax); // object is in rax
+ __ testptr(rax, rax); // object is in rax
__ jcc(Assembler::zero, is_null);
// Get cpool & tags index
@@ -3228,7 +3230,7 @@ void TemplateTable::checkcast() {
JVM_CONSTANT_Class);
__ jcc(Assembler::equal, quicked);
__ push(atos); // save receiver for result, and for GC
- __ movq(r12, rcx); // save rcx XXX
+ __ mov(r12, rcx); // save rcx XXX
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
__ movq(rcx, r12); // restore rcx XXX
__ reinit_heapbase();
@@ -3237,8 +3239,8 @@ void TemplateTable::checkcast() {
// Get superklass in rax and subklass in rbx
__ bind(quicked);
- __ movq(rdx, rax); // Save object in rdx; rax needed for subtype check
- __ movq(rax, Address(rcx, rbx,
+ __ mov(rdx, rax); // Save object in rdx; rax needed for subtype check
+ __ movptr(rax, Address(rcx, rbx,
Address::times_8, sizeof(constantPoolOopDesc)));
__ bind(resolved);
@@ -3255,7 +3257,7 @@ void TemplateTable::checkcast() {
// Come here on success
__ bind(ok_is_subtype);
- __ movq(rax, rdx); // Restore object in rdx
+ __ mov(rax, rdx); // Restore object in rdx
// Collect counts on whether this check-cast sees NULLs a lot or not.
if (ProfileInterpreter) {
@@ -3271,7 +3273,7 @@ void TemplateTable::checkcast() {
void TemplateTable::instanceof() {
transition(atos, itos);
Label done, is_null, ok_is_subtype, quicked, resolved;
- __ testq(rax, rax);
+ __ testptr(rax, rax);
__ jcc(Assembler::zero, is_null);
// Get cpool & tags index
@@ -3285,7 +3287,7 @@ void TemplateTable::instanceof() {
__ jcc(Assembler::equal, quicked);
__ push(atos); // save receiver for result, and for GC
- __ movq(r12, rcx); // save rcx
+ __ mov(r12, rcx); // save rcx
call_VM(rax, CAST_FROM_FN_PTR(address, InterpreterRuntime::quicken_io_cc));
__ movq(rcx, r12); // restore rcx
__ reinit_heapbase();
@@ -3296,8 +3298,8 @@ void TemplateTable::instanceof() {
// Get superklass in rax and subklass in rdx
__ bind(quicked);
__ load_klass(rdx, rax);
- __ movq(rax, Address(rcx, rbx,
- Address::times_8, sizeof(constantPoolOopDesc)));
+ __ movptr(rax, Address(rcx, rbx,
+ Address::times_8, sizeof(constantPoolOopDesc)));
__ bind(resolved);
@@ -3340,7 +3342,7 @@ void TemplateTable::_breakpoint() {
CAST_FROM_FN_PTR(address,
InterpreterRuntime::get_original_bytecode_at),
c_rarg1, r13);
- __ movq(rbx, rax);
+ __ mov(rbx, rax);
// post the breakpoint event
__ get_method(c_rarg1);
@@ -3398,52 +3400,52 @@ void TemplateTable::monitorenter() {
// find a free slot in the monitor block (result in c_rarg1)
{
Label entry, loop, exit;
- __ movq(c_rarg3, monitor_block_top); // points to current entry,
+ __ movptr(c_rarg3, monitor_block_top); // points to current entry,
// starting with top-most entry
- __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom
+ __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
// of monitor block
__ jmpb(entry);
__ bind(loop);
// check if current entry is used
- __ cmpq(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int) NULL);
+ __ cmpptr(Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()), (int32_t) NULL_WORD);
// if not used then remember entry in c_rarg1
- __ cmovq(Assembler::equal, c_rarg1, c_rarg3);
+ __ cmov(Assembler::equal, c_rarg1, c_rarg3);
// check if current entry is for same object
- __ cmpq(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
+ __ cmpptr(rax, Address(c_rarg3, BasicObjectLock::obj_offset_in_bytes()));
// if same object then stop searching
__ jccb(Assembler::equal, exit);
// otherwise advance to next entry
- __ addq(c_rarg3, entry_size);
+ __ addptr(c_rarg3, entry_size);
__ bind(entry);
// check if bottom reached
- __ cmpq(c_rarg3, c_rarg2);
+ __ cmpptr(c_rarg3, c_rarg2);
// if not at bottom then check this entry
__ jcc(Assembler::notEqual, loop);
__ bind(exit);
}
- __ testq(c_rarg1, c_rarg1); // check if a slot has been found
+ __ testptr(c_rarg1, c_rarg1); // check if a slot has been found
__ jcc(Assembler::notZero, allocated); // if found, continue with that one
// allocate one if there's no free slot
{
Label entry, loop;
- // 1. compute new pointers // rsp: old expression stack top
- __ movq(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
- __ subq(rsp, entry_size); // move expression stack top
- __ subq(c_rarg1, entry_size); // move expression stack bottom
- __ movq(c_rarg3, rsp); // set start value for copy loop
- __ movq(monitor_block_bot, c_rarg1); // set new monitor block bottom
+ // 1. compute new pointers // rsp: old expression stack top
+ __ movptr(c_rarg1, monitor_block_bot); // c_rarg1: old expression stack bottom
+ __ subptr(rsp, entry_size); // move expression stack top
+ __ subptr(c_rarg1, entry_size); // move expression stack bottom
+ __ mov(c_rarg3, rsp); // set start value for copy loop
+ __ movptr(monitor_block_bot, c_rarg1); // set new monitor block bottom
__ jmp(entry);
// 2. move expression stack contents
__ bind(loop);
- __ movq(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
- // word from old location
- __ movq(Address(c_rarg3, 0), c_rarg2); // and store it at new location
- __ addq(c_rarg3, wordSize); // advance to next word
+ __ movptr(c_rarg2, Address(c_rarg3, entry_size)); // load expression stack
+ // word from old location
+ __ movptr(Address(c_rarg3, 0), c_rarg2); // and store it at new location
+ __ addptr(c_rarg3, wordSize); // advance to next word
__ bind(entry);
- __ cmpq(c_rarg3, c_rarg1); // check if bottom reached
+ __ cmpptr(c_rarg3, c_rarg1); // check if bottom reached
__ jcc(Assembler::notEqual, loop); // if not at bottom then
// copy next word
}
@@ -3456,10 +3458,10 @@ void TemplateTable::monitorenter() {
// handling for async. exceptions work correctly.
  // The object has already been popped from the stack, so the
// expression stack looks correct.
- __ incrementq(r13);
+ __ increment(r13);
// store object
- __ movq(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
+ __ movptr(Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()), rax);
__ lock_object(c_rarg1);
// check to make sure this monitor doesn't cause stack overflow after locking
@@ -3489,22 +3491,22 @@ void TemplateTable::monitorexit() {
// find matching slot
{
Label entry, loop;
- __ movq(c_rarg1, monitor_block_top); // points to current entry,
+ __ movptr(c_rarg1, monitor_block_top); // points to current entry,
// starting with top-most entry
- __ leaq(c_rarg2, monitor_block_bot); // points to word before bottom
+ __ lea(c_rarg2, monitor_block_bot); // points to word before bottom
// of monitor block
__ jmpb(entry);
__ bind(loop);
// check if current entry is for same object
- __ cmpq(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
+ __ cmpptr(rax, Address(c_rarg1, BasicObjectLock::obj_offset_in_bytes()));
// if same object then stop searching
__ jcc(Assembler::equal, found);
// otherwise advance to next entry
- __ addq(c_rarg1, entry_size);
+ __ addptr(c_rarg1, entry_size);
__ bind(entry);
// check if bottom reached
- __ cmpq(c_rarg1, c_rarg2);
+ __ cmpptr(c_rarg1, c_rarg2);
// if not at bottom then check this entry
__ jcc(Assembler::notEqual, loop);
}
@@ -3541,11 +3543,12 @@ void TemplateTable::multianewarray() {
// last dim is on top of stack; we want address of first one:
// first_addr = last_addr + (ndims - 1) * wordSize
if (TaggedStackInterpreter) __ shll(rax, 1); // index*2
- __ leaq(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
+ __ lea(c_rarg1, Address(rsp, rax, Address::times_8, -wordSize));
call_VM(rax,
CAST_FROM_FN_PTR(address, InterpreterRuntime::multianewarray),
c_rarg1);
__ load_unsigned_byte(rbx, at_bcp(3));
if (TaggedStackInterpreter) __ shll(rbx, 1); // index*2
- __ leaq(rsp, Address(rsp, rbx, Address::times_8));
+ __ lea(rsp, Address(rsp, rbx, Address::times_8));
}
+#endif // !CC_INTERP
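The hunks above complete templateTable_x86_64.cpp's move from explicitly sized instructions (movq, addq, cmpq, pushq, incrementq) to the pointer-width-neutral MacroAssembler wrappers (movptr, addptr, cmpptr, push, increment) provided by the merged assembler_x86 files, so the same template-interpreter code can serve 32-bit and 64-bit builds. The sketch below only illustrates the dispatch idea; the class and its names are hypothetical, not HotSpot's:

    #include <cstdint>

    // Hypothetical width-neutral wrapper: the pointer-sized operation maps to
    // the 64-bit form on LP64 builds and to the 32-bit form otherwise.
    struct ToyAssembler {
      void movl(int reg, int32_t imm) { /* emit 32-bit move */ (void)reg; (void)imm; }
      void movq(int reg, int64_t imm) { /* emit 64-bit move */ (void)reg; (void)imm; }

      void movptr(int reg, intptr_t imm) {
    #ifdef _LP64
        movq(reg, imm);   // 64-bit VM: pointers are 8 bytes
    #else
        movl(reg, imm);   // 32-bit VM: pointers are 4 bytes
    #endif
      }
    };

Because the wrapper's operand width tracks the pointer width, call sites that move pointers, compare pointers, or adjust rsp no longer need per-platform forms.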
diff --git a/src/cpu/x86/vm/vm_version_x86_32.cpp b/src/cpu/x86/vm/vm_version_x86_32.cpp
index d65cc0cc3..839a4cdae 100644
--- a/src/cpu/x86/vm/vm_version_x86_32.cpp
+++ b/src/cpu/x86/vm/vm_version_x86_32.cpp
@@ -67,23 +67,23 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
//
// void getPsrInfo(VM_Version::CpuidInfo* cpuid_info);
//
- __ pushl(rbp);
- __ movl(rbp, Address(rsp, 8)); // cpuid_info address
- __ pushl(rbx);
- __ pushl(rsi);
- __ pushfd(); // preserve rbx, and flags
- __ popl(rax);
- __ pushl(rax);
- __ movl(rcx, rax);
+ __ push(rbp);
+ __ movptr(rbp, Address(rsp, 8)); // cpuid_info address
+ __ push(rbx);
+ __ push(rsi);
+ __ pushf(); // preserve rbx, and flags
+ __ pop(rax);
+ __ push(rax);
+ __ mov(rcx, rax);
//
// if we are unable to change the AC flag, we have a 386
//
__ xorl(rax, EFL_AC);
- __ pushl(rax);
- __ popfd();
- __ pushfd();
- __ popl(rax);
- __ cmpl(rax, rcx);
+ __ push(rax);
+ __ popf();
+ __ pushf();
+ __ pop(rax);
+ __ cmpptr(rax, rcx);
__ jccb(Assembler::notEqual, detect_486);
__ movl(rax, CPU_FAMILY_386);
@@ -95,13 +95,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// not support the "cpuid" instruction.
//
__ bind(detect_486);
- __ movl(rax, rcx);
+ __ mov(rax, rcx);
__ xorl(rax, EFL_ID);
- __ pushl(rax);
- __ popfd();
- __ pushfd();
- __ popl(rax);
- __ cmpl(rcx, rax);
+ __ push(rax);
+ __ popf();
+ __ pushf();
+ __ pop(rax);
+ __ cmpptr(rcx, rax);
__ jccb(Assembler::notEqual, detect_586);
__ bind(cpu486);
@@ -113,13 +113,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// at this point, we have a chip which supports the "cpuid" instruction
//
__ bind(detect_586);
- __ xorl(rax, rax);
+ __ xorptr(rax, rax);
__ cpuid();
- __ orl(rax, rax);
+ __ orptr(rax, rax);
__ jcc(Assembler::equal, cpu486); // if cpuid doesn't support an input
// value of at least 1, we give up and
// assume a 486
- __ leal(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -134,13 +134,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(rax, 4); // and rcx already set to 0x0
__ xorl(rcx, rcx);
__ cpuid();
- __ pushl(rax);
+ __ push(rax);
__ andl(rax, 0x1f); // Determine if valid cache parameters used
__ orl(rax, rax); // rax,[4:0] == 0 indicates invalid cache
- __ popl(rax);
+ __ pop(rax);
__ jccb(Assembler::equal, std_cpuid1);
- __ leal(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -152,7 +152,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(std_cpuid1);
__ movl(rax, 1);
__ cpuid();
- __ leal(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -171,7 +171,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
//
__ movl(rax, 0x80000008);
__ cpuid();
- __ leal(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -183,7 +183,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(ext_cpuid5);
__ movl(rax, 0x80000005);
__ cpuid();
- __ leal(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -195,7 +195,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(ext_cpuid1);
__ movl(rax, 0x80000001);
__ cpuid();
- __ leal(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -205,10 +205,10 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// return
//
__ bind(done);
- __ popfd();
- __ popl(rsi);
- __ popl(rbx);
- __ popl(rbp);
+ __ popf();
+ __ pop(rsi);
+ __ pop(rbx);
+ __ pop(rbp);
__ ret(0);
# undef __
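The 32-bit getPsrInfo stub above distinguishes a 386 (the AC bit in EFLAGS cannot be toggled), a 486 without cpuid (the ID bit cannot be toggled), and cpuid-capable chips; the patch only swaps pushl/popl/pushfd/popfd for the width-neutral push/pop/pushf/popf forms. For reference, a minimal sketch of the same ID-bit probe written as GCC/Clang inline assembly — an assumption of this sketch, not something the patch uses:

    // Classic cpuid-support probe: if the ID bit (bit 21, 0x200000) of EFLAGS
    // cannot be toggled, the chip predates the cpuid instruction.
    // Assumes GCC/Clang inline asm on x86; illustrative only.
    static bool cpuid_bit_togglable() {
      unsigned long before, after;
      __asm__ __volatile__(
          "pushf\n\t"
          "pop %0\n\t"              // before = EFLAGS
          "mov %0, %1\n\t"
          "xor $0x200000, %1\n\t"   // flip the ID bit
          "push %1\n\t"
          "popf\n\t"
          "pushf\n\t"
          "pop %1"                  // after = EFLAGS with the attempted flip
          : "=&r"(before), "=&r"(after)
          :
          : "cc");
      return ((before ^ after) & 0x200000) != 0;
    }

The generated stub performs the equivalent flag toggling in machine code and records the result by falling through to the CPU_FAMILY_386/486 paths.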
diff --git a/src/cpu/x86/vm/vm_version_x86_64.cpp b/src/cpu/x86/vm/vm_version_x86_64.cpp
index 71be1f8a2..709d82e6e 100644
--- a/src/cpu/x86/vm/vm_version_x86_64.cpp
+++ b/src/cpu/x86/vm/vm_version_x86_64.cpp
@@ -60,17 +60,17 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
//
// rcx and rdx are first and second argument registers on windows
- __ pushq(rbp);
- __ movq(rbp, c_rarg0); // cpuid_info address
- __ pushq(rbx);
- __ pushq(rsi);
+ __ push(rbp);
+ __ mov(rbp, c_rarg0); // cpuid_info address
+ __ push(rbx);
+ __ push(rsi);
//
// we have a chip which supports the "cpuid" instruction
//
__ xorl(rax, rax);
__ cpuid();
- __ leaq(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid0_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -85,13 +85,13 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ movl(rax, 4);
__ xorl(rcx, rcx); // L1 cache
__ cpuid();
- __ pushq(rax);
+ __ push(rax);
__ andl(rax, 0x1f); // Determine if valid cache parameters used
__ orl(rax, rax); // eax[4:0] == 0 indicates invalid cache
- __ popq(rax);
+ __ pop(rax);
__ jccb(Assembler::equal, std_cpuid1);
- __ leaq(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::dcp_cpuid4_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -103,7 +103,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(std_cpuid1);
__ movl(rax, 1);
__ cpuid();
- __ leaq(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::std_cpuid1_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -122,7 +122,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
//
__ movl(rax, 0x80000008);
__ cpuid();
- __ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid8_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -134,7 +134,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(ext_cpuid5);
__ movl(rax, 0x80000005);
__ cpuid();
- __ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid5_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -146,7 +146,7 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
__ bind(ext_cpuid1);
__ movl(rax, 0x80000001);
__ cpuid();
- __ leaq(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
+ __ lea(rsi, Address(rbp, in_bytes(VM_Version::ext_cpuid1_offset())));
__ movl(Address(rsi, 0), rax);
__ movl(Address(rsi, 4), rbx);
__ movl(Address(rsi, 8), rcx);
@@ -156,9 +156,9 @@ class VM_Version_StubGenerator: public StubCodeGenerator {
// return
//
__ bind(done);
- __ popq(rsi);
- __ popq(rbx);
- __ popq(rbp);
+ __ pop(rsi);
+ __ pop(rbx);
+ __ pop(rbp);
__ ret(0);
# undef __
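The 64-bit variant above skips the 386/486 probing entirely, since every x86-64 processor implements cpuid, and goes straight to filling the CpuidInfo record leaf by leaf (the four movl stores at rsi+0/4/8/12 in each hunk). A rough userland sketch of reading one leaf, assuming GCC/Clang's <cpuid.h> helper rather than anything in HotSpot:

    #include <cpuid.h>   // GCC/Clang helper; an assumption, not a HotSpot header
    #include <cstdio>

    // What the generated stub stores for each leaf: the four result registers.
    struct CpuidLeaf { unsigned eax, ebx, ecx, edx; };

    static bool read_leaf(unsigned leaf, CpuidLeaf* out) {
      return __get_cpuid(leaf, &out->eax, &out->ebx, &out->ecx, &out->edx) != 0;
    }

    int main() {
      CpuidLeaf l1;
      if (read_leaf(1, &l1))
        std::printf("cpuid leaf 1: eax=%08x edx=%08x\n", l1.eax, l1.edx);
      return 0;
    }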
diff --git a/src/cpu/x86/vm/vtableStubs_x86_32.cpp b/src/cpu/x86/vm/vtableStubs_x86_32.cpp
index 09d37901e..cfa1edd62 100644
--- a/src/cpu/x86/vm/vtableStubs_x86_32.cpp
+++ b/src/cpu/x86/vm/vtableStubs_x86_32.cpp
@@ -49,7 +49,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
#ifndef PRODUCT
if (CountCompiledCalls) {
- __ increment(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
+ __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
}
#endif /* PRODUCT */
@@ -58,7 +58,7 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// get receiver klass
address npe_addr = __ pc();
- __ movl(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
// compute entry offset (in words)
int entry_offset = instanceKlass::vtable_start_offset() + vtable_index*vtableEntry::size();
#ifndef PRODUCT
@@ -76,12 +76,12 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
const Register method = rbx;
// load methodOop and target address
- __ movl(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
+ __ movptr(method, Address(rax, entry_offset*wordSize + vtableEntry::method_offset_in_bytes()));
if (DebugVtables) {
Label L;
- __ cmpl(method, NULL_WORD);
+ __ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
- __ cmpl(Address(method, methodOopDesc::from_compiled_offset()), NULL_WORD);
+ __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL");
__ bind(L);
@@ -114,7 +114,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
#ifndef PRODUCT
if (CountCompiledCalls) {
- __ increment(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
+ __ incrementl(ExternalAddress((address) SharedRuntime::nof_megamorphic_calls_addr()));
}
#endif /* PRODUCT */
// get receiver (need to skip return address on top of stack)
@@ -123,16 +123,16 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// get receiver klass (also an implicit null-check)
address npe_addr = __ pc();
- __ movl(rbx, Address(rcx, oopDesc::klass_offset_in_bytes()));
+ __ movptr(rbx, Address(rcx, oopDesc::klass_offset_in_bytes()));
- __ movl(rsi, rbx); // Save klass in free register
+ __ mov(rsi, rbx); // Save klass in free register
// Most registers are in use, so save a few
- __ pushl(rdx);
+ __ push(rdx);
// compute itable entry offset (in words)
const int base = instanceKlass::vtable_start_offset() * wordSize;
assert(vtableEntry::size() * wordSize == 4, "adjust the scaling in the code below");
__ movl(rdx, Address(rbx, instanceKlass::vtable_length_offset() * wordSize)); // Get length of vtable
- __ leal(rbx, Address(rbx, rdx, Address::times_4, base));
+ __ lea(rbx, Address(rbx, rdx, Address::times_ptr, base));
if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary
__ round_to(rbx, BytesPerLong);
@@ -143,16 +143,16 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ jmpb(entry);
__ bind(next);
- __ addl(rbx, itableOffsetEntry::size() * wordSize);
+ __ addptr(rbx, itableOffsetEntry::size() * wordSize);
__ bind(entry);
// If the entry is NULL then we've reached the end of the table
// without finding the expected interface, so throw an exception
- __ movl(rdx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
- __ testl(rdx, rdx);
+ __ movptr(rdx, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
+ __ testptr(rdx, rdx);
__ jcc(Assembler::zero, throw_icce);
- __ cmpl(rax, rdx);
+ __ cmpptr(rax, rdx);
__ jcc(Assembler::notEqual, next);
// We found a hit, move offset into rbx,
@@ -163,10 +163,10 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// Get methodOop and entrypoint for compiler
const Register method = rbx;
- __ movl(method, Address(rsi, rdx, Address::times_1, method_offset));
+ __ movptr(method, Address(rsi, rdx, Address::times_1, method_offset));
// Restore saved register, before possible trap.
- __ popl(rdx);
+ __ pop(rdx);
// method (rbx): methodOop
// rcx: receiver
@@ -174,9 +174,9 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
#ifdef ASSERT
if (DebugVtables) {
Label L1;
- __ cmpl(method, NULL_WORD);
+ __ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L1);
- __ cmpl(Address(method, methodOopDesc::from_compiled_offset()), NULL_WORD);
+ __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L1);
__ stop("methodOop is null");
__ bind(L1);
@@ -188,7 +188,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ bind(throw_icce);
// Restore saved register
- __ popl(rdx);
+ __ pop(rdx);
__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
masm->flush();
diff --git a/src/cpu/x86/vm/vtableStubs_x86_64.cpp b/src/cpu/x86/vm/vtableStubs_x86_64.cpp
index 931b3aa90..7ae875b73 100644
--- a/src/cpu/x86/vm/vtableStubs_x86_64.cpp
+++ b/src/cpu/x86/vm/vtableStubs_x86_64.cpp
@@ -79,14 +79,14 @@ VtableStub* VtableStubs::create_vtable_stub(int vtable_index) {
// load methodOop and target address
const Register method = rbx;
- __ movq(method, Address(rax,
- entry_offset * wordSize +
- vtableEntry::method_offset_in_bytes()));
+ __ movptr(method, Address(rax,
+ entry_offset * wordSize +
+ vtableEntry::method_offset_in_bytes()));
if (DebugVtables) {
Label L;
- __ cmpq(method, (int)NULL);
+ __ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L);
- __ cmpq(Address(method, methodOopDesc::from_compiled_offset()), (int)NULL_WORD);
+ __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L);
__ stop("Vtable entry is NULL");
__ bind(L);
@@ -138,7 +138,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// when there are mistakes in this assembly code that could generate
// a spurious fault. Ask me how I know...
- __ pushq(j_rarg1); // Most registers are in use, so save one
+ __ push(j_rarg1); // Most registers are in use, so save one
// compute itable entry offset (in words)
const int base = instanceKlass::vtable_start_offset() * wordSize;
@@ -147,27 +147,27 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
// Get length of vtable
__ movl(j_rarg1,
Address(rbx, instanceKlass::vtable_length_offset() * wordSize));
- __ leaq(rbx, Address(rbx, j_rarg1, Address::times_8, base));
+ __ lea(rbx, Address(rbx, j_rarg1, Address::times_8, base));
if (HeapWordsPerLong > 1) {
// Round up to align_object_offset boundary
- __ round_to_q(rbx, BytesPerLong);
+ __ round_to(rbx, BytesPerLong);
}
Label hit, next, entry, throw_icce;
__ jmpb(entry);
__ bind(next);
- __ addq(rbx, itableOffsetEntry::size() * wordSize);
+ __ addptr(rbx, itableOffsetEntry::size() * wordSize);
__ bind(entry);
// If the entry is NULL then we've reached the end of the table
// without finding the expected interface, so throw an exception
- __ movq(j_rarg1, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
- __ testq(j_rarg1, j_rarg1);
+ __ movptr(j_rarg1, Address(rbx, itableOffsetEntry::interface_offset_in_bytes()));
+ __ testptr(j_rarg1, j_rarg1);
__ jcc(Assembler::zero, throw_icce);
- __ cmpq(rax, j_rarg1);
+ __ cmpptr(rax, j_rarg1);
__ jccb(Assembler::notEqual, next);
// We found a hit, move offset into j_rarg1
@@ -184,10 +184,10 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ load_klass(rax, j_rarg0);
const Register method = rbx;
- __ movq(method, Address(rax, j_rarg1, Address::times_1, method_offset));
+ __ movptr(method, Address(rax, j_rarg1, Address::times_1, method_offset));
// Restore saved register, before possible trap.
- __ popq(j_rarg1);
+ __ pop(j_rarg1);
// method (rbx): methodOop
// j_rarg0: receiver
@@ -196,9 +196,9 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
#ifdef ASSERT
if (DebugVtables) {
Label L2;
- __ cmpq(method, (int)NULL);
+ __ cmpptr(method, (int32_t)NULL_WORD);
__ jcc(Assembler::equal, L2);
- __ cmpq(Address(method, methodOopDesc::from_compiled_offset()), (int)NULL_WORD);
+ __ cmpptr(Address(method, methodOopDesc::from_compiled_offset()), (int32_t)NULL_WORD);
__ jcc(Assembler::notZero, L2);
__ stop("compiler entrypoint is null");
__ bind(L2);
@@ -212,7 +212,7 @@ VtableStub* VtableStubs::create_itable_stub(int vtable_index) {
__ bind(throw_icce);
// Restore saved register
- __ popq(j_rarg1);
+ __ pop(j_rarg1);
__ jump(RuntimeAddress(StubRoutines::throw_IncompatibleClassChangeError_entry()));
__ flush();
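In both vtableStubs files the itable stub scans the itableOffsetEntry records that follow the vtable: a NULL interface klass ends the table and routes to throw_icce, while a match adds the stored offset to the receiver's klass and indexes the methodOop at method_offset. The patch only widens the pointer loads, pushes, and compares. A flattened, purely illustrative model of that search (names are hypothetical, not HotSpot's):

    // Illustrative model of the itable scan performed by the stubs above.
    struct ItableOffsetEntry { const void* interface_klass; int offset; };
    struct Method;

    static Method* find_itable_method(const ItableOffsetEntry* table,
                                      char* klass_base,
                                      const void* iface,
                                      int method_index) {
      for (const ItableOffsetEntry* e = table; ; ++e) {
        if (e->interface_klass == nullptr) return nullptr;   // throw_icce path
        if (e->interface_klass == iface) {
          Method** methods = reinterpret_cast<Method**>(klass_base + e->offset);
          return methods[method_index];                      // methodOop to call
        }
      }
    }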
diff --git a/src/cpu/x86/vm/x86_32.ad b/src/cpu/x86/vm/x86_32.ad
index 35db833e9..a4a419931 100644
--- a/src/cpu/x86/vm/x86_32.ad
+++ b/src/cpu/x86/vm/x86_32.ad
@@ -236,7 +236,7 @@ reg_class xdb_reg7( XMM7a,XMM7b );
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source %{
-#define RELOC_IMM32 Assembler::imm32_operand
+#define RELOC_IMM32 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand
#define __ _masm.
@@ -593,11 +593,11 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
if (VerifyStackAtCalls) {
Label L;
MacroAssembler masm(&cbuf);
- masm.pushl(rax);
- masm.movl(rax, rsp);
- masm.andl(rax, StackAlignmentInBytes-1);
- masm.cmpl(rax, StackAlignmentInBytes-wordSize);
- masm.popl(rax);
+ masm.push(rax);
+ masm.mov(rax, rsp);
+ masm.andptr(rax, StackAlignmentInBytes-1);
+ masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
+ masm.pop(rax);
masm.jcc(Assembler::equal, L);
masm.stop("Stack is not properly aligned!");
masm.bind(L);
@@ -1150,7 +1150,8 @@ void emit_java_to_interp(CodeBuffer &cbuf ) {
__ relocate(static_stub_Relocation::spec(mark), RELOC_IMM32);
// static stub relocation also tags the methodOop in the code-stream.
__ movoop(rbx, (jobject)NULL); // method is zapped till fixup time
- __ jump(RuntimeAddress((address)-1));
+ // This is recognized as unresolved by relocs/nativeInst/ic code
+ __ jump(RuntimeAddress(__ pc()));
__ end_a_stub();
// Update current stubs pointer and restore code_end.
@@ -1181,7 +1182,7 @@ void MachUEPNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const {
#ifdef ASSERT
uint code_size = cbuf.code_size();
#endif
- masm.cmpl(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
+ masm.cmpptr(rax, Address(rcx, oopDesc::klass_offset_in_bytes()));
masm.jump_cc(Assembler::notEqual,
RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
/* WARNING these NOPs are critical so that verified entry point is properly
@@ -1687,20 +1688,20 @@ encode %{
// Compare super with sub directly, since super is not in its own SSA.
// The compiler used to emit this test, but we fold it in here,
// to allow platform-specific tweaking on sparc.
- __ cmpl(Reax, Resi);
+ __ cmpptr(Reax, Resi);
__ jcc(Assembler::equal, hit);
#ifndef PRODUCT
- __ increment(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
+ __ incrementl(ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
#endif //PRODUCT
- __ movl(Redi,Address(Resi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
+ __ movptr(Redi,Address(Resi,sizeof(oopDesc) + Klass::secondary_supers_offset_in_bytes()));
__ movl(Recx,Address(Redi,arrayOopDesc::length_offset_in_bytes()));
- __ addl(Redi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ __ addptr(Redi,arrayOopDesc::base_offset_in_bytes(T_OBJECT));
__ repne_scan();
__ jcc(Assembler::notEqual, miss);
- __ movl(Address(Resi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()),Reax);
+ __ movptr(Address(Resi,sizeof(oopDesc) + Klass::secondary_super_cache_offset_in_bytes()),Reax);
__ bind(hit);
if( $primary )
- __ xorl(Redi,Redi);
+ __ xorptr(Redi,Redi);
__ bind(miss);
%}
@@ -1749,15 +1750,15 @@ encode %{
// optimizer if the C function is a pure function.
__ ffree(0);
} else if (rt == T_FLOAT) {
- __ leal(rsp, Address(rsp, -4));
+ __ lea(rsp, Address(rsp, -4));
__ fstp_s(Address(rsp, 0));
__ movflt(xmm0, Address(rsp, 0));
- __ leal(rsp, Address(rsp, 4));
+ __ lea(rsp, Address(rsp, 4));
} else if (rt == T_DOUBLE) {
- __ leal(rsp, Address(rsp, -8));
+ __ lea(rsp, Address(rsp, -8));
__ fstp_d(Address(rsp, 0));
__ movdbl(xmm0, Address(rsp, 0));
- __ leal(rsp, Address(rsp, 8));
+ __ lea(rsp, Address(rsp, 8));
}
}
%}
@@ -2888,10 +2889,10 @@ encode %{
__ jccb(Assembler::equal, done);
__ jccb(Assembler::above, inc);
__ bind(nan);
- __ decrement(as_Register($dst$$reg));
+ __ decrement(as_Register($dst$$reg)); // NO L qqq
__ jmpb(done);
__ bind(inc);
- __ increment(as_Register($dst$$reg));
+ __ increment(as_Register($dst$$reg)); // NO L qqq
__ bind(done);
%}
@@ -3158,7 +3159,7 @@ encode %{
enc_class mov_i2x(regXD dst, eRegI src) %{
MacroAssembler _masm(&cbuf);
- __ movd(as_XMMRegister($dst$$reg), as_Register($src$$reg));
+ __ movdl(as_XMMRegister($dst$$reg), as_Register($src$$reg));
%}
@@ -3258,30 +3259,30 @@ encode %{
}
if (EmitSync & 1) {
// set box->dhw = unused_mark (3)
- // Force all sync thru slow-path: slow_enter() and slow_exit()
- masm.movl (Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ;
- masm.cmpl (rsp, 0) ;
- } else
- if (EmitSync & 2) {
- Label DONE_LABEL ;
+ // Force all sync thru slow-path: slow_enter() and slow_exit()
+ masm.movptr (Address(boxReg, 0), int32_t(markOopDesc::unused_mark())) ;
+ masm.cmpptr (rsp, (int32_t)0) ;
+ } else
+ if (EmitSync & 2) {
+ Label DONE_LABEL ;
if (UseBiasedLocking) {
// Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
}
- masm.movl (tmpReg, Address(objReg, 0)) ; // fetch markword
- masm.orl (tmpReg, 0x1);
- masm.movl (Address(boxReg, 0), tmpReg); // Anticipate successful CAS
+ masm.movptr(tmpReg, Address(objReg, 0)) ; // fetch markword
+ masm.orptr (tmpReg, 0x1);
+ masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
if (os::is_MP()) { masm.lock(); }
- masm.cmpxchg(boxReg, Address(objReg, 0)); // Updates tmpReg
+ masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
masm.jcc(Assembler::equal, DONE_LABEL);
// Recursive locking
- masm.subl(tmpReg, rsp);
- masm.andl(tmpReg, 0xFFFFF003 );
- masm.movl(Address(boxReg, 0), tmpReg);
- masm.bind(DONE_LABEL) ;
- } else {
- // Possible cases that we'll encounter in fast_lock
+ masm.subptr(tmpReg, rsp);
+ masm.andptr(tmpReg, (int32_t) 0xFFFFF003 );
+ masm.movptr(Address(boxReg, 0), tmpReg);
+ masm.bind(DONE_LABEL) ;
+ } else {
+ // Possible cases that we'll encounter in fast_lock
// ------------------------------------------------
// * Inflated
// -- unlocked
@@ -3310,15 +3311,15 @@ encode %{
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
}
- masm.movl (tmpReg, Address(objReg, 0)) ; // [FETCH]
- masm.testl (tmpReg, 0x02) ; // Inflated v (Stack-locked or neutral)
+ masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
+ masm.testptr(tmpReg, 0x02) ; // Inflated v (Stack-locked or neutral)
masm.jccb (Assembler::notZero, IsInflated) ;
// Attempt stack-locking ...
- masm.orl (tmpReg, 0x1);
- masm.movl (Address(boxReg, 0), tmpReg); // Anticipate successful CAS
+ masm.orptr (tmpReg, 0x1);
+ masm.movptr(Address(boxReg, 0), tmpReg); // Anticipate successful CAS
if (os::is_MP()) { masm.lock(); }
- masm.cmpxchg(boxReg, Address(objReg, 0)); // Updates tmpReg
+ masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
if (_counters != NULL) {
masm.cond_inc32(Assembler::equal,
ExternalAddress((address)_counters->fast_path_entry_count_addr()));
@@ -3326,9 +3327,9 @@ encode %{
masm.jccb (Assembler::equal, DONE_LABEL);
// Recursive locking
- masm.subl(tmpReg, rsp);
- masm.andl(tmpReg, 0xFFFFF003 );
- masm.movl(Address(boxReg, 0), tmpReg);
+ masm.subptr(tmpReg, rsp);
+ masm.andptr(tmpReg, 0xFFFFF003 );
+ masm.movptr(Address(boxReg, 0), tmpReg);
if (_counters != NULL) {
masm.cond_inc32(Assembler::equal,
ExternalAddress((address)_counters->fast_path_entry_count_addr()));
@@ -3360,36 +3361,33 @@ encode %{
   // This is convenient but results in a ST-before-CAS penalty.  The following CAS suffers
// additional latency as we have another ST in the store buffer that must drain.
- if (EmitSync & 8192) {
- masm.movl (Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
- masm.get_thread (scrReg) ;
- masm.movl (boxReg, tmpReg); // consider: LEA box, [tmp-2]
- masm.movl (tmpReg, 0); // consider: xor vs mov
- if (os::is_MP()) { masm.lock(); }
- masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
- } else
+ if (EmitSync & 8192) {
+ masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
+ masm.get_thread (scrReg) ;
+ masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
+ masm.movptr(tmpReg, 0); // consider: xor vs mov
+ if (os::is_MP()) { masm.lock(); }
+ masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+ } else
if ((EmitSync & 128) == 0) { // avoid ST-before-CAS
- masm.movl (scrReg, boxReg) ;
- masm.movl (boxReg, tmpReg); // consider: LEA box, [tmp-2]
+ masm.movptr(scrReg, boxReg) ;
+ masm.movptr(boxReg, tmpReg); // consider: LEA box, [tmp-2]
// Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) {
// prefetchw [eax + Offset(_owner)-2]
- masm.emit_raw (0x0F) ;
- masm.emit_raw (0x0D) ;
- masm.emit_raw (0x48) ;
- masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ;
+ masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
}
if ((EmitSync & 64) == 0) {
// Optimistic form: consider XORL tmpReg,tmpReg
- masm.movl (tmpReg, 0 ) ;
- } else {
+ masm.movptr(tmpReg, 0 ) ;
+ } else {
// Can suffer RTS->RTO upgrades on shared or cold $ lines
// Test-And-CAS instead of CAS
- masm.movl (tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
- masm.testl (tmpReg, tmpReg) ; // Locked ?
- masm.jccb (Assembler::notZero, DONE_LABEL) ;
+ masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
+ masm.testptr(tmpReg, tmpReg) ; // Locked ?
+ masm.jccb (Assembler::notZero, DONE_LABEL) ;
}
// Appears unlocked - try to swing _owner from null to non-null.
@@ -3401,41 +3399,38 @@ encode %{
// (rsp or the address of the box) into m->owner is harmless.
// Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
if (os::is_MP()) { masm.lock(); }
- masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
- masm.movl (Address(scrReg, 0), 3) ; // box->_displaced_header = 3
- masm.jccb (Assembler::notZero, DONE_LABEL) ;
+ masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+ masm.movptr(Address(scrReg, 0), 3) ; // box->_displaced_header = 3
+ masm.jccb (Assembler::notZero, DONE_LABEL) ;
masm.get_thread (scrReg) ; // beware: clobbers ICCs
- masm.movl (Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ;
- masm.xorl (boxReg, boxReg) ; // set icc.ZFlag = 1 to indicate success
-
- // If the CAS fails we can either retry or pass control to the slow-path.
- // We use the latter tactic.
+ masm.movptr(Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2), scrReg) ;
+ masm.xorptr(boxReg, boxReg) ; // set icc.ZFlag = 1 to indicate success
+
+ // If the CAS fails we can either retry or pass control to the slow-path.
+ // We use the latter tactic.
// Pass the CAS result in the icc.ZFlag into DONE_LABEL
// If the CAS was successful ...
// Self has acquired the lock
// Invariant: m->_recursions should already be 0, so we don't need to explicitly set it.
// Intentional fall-through into DONE_LABEL ...
} else {
- masm.movl (Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
- masm.movl (boxReg, tmpReg) ;
+ masm.movptr(Address(boxReg, 0), 3) ; // results in ST-before-CAS penalty
+ masm.movptr(boxReg, tmpReg) ;
// Using a prefetchw helps avoid later RTS->RTO upgrades and cache probes
if ((EmitSync & 2048) && VM_Version::supports_3dnow() && os::is_MP()) {
// prefetchw [eax + Offset(_owner)-2]
- masm.emit_raw (0x0F) ;
- masm.emit_raw (0x0D) ;
- masm.emit_raw (0x48) ;
- masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ;
+ masm.prefetchw(Address(rax, ObjectMonitor::owner_offset_in_bytes()-2));
}
if ((EmitSync & 64) == 0) {
// Optimistic form
- masm.xorl (tmpReg, tmpReg) ;
- } else {
+ masm.xorptr (tmpReg, tmpReg) ;
+ } else {
// Can suffer RTS->RTO upgrades on shared or cold $ lines
- masm.movl (tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
- masm.testl (tmpReg, tmpReg) ; // Locked ?
- masm.jccb (Assembler::notZero, DONE_LABEL) ;
+ masm.movptr(tmpReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ; // rax, = m->_owner
+ masm.testptr(tmpReg, tmpReg) ; // Locked ?
+ masm.jccb (Assembler::notZero, DONE_LABEL) ;
}
// Appears unlocked - try to swing _owner from null to non-null.
@@ -3443,7 +3438,7 @@ encode %{
// Invariant: tmpReg == 0. tmpReg is EAX which is the implicit cmpxchg comparand.
masm.get_thread (scrReg) ;
if (os::is_MP()) { masm.lock(); }
- masm.cmpxchg (scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+ masm.cmpxchgptr(scrReg, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
// If the CAS fails we can either retry or pass control to the slow-path.
// We use the latter tactic.
@@ -3514,19 +3509,19 @@ encode %{
if (EmitSync & 4) {
// Disable - inhibit all inlining. Force control through the slow-path
- masm.cmpl (rsp, 0) ;
- } else
+ masm.cmpptr (rsp, 0) ;
+ } else
if (EmitSync & 8) {
Label DONE_LABEL ;
if (UseBiasedLocking) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
// classic stack-locking code ...
- masm.movl (tmpReg, Address(boxReg, 0)) ;
- masm.testl (tmpReg, tmpReg) ;
+ masm.movptr(tmpReg, Address(boxReg, 0)) ;
+ masm.testptr(tmpReg, tmpReg) ;
masm.jcc (Assembler::zero, DONE_LABEL) ;
if (os::is_MP()) { masm.lock(); }
- masm.cmpxchg(tmpReg, Address(objReg, 0)); // Uses EAX which is box
+ masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box
masm.bind(DONE_LABEL);
} else {
Label DONE_LABEL, Stacked, CheckSucc, Inflated ;
@@ -3536,12 +3531,12 @@ encode %{
if (UseBiasedLocking) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
-
- masm.cmpl (Address(boxReg, 0), 0) ; // Examine the displaced header
- masm.movl (tmpReg, Address(objReg, 0)) ; // Examine the object's markword
+
+ masm.cmpptr(Address(boxReg, 0), 0) ; // Examine the displaced header
+ masm.movptr(tmpReg, Address(objReg, 0)) ; // Examine the object's markword
masm.jccb (Assembler::zero, DONE_LABEL) ; // 0 indicates recursive stack-lock
- masm.testl (tmpReg, 0x02) ; // Inflated?
+ masm.testptr(tmpReg, 0x02) ; // Inflated?
masm.jccb (Assembler::zero, Stacked) ;
masm.bind (Inflated) ;
@@ -3571,11 +3566,8 @@ encode %{
masm.get_thread (boxReg) ;
if ((EmitSync & 4096) && VM_Version::supports_3dnow() && os::is_MP()) {
- // prefetchw [ebx + Offset(_owner)-2]
- masm.emit_raw (0x0F) ;
- masm.emit_raw (0x0D) ;
- masm.emit_raw (0x4B) ;
- masm.emit_raw (ObjectMonitor::owner_offset_in_bytes()-2) ;
+ // prefetchw [ebx + Offset(_owner)-2]
+ masm.prefetchw(Address(rbx, ObjectMonitor::owner_offset_in_bytes()-2));
}
// Note that we could employ various encoding schemes to reduce
@@ -3584,22 +3576,22 @@ encode %{
// In practice the chain of fetches doesn't seem to impact performance, however.
if ((EmitSync & 65536) == 0 && (EmitSync & 256)) {
// Attempt to reduce branch density - AMD's branch predictor.
- masm.xorl (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
- masm.orl (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
- masm.orl (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
- masm.orl (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
- masm.jccb (Assembler::notZero, DONE_LABEL) ;
- masm.movl (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
- masm.jmpb (DONE_LABEL) ;
- } else {
- masm.xorl (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
- masm.orl (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
- masm.jccb (Assembler::notZero, DONE_LABEL) ;
- masm.movl (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
- masm.orl (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
- masm.jccb (Assembler::notZero, CheckSucc) ;
- masm.movl (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
- masm.jmpb (DONE_LABEL) ;
+ masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+ masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
+ masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
+ masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
+ masm.jccb (Assembler::notZero, DONE_LABEL) ;
+ masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
+ masm.jmpb (DONE_LABEL) ;
+ } else {
+ masm.xorptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+ masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
+ masm.jccb (Assembler::notZero, DONE_LABEL) ;
+ masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
+ masm.orptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
+ masm.jccb (Assembler::notZero, CheckSucc) ;
+ masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
+ masm.jmpb (DONE_LABEL) ;
}
  // The following code fragment (EmitSync & 65536) improves the performance of
@@ -3615,9 +3607,9 @@ encode %{
masm.bind (CheckSucc) ;
// Optional pre-test ... it's safe to elide this
- if ((EmitSync & 16) == 0) {
- masm.cmpl (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
- masm.jccb (Assembler::zero, LGoSlowPath) ;
+ if ((EmitSync & 16) == 0) {
+ masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
+ masm.jccb (Assembler::zero, LGoSlowPath) ;
}
// We have a classic Dekker-style idiom:
@@ -3645,39 +3637,37 @@ encode %{
//
// We currently use (3), although it's likely that switching to (2)
// is correct for the future.
-
- masm.movl (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
- if (os::is_MP()) {
- if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
- masm.emit_raw (0x0F) ; // MFENCE ...
- masm.emit_raw (0xAE) ;
- masm.emit_raw (0xF0) ;
- } else {
- masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
+
+ masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), 0) ;
+ if (os::is_MP()) {
+ if (VM_Version::supports_sse2() && 1 == FenceInstruction) {
+ masm.mfence();
+ } else {
+ masm.lock () ; masm.addptr(Address(rsp, 0), 0) ;
}
}
// Ratify _succ remains non-null
- masm.cmpl (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
- masm.jccb (Assembler::notZero, LSuccess) ;
+ masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), 0) ;
+ masm.jccb (Assembler::notZero, LSuccess) ;
- masm.xorl (boxReg, boxReg) ; // box is really EAX
+ masm.xorptr(boxReg, boxReg) ; // box is really EAX
if (os::is_MP()) { masm.lock(); }
- masm.cmpxchg(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+ masm.cmpxchgptr(rsp, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
masm.jccb (Assembler::notEqual, LSuccess) ;
   // Since we're low on registers we installed rsp as a placeholder in _owner.
// Now install Self over rsp. This is safe as we're transitioning from
   // non-null to non-null
masm.get_thread (boxReg) ;
- masm.movl (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ;
+ masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), boxReg) ;
// Intentional fall-through into LGoSlowPath ...
- masm.bind (LGoSlowPath) ;
- masm.orl (boxReg, 1) ; // set ICC.ZF=0 to indicate failure
- masm.jmpb (DONE_LABEL) ;
+ masm.bind (LGoSlowPath) ;
+ masm.orptr(boxReg, 1) ; // set ICC.ZF=0 to indicate failure
+ masm.jmpb (DONE_LABEL) ;
- masm.bind (LSuccess) ;
- masm.xorl (boxReg, boxReg) ; // set ICC.ZF=1 to indicate success
- masm.jmpb (DONE_LABEL) ;
+ masm.bind (LSuccess) ;
+ masm.xorptr(boxReg, boxReg) ; // set ICC.ZF=1 to indicate success
+ masm.jmpb (DONE_LABEL) ;
}
masm.bind (Stacked) ;
@@ -3686,9 +3676,9 @@ encode %{
// Try to reset the header to displaced header.
// The "box" value on the stack is stable, so we can reload
// and be assured we observe the same value as above.
- masm.movl (tmpReg, Address(boxReg, 0)) ;
+ masm.movptr(tmpReg, Address(boxReg, 0)) ;
if (os::is_MP()) { masm.lock(); }
- masm.cmpxchg(tmpReg, Address(objReg, 0)); // Uses EAX which is box
+ masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses EAX which is box
  // Intentional fall-through into DONE_LABEL
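Besides the usual width renames, the fast_lock/fast_unlock encodings above stop hand-emitting instruction bytes: the sequences 0F 0D /1 (prefetchw [eax+disp8] and [ebx+disp8], per the adjacent comments) become masm.prefetchw(Address(...)), and 0F AE F0 (mfence) becomes masm.mfence(). For reference, rough userland equivalents using compiler builtins — assumes GCC/Clang and SSE2, not part of the patch:

    #include <emmintrin.h>   // _mm_mfence (SSE2)

    // Rough equivalents of the two sequences the patch stops emitting as raw
    // bytes; illustrative only.
    static inline void prefetch_for_write(const void* p) {
      __builtin_prefetch(p, 1);   // prefetch with intent to write
    }

    static inline void full_fence() {
      _mm_mfence();               // encodes as 0F AE F0
    }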
@@ -3720,12 +3710,12 @@ encode %{
int count_offset = java_lang_String::count_offset_in_bytes();
int base_offset = arrayOopDesc::base_offset_in_bytes(T_CHAR);
- masm.movl(rax, Address(rsi, value_offset));
+ masm.movptr(rax, Address(rsi, value_offset));
masm.movl(rcx, Address(rsi, offset_offset));
- masm.leal(rax, Address(rax, rcx, Address::times_2, base_offset));
- masm.movl(rbx, Address(rdi, value_offset));
+ masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset));
+ masm.movptr(rbx, Address(rdi, value_offset));
masm.movl(rcx, Address(rdi, offset_offset));
- masm.leal(rbx, Address(rbx, rcx, Address::times_2, base_offset));
+ masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset));
// Compute the minimum of the string lengths(rsi) and the
// difference of the string lengths (stack)
@@ -3736,14 +3726,14 @@ encode %{
masm.movl(rsi, Address(rsi, count_offset));
masm.movl(rcx, rdi);
masm.subl(rdi, rsi);
- masm.pushl(rdi);
+ masm.push(rdi);
masm.cmovl(Assembler::lessEqual, rsi, rcx);
} else {
masm.movl(rdi, Address(rdi, count_offset));
masm.movl(rcx, Address(rsi, count_offset));
masm.movl(rsi, rdi);
masm.subl(rdi, rcx);
- masm.pushl(rdi);
+ masm.push(rdi);
masm.jcc(Assembler::lessEqual, ECX_GOOD_LABEL);
masm.movl(rsi, rcx);
// rsi holds min, rcx is unused
@@ -3761,14 +3751,14 @@ encode %{
// Compare first characters
masm.subl(rcx, rdi);
masm.jcc(Assembler::notZero, POP_LABEL);
- masm.decrement(rsi);
+ masm.decrementl(rsi);
masm.jcc(Assembler::zero, LENGTH_DIFF_LABEL);
{
// Check after comparing first character to see if strings are equivalent
Label LSkip2;
// Check if the strings start at same location
- masm.cmpl(rbx,rax);
+ masm.cmpptr(rbx,rax);
masm.jcc(Assembler::notEqual, LSkip2);
// Check if the length difference is zero (from stack)
@@ -3780,8 +3770,8 @@ encode %{
}
// Shift rax, and rbx, to the end of the arrays, negate min
- masm.leal(rax, Address(rax, rsi, Address::times_2, 2));
- masm.leal(rbx, Address(rbx, rsi, Address::times_2, 2));
+ masm.lea(rax, Address(rax, rsi, Address::times_2, 2));
+ masm.lea(rbx, Address(rbx, rsi, Address::times_2, 2));
masm.negl(rsi);
// Compare the rest of the characters
@@ -3790,18 +3780,18 @@ encode %{
masm.load_unsigned_word(rdi, Address(rax, rsi, Address::times_2, 0));
masm.subl(rcx, rdi);
masm.jcc(Assembler::notZero, POP_LABEL);
- masm.increment(rsi);
+ masm.incrementl(rsi);
masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);
// Strings are equal up to min length. Return the length difference.
masm.bind(LENGTH_DIFF_LABEL);
- masm.popl(rcx);
+ masm.pop(rcx);
masm.jmp(DONE_LABEL);
// Discard the stored length difference
masm.bind(POP_LABEL);
- masm.addl(rsp, 4);
-
+ masm.addptr(rsp, 4);
+
// That's it
masm.bind(DONE_LABEL);
%}
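The string_compare encoding above implements String.compareTo: load both value arrays, bias them by the string offsets, compute the minimum length and the length difference, compare characters until a mismatch, and return either the character difference or the length difference. The hunks again only widen the pointer-carrying moves and the push/pop of the saved difference. A plain C++ sketch of the algorithm being encoded, over UTF-16 code units as in this code base's String (illustrative only):

    #include <algorithm>
    #include <cstdint>

    // Compare char-by-char over the shorter length, then fall back to the
    // length difference -- the same result the intrinsic computes.
    static int string_compare(const uint16_t* a, int alen,
                              const uint16_t* b, int blen) {
      int min = std::min(alen, blen);
      for (int i = 0; i < min; ++i) {
        int diff = int(a[i]) - int(b[i]);
        if (diff != 0) return diff;
      }
      return alen - blen;
    }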
@@ -4315,7 +4305,8 @@ encode %{
enc_class enc_membar_volatile %{
MacroAssembler masm(&cbuf);
- masm.membar();
+ masm.membar(Assembler::Membar_mask_bits(Assembler::StoreLoad |
+ Assembler::StoreStore));
%}
// Atomically load the volatile long
@@ -11151,7 +11142,7 @@ instruct convXI2XD_reg(regXD dst, eRegI src)
format %{ "MOVD $dst,$src\n\t"
"CVTDQ2PD $dst,$dst\t# i2d" %}
ins_encode %{
- __ movd($dst$$XMMRegister, $src$$Register);
+ __ movdl($dst$$XMMRegister, $src$$Register);
__ cvtdq2pd($dst$$XMMRegister, $dst$$XMMRegister);
%}
ins_pipe(pipe_slow); // XXX
@@ -11249,7 +11240,7 @@ instruct convI2X_reg(regX dst, eRegI src) %{
format %{ "MOVD $dst,$src\n\t"
"CVTDQ2PS $dst,$dst\t# i2f" %}
ins_encode %{
- __ movd($dst$$XMMRegister, $src$$Register);
+ __ movdl($dst$$XMMRegister, $src$$Register);
__ cvtdq2ps($dst$$XMMRegister, $dst$$XMMRegister);
%}
ins_pipe(pipe_slow); // XXX
@@ -12262,7 +12253,7 @@ instruct cmpL3_reg_reg(eSIRegI dst, eRegL src1, eRegL src2, eFlagsReg flags ) %{
"done:" %}
ins_encode %{
Label p_one, m_one, done;
- __ xorl($dst$$Register, $dst$$Register);
+ __ xorptr($dst$$Register, $dst$$Register);
__ cmpl(HIGH_FROM_LOW($src1$$Register), HIGH_FROM_LOW($src2$$Register));
__ jccb(Assembler::less, m_one);
__ jccb(Assembler::greater, p_one);
@@ -12270,10 +12261,10 @@ instruct cmpL3_reg_reg(eSIRegI dst, eRegL src1, eRegL src2, eFlagsReg flags ) %{
__ jccb(Assembler::below, m_one);
__ jccb(Assembler::equal, done);
__ bind(p_one);
- __ increment($dst$$Register);
+ __ incrementl($dst$$Register);
__ jmpb(done);
__ bind(m_one);
- __ decrement($dst$$Register);
+ __ decrementl($dst$$Register);
__ bind(done);
%}
ins_pipe( pipe_slow );
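The renames in these ins_encode blocks follow one convention: helpers with an explicit width suffix (movdl, incrementl, decrementl, xorl) always operate on 32 bits, while the ptr/unsuffixed forms (movptr, xorptr, increment) expand to the pointer width of the build. A sketch of how such a pair is usually defined (assumed shape):

    // fixed 32-bit width
    void MacroAssembler::incrementl(Register reg, int value) { addl(reg, value); }

    // pointer width: 4 bytes on x86_32, 8 bytes on x86_64
    void MacroAssembler::increment(Register reg, int value) {
      LP64_ONLY(incrementq(reg, value);) NOT_LP64(incrementl(reg, value);)
    }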
diff --git a/src/cpu/x86/vm/x86_64.ad b/src/cpu/x86/vm/x86_64.ad
index 302994148..62b46da14 100644
--- a/src/cpu/x86/vm/x86_64.ad
+++ b/src/cpu/x86/vm/x86_64.ad
@@ -478,7 +478,7 @@ reg_class int_no_rcx_reg(RAX,
// Class for all int registers except RAX, RDX (and RSP)
reg_class int_no_rax_rdx_reg(RBP,
- RDI
+ RDI,
RSI,
RCX,
RBX,
@@ -552,7 +552,7 @@ reg_class double_reg(XMM0, XMM0_H,
// This is a block of C++ code which provides values, functions, and
// definitions necessary in the rest of the architecture description
source %{
-#define RELOC_IMM64 Assembler::imm64_operand
+#define RELOC_IMM64 Assembler::imm_operand
#define RELOC_DISP32 Assembler::disp32_operand
#define __ _masm.
@@ -962,11 +962,11 @@ void MachPrologNode::emit(CodeBuffer &cbuf, PhaseRegAlloc *ra_) const
if (VerifyStackAtCalls) {
Label L;
MacroAssembler masm(&cbuf);
- masm.pushq(rax);
- masm.movq(rax, rsp);
- masm.andq(rax, StackAlignmentInBytes-1);
- masm.cmpq(rax, StackAlignmentInBytes-wordSize);
- masm.popq(rax);
+ masm.push(rax);
+ masm.mov(rax, rsp);
+ masm.andptr(rax, StackAlignmentInBytes-1);
+ masm.cmpptr(rax, StackAlignmentInBytes-wordSize);
+ masm.pop(rax);
masm.jcc(Assembler::equal, L);
masm.stop("Stack is not properly aligned!");
masm.bind(L);
@@ -1817,6 +1817,7 @@ void emit_java_to_interp(CodeBuffer& cbuf)
__ relocate(static_stub_Relocation::spec(mark), RELOC_IMM64);
// static stub relocation also tags the methodOop in the code-stream.
__ movoop(rbx, (jobject) NULL); // method is zapped till fixup time
+ // This is recognized as unresolved by relocs/nativeinst/ic code
__ jump(RuntimeAddress(__ pc()));
// Update current stubs pointer and restore code_end.
@@ -1863,9 +1864,9 @@ void MachUEPNode::emit(CodeBuffer& cbuf, PhaseRegAlloc* ra_) const
#endif
if (UseCompressedOops) {
masm.load_klass(rscratch1, j_rarg0);
- masm.cmpq(rax, rscratch1);
+ masm.cmpptr(rax, rscratch1);
} else {
- masm.cmpq(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
+ masm.cmpptr(rax, Address(j_rarg0, oopDesc::klass_offset_in_bytes()));
}
masm.jump_cc(Assembler::notEqual, RuntimeAddress(SharedRuntime::get_ic_miss_stub()));
@@ -1949,7 +1950,7 @@ int emit_deopt_handler(CodeBuffer& cbuf)
__ call(next, relocInfo::none); // reloc none is fine since it is a disp32
__ bind(next);
// adjust it so it matches "the_pc"
- __ subq(Address(rsp, 0), __ offset() - offset);
+ __ subptr(Address(rsp, 0), __ offset() - offset);
__ jump(RuntimeAddress(SharedRuntime::deopt_blob()->unpack()));
assert(__ offset() - offset <= (int) size_deopt_handler(), "overflow");
__ end_a_stub();
@@ -2577,23 +2578,23 @@ encode %{
// Compare super with sub directly, since super is not in its own SSA.
// The compiler used to emit this test, but we fold it in here,
// to allow platform-specific tweaking on sparc.
- __ cmpq(Rrax, Rrsi);
+ __ cmpptr(Rrax, Rrsi);
__ jcc(Assembler::equal, hit);
#ifndef PRODUCT
__ lea(Rrcx, ExternalAddress((address)&SharedRuntime::_partial_subtype_ctr));
__ incrementl(Address(Rrcx, 0));
#endif //PRODUCT
- __ movq(Rrdi, Address(Rrsi,
- sizeof(oopDesc) +
+ __ movptr(Rrdi, Address(Rrsi,
+ sizeof(oopDesc) +
Klass::secondary_supers_offset_in_bytes()));
__ movl(Rrcx, Address(Rrdi, arrayOopDesc::length_offset_in_bytes()));
- __ addq(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
+ __ addptr(Rrdi, arrayOopDesc::base_offset_in_bytes(T_OBJECT));
if (UseCompressedOops) {
__ encode_heap_oop(Rrax);
__ repne_scanl();
__ jcc(Assembler::notEqual, cmiss);
__ decode_heap_oop(Rrax);
- __ movq(Address(Rrsi,
+ __ movptr(Address(Rrsi,
sizeof(oopDesc) +
Klass::secondary_super_cache_offset_in_bytes()),
Rrax);
@@ -2602,16 +2603,16 @@ encode %{
__ decode_heap_oop(Rrax);
__ jmp(miss);
} else {
- __ repne_scanq();
+ __ repne_scan();
__ jcc(Assembler::notEqual, miss);
- __ movq(Address(Rrsi,
+ __ movptr(Address(Rrsi,
sizeof(oopDesc) +
Klass::secondary_super_cache_offset_in_bytes()),
Rrax);
}
__ bind(hit);
if ($primary) {
- __ xorq(Rrdi, Rrdi);
+ __ xorptr(Rrdi, Rrdi);
}
__ bind(miss);
%}
@@ -3527,8 +3528,9 @@ encode %{
masm.atomic_incl(ExternalAddress((address) _counters->total_entry_count_addr()));
}
if (EmitSync & 1) {
- masm.movptr (Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ;
- masm.cmpq (rsp, 0) ;
+ // Without cast to int32_t a movptr will destroy r10 which is typically obj
+ masm.movptr (Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
+ masm.cmpptr(rsp, (int32_t)NULL_WORD) ;
} else
if (EmitSync & 2) {
Label DONE_LABEL;
@@ -3536,29 +3538,30 @@ encode %{
// Note: tmpReg maps to the swap_reg argument and scrReg to the tmp_reg argument.
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, false, DONE_LABEL, NULL, _counters);
}
- masm.movl(tmpReg, 0x1);
- masm.orq(tmpReg, Address(objReg, 0));
- masm.movq(Address(boxReg, 0), tmpReg);
+ // QQQ was movl...
+ masm.movptr(tmpReg, 0x1);
+ masm.orptr(tmpReg, Address(objReg, 0));
+ masm.movptr(Address(boxReg, 0), tmpReg);
if (os::is_MP()) {
masm.lock();
}
- masm.cmpxchgq(boxReg, Address(objReg, 0)); // Updates tmpReg
+ masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
masm.jcc(Assembler::equal, DONE_LABEL);
// Recursive locking
- masm.subq(tmpReg, rsp);
- masm.andq(tmpReg, 7 - os::vm_page_size());
- masm.movq(Address(boxReg, 0), tmpReg);
+ masm.subptr(tmpReg, rsp);
+ masm.andptr(tmpReg, 7 - os::vm_page_size());
+ masm.movptr(Address(boxReg, 0), tmpReg);
masm.bind(DONE_LABEL);
masm.nop(); // avoid branch to branch
} else {
Label DONE_LABEL, IsInflated, Egress;
- masm.movq (tmpReg, Address(objReg, 0)) ;
- masm.testq (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
- masm.jcc (Assembler::notZero, IsInflated) ;
-
+ masm.movptr(tmpReg, Address(objReg, 0)) ;
+ masm.testl (tmpReg, 0x02) ; // inflated vs stack-locked|neutral|biased
+ masm.jcc (Assembler::notZero, IsInflated) ;
+
// it's stack-locked, biased or neutral
// TODO: optimize markword triage order to reduce the number of
// conditional branches in the most common cases.
@@ -3568,13 +3571,14 @@ encode %{
if (UseBiasedLocking) {
masm.biased_locking_enter(boxReg, objReg, tmpReg, scrReg, true, DONE_LABEL, NULL, _counters);
- masm.movq (tmpReg, Address(objReg, 0)) ; // [FETCH]
+ masm.movptr(tmpReg, Address(objReg, 0)) ; // [FETCH]
}
- masm.orq (tmpReg, 1) ;
- masm.movq (Address(boxReg, 0), tmpReg) ;
- if (os::is_MP()) { masm.lock(); }
- masm.cmpxchgq(boxReg, Address(objReg, 0)); // Updates tmpReg
+ // was q will it destroy high?
+ masm.orl (tmpReg, 1) ;
+ masm.movptr(Address(boxReg, 0), tmpReg) ;
+ if (os::is_MP()) { masm.lock(); }
+ masm.cmpxchgptr(boxReg, Address(objReg, 0)); // Updates tmpReg
if (_counters != NULL) {
masm.cond_inc32(Assembler::equal,
ExternalAddress((address) _counters->fast_path_entry_count_addr()));
@@ -3582,9 +3586,9 @@ encode %{
masm.jcc (Assembler::equal, DONE_LABEL);
// Recursive locking
- masm.subq (tmpReg, rsp);
- masm.andq (tmpReg, 7 - os::vm_page_size());
- masm.movq (Address(boxReg, 0), tmpReg);
+ masm.subptr(tmpReg, rsp);
+ masm.andptr(tmpReg, 7 - os::vm_page_size());
+ masm.movptr(Address(boxReg, 0), tmpReg);
if (_counters != NULL) {
masm.cond_inc32(Assembler::equal,
ExternalAddress((address) _counters->fast_path_entry_count_addr()));
@@ -3599,16 +3603,17 @@ encode %{
// We should also think about trying a CAS without having
// fetched _owner. If the CAS is successful we may
// avoid an RTO->RTS upgrade on the $line.
- masm.movptr(Address(boxReg, 0), intptr_t(markOopDesc::unused_mark())) ;
+ // Without cast to int32_t a movptr will destroy r10 which is typically obj
+ masm.movptr(Address(boxReg, 0), (int32_t)intptr_t(markOopDesc::unused_mark())) ;
- masm.movq (boxReg, tmpReg) ;
- masm.movq (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
- masm.testq (tmpReg, tmpReg) ;
- masm.jcc (Assembler::notZero, DONE_LABEL) ;
+ masm.mov (boxReg, tmpReg) ;
+ masm.movptr (tmpReg, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+ masm.testptr(tmpReg, tmpReg) ;
+ masm.jcc (Assembler::notZero, DONE_LABEL) ;
// It's inflated and appears unlocked
- if (os::is_MP()) { masm.lock(); }
- masm.cmpxchgq(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+ if (os::is_MP()) { masm.lock(); }
+ masm.cmpxchgptr(r15_thread, Address(boxReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
// Intentional fall-through into DONE_LABEL ...
masm.bind (DONE_LABEL) ;
@@ -3627,8 +3632,8 @@ encode %{
Register tmpReg = as_Register($tmp$$reg);
MacroAssembler masm(&cbuf);
- if (EmitSync & 4) {
- masm.cmpq (rsp, 0) ;
+ if (EmitSync & 4) {
+ masm.cmpptr(rsp, 0) ;
} else
if (EmitSync & 8) {
Label DONE_LABEL;
@@ -3638,15 +3643,15 @@ encode %{
// Check whether the displaced header is 0
//(=> recursive unlock)
- masm.movq(tmpReg, Address(boxReg, 0));
- masm.testq(tmpReg, tmpReg);
+ masm.movptr(tmpReg, Address(boxReg, 0));
+ masm.testptr(tmpReg, tmpReg);
masm.jcc(Assembler::zero, DONE_LABEL);
// If not recursive lock, reset the header to displaced header
if (os::is_MP()) {
masm.lock();
}
- masm.cmpxchgq(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+ masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
masm.bind(DONE_LABEL);
masm.nop(); // avoid branch to branch
} else {
@@ -3655,44 +3660,44 @@ encode %{
if (UseBiasedLocking) {
masm.biased_locking_exit(objReg, tmpReg, DONE_LABEL);
}
-
- masm.movq (tmpReg, Address(objReg, 0)) ;
- masm.cmpq (Address(boxReg, 0), (int)NULL_WORD) ;
- masm.jcc (Assembler::zero, DONE_LABEL) ;
- masm.testq (tmpReg, 0x02) ;
- masm.jcc (Assembler::zero, Stacked) ;
-
+
+ masm.movptr(tmpReg, Address(objReg, 0)) ;
+ masm.cmpptr(Address(boxReg, 0), (int32_t)NULL_WORD) ;
+ masm.jcc (Assembler::zero, DONE_LABEL) ;
+ masm.testl (tmpReg, 0x02) ;
+ masm.jcc (Assembler::zero, Stacked) ;
+
// It's inflated
- masm.movq (boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
- masm.xorq (boxReg, r15_thread) ;
- masm.orq (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
- masm.jcc (Assembler::notZero, DONE_LABEL) ;
- masm.movq (boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
- masm.orq (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
- masm.jcc (Assembler::notZero, CheckSucc) ;
- masm.mov64 (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int)NULL_WORD) ;
- masm.jmp (DONE_LABEL) ;
-
- if ((EmitSync & 65536) == 0) {
+ masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2)) ;
+ masm.xorptr(boxReg, r15_thread) ;
+ masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::recursions_offset_in_bytes()-2)) ;
+ masm.jcc (Assembler::notZero, DONE_LABEL) ;
+ masm.movptr(boxReg, Address (tmpReg, ObjectMonitor::cxq_offset_in_bytes()-2)) ;
+ masm.orptr (boxReg, Address (tmpReg, ObjectMonitor::EntryList_offset_in_bytes()-2)) ;
+ masm.jcc (Assembler::notZero, CheckSucc) ;
+ masm.movptr(Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
+ masm.jmp (DONE_LABEL) ;
+
+ if ((EmitSync & 65536) == 0) {
Label LSuccess, LGoSlowPath ;
masm.bind (CheckSucc) ;
- masm.cmpq (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int)NULL_WORD) ;
+ masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
masm.jcc (Assembler::zero, LGoSlowPath) ;
     // I'd much rather use lock:andl m->_owner, 0 as it's faster than
     // the explicit ST;MEMBAR combination, but masm doesn't currently support
// "ANDQ M,IMM". Don't use MFENCE here. lock:add to TOS, xchg, etc
// are all faster when the write buffer is populated.
- masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int)NULL_WORD) ;
+ masm.movptr (Address (tmpReg, ObjectMonitor::owner_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
if (os::is_MP()) {
- masm.lock () ; masm.addq (Address(rsp, 0), 0) ;
+ masm.lock () ; masm.addl (Address(rsp, 0), 0) ;
}
- masm.cmpq (Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int)NULL_WORD) ;
+ masm.cmpptr(Address (tmpReg, ObjectMonitor::succ_offset_in_bytes()-2), (int32_t)NULL_WORD) ;
masm.jcc (Assembler::notZero, LSuccess) ;
- masm.movptr (boxReg, (int)NULL_WORD) ; // box is really EAX
+ masm.movptr (boxReg, (int32_t)NULL_WORD) ; // box is really EAX
if (os::is_MP()) { masm.lock(); }
- masm.cmpxchgq (r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
+ masm.cmpxchgptr(r15_thread, Address(tmpReg, ObjectMonitor::owner_offset_in_bytes()-2));
masm.jcc (Assembler::notEqual, LSuccess) ;
// Intentional fall-through into slow-path
@@ -3705,10 +3710,10 @@ encode %{
masm.jmp (DONE_LABEL) ;
}
- masm.bind (Stacked) ;
- masm.movq (tmpReg, Address (boxReg, 0)) ; // re-fetch
- if (os::is_MP()) { masm.lock(); }
- masm.cmpxchgq(tmpReg, Address(objReg, 0)); // Uses RAX which is box
+ masm.bind (Stacked) ;
+ masm.movptr(tmpReg, Address (boxReg, 0)) ; // re-fetch
+ if (os::is_MP()) { masm.lock(); }
+ masm.cmpxchgptr(tmpReg, Address(objReg, 0)); // Uses RAX which is box
if (EmitSync & 65536) {
masm.bind (CheckSucc) ;
@@ -3736,10 +3741,10 @@ encode %{
masm.load_heap_oop(rax, Address(rsi, value_offset));
masm.movl(rcx, Address(rsi, offset_offset));
- masm.leaq(rax, Address(rax, rcx, Address::times_2, base_offset));
+ masm.lea(rax, Address(rax, rcx, Address::times_2, base_offset));
masm.load_heap_oop(rbx, Address(rdi, value_offset));
masm.movl(rcx, Address(rdi, offset_offset));
- masm.leaq(rbx, Address(rbx, rcx, Address::times_2, base_offset));
+ masm.lea(rbx, Address(rbx, rcx, Address::times_2, base_offset));
    // Compute the minimum of the string lengths (rsi) and the
// difference of the string lengths (stack)
@@ -3748,8 +3753,8 @@ encode %{
masm.movl(rsi, Address(rsi, count_offset));
masm.movl(rcx, rdi);
masm.subl(rdi, rsi);
- masm.pushq(rdi);
- masm.cmovl(Assembler::lessEqual, rsi, rcx);
+ masm.push(rdi);
+ masm.cmov(Assembler::lessEqual, rsi, rcx);
// Is the minimum length zero?
masm.bind(RCX_GOOD_LABEL);
@@ -3770,7 +3775,7 @@ encode %{
// Check after comparing first character to see if strings are equivalent
Label LSkip2;
// Check if the strings start at same location
- masm.cmpq(rbx, rax);
+ masm.cmpptr(rbx, rax);
masm.jcc(Assembler::notEqual, LSkip2);
// Check if the length difference is zero (from stack)
@@ -3782,9 +3787,9 @@ encode %{
}
// Shift RAX and RBX to the end of the arrays, negate min
- masm.leaq(rax, Address(rax, rsi, Address::times_2, 2));
- masm.leaq(rbx, Address(rbx, rsi, Address::times_2, 2));
- masm.negq(rsi);
+ masm.lea(rax, Address(rax, rsi, Address::times_2, 2));
+ masm.lea(rbx, Address(rbx, rsi, Address::times_2, 2));
+ masm.negptr(rsi);
// Compare the rest of the characters
masm.bind(WHILE_HEAD_LABEL);
@@ -3792,18 +3797,18 @@ encode %{
masm.load_unsigned_word(rdi, Address(rax, rsi, Address::times_2, 0));
masm.subl(rcx, rdi);
masm.jcc(Assembler::notZero, POP_LABEL);
- masm.incrementq(rsi);
+ masm.increment(rsi);
masm.jcc(Assembler::notZero, WHILE_HEAD_LABEL);
// Strings are equal up to min length. Return the length difference.
masm.bind(LENGTH_DIFF_LABEL);
- masm.popq(rcx);
+ masm.pop(rcx);
masm.jmp(DONE_LABEL);
// Discard the stored length difference
masm.bind(POP_LABEL);
- masm.addq(rsp, 8);
-
+ masm.addptr(rsp, 8);
+
// That's it
masm.bind(DONE_LABEL);
%}
@@ -3893,7 +3898,7 @@ encode %{
enc_class absF_encoding(regF dst)
%{
int dstenc = $dst$$reg;
- address signmask_address = (address) StubRoutines::amd64::float_sign_mask();
+ address signmask_address = (address) StubRoutines::x86::float_sign_mask();
cbuf.set_inst_mark();
if (dstenc >= 8) {
@@ -3910,7 +3915,7 @@ encode %{
enc_class absD_encoding(regD dst)
%{
int dstenc = $dst$$reg;
- address signmask_address = (address) StubRoutines::amd64::double_sign_mask();
+ address signmask_address = (address) StubRoutines::x86::double_sign_mask();
cbuf.set_inst_mark();
emit_opcode(cbuf, 0x66);
@@ -3928,7 +3933,7 @@ encode %{
enc_class negF_encoding(regF dst)
%{
int dstenc = $dst$$reg;
- address signflip_address = (address) StubRoutines::amd64::float_sign_flip();
+ address signflip_address = (address) StubRoutines::x86::float_sign_flip();
cbuf.set_inst_mark();
if (dstenc >= 8) {
@@ -3945,7 +3950,7 @@ encode %{
enc_class negD_encoding(regD dst)
%{
int dstenc = $dst$$reg;
- address signflip_address = (address) StubRoutines::amd64::double_sign_flip();
+ address signflip_address = (address) StubRoutines::x86::double_sign_flip();
cbuf.set_inst_mark();
emit_opcode(cbuf, 0x66);
@@ -4003,7 +4008,7 @@ encode %{
emit_opcode(cbuf, 0xE8);
emit_d32_reloc(cbuf,
(int)
- (StubRoutines::amd64::f2i_fixup() - cbuf.code_end() - 4),
+ (StubRoutines::x86::f2i_fixup() - cbuf.code_end() - 4),
runtime_call_Relocation::spec(),
RELOC_DISP32);
@@ -4020,7 +4025,7 @@ encode %{
%{
int dstenc = $dst$$reg;
int srcenc = $src$$reg;
- address const_address = (address) StubRoutines::amd64::double_sign_flip();
+ address const_address = (address) StubRoutines::x86::double_sign_flip();
// cmpq $dst, [0x8000000000000000]
cbuf.set_inst_mark();
@@ -4061,7 +4066,7 @@ encode %{
emit_opcode(cbuf, 0xE8);
emit_d32_reloc(cbuf,
(int)
- (StubRoutines::amd64::f2l_fixup() - cbuf.code_end() - 4),
+ (StubRoutines::x86::f2l_fixup() - cbuf.code_end() - 4),
runtime_call_Relocation::spec(),
RELOC_DISP32);
@@ -4117,7 +4122,7 @@ encode %{
emit_opcode(cbuf, 0xE8);
emit_d32_reloc(cbuf,
(int)
- (StubRoutines::amd64::d2i_fixup() - cbuf.code_end() - 4),
+ (StubRoutines::x86::d2i_fixup() - cbuf.code_end() - 4),
runtime_call_Relocation::spec(),
RELOC_DISP32);
@@ -4134,7 +4139,7 @@ encode %{
%{
int dstenc = $dst$$reg;
int srcenc = $src$$reg;
- address const_address = (address) StubRoutines::amd64::double_sign_flip();
+ address const_address = (address) StubRoutines::x86::double_sign_flip();
// cmpq $dst, [0x8000000000000000]
cbuf.set_inst_mark();
@@ -4175,7 +4180,7 @@ encode %{
emit_opcode(cbuf, 0xE8);
emit_d32_reloc(cbuf,
(int)
- (StubRoutines::amd64::d2l_fixup() - cbuf.code_end() - 4),
+ (StubRoutines::x86::d2l_fixup() - cbuf.code_end() - 4),
runtime_call_Relocation::spec(),
RELOC_DISP32);
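All the StubRoutines::amd64:: references become StubRoutines::x86:: because the per-word-size stub holders are merged into a single x86 holder shared by both ports. A hypothetical skeleton of that holder, just to show the shape the renamed call sites assume:

    class StubRoutines {
     public:
      class x86 {
        static address _float_sign_mask;    // used by absF_encoding above
       public:
        static address float_sign_mask()  { return _float_sign_mask; }
        // double_sign_mask(), float_sign_flip(), f2i_fixup(), d2l_fixup(), ...
        // follow the same pattern
      };
    };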
diff --git a/src/os_cpu/linux_x86/vm/assembler_linux_x86_32.cpp b/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp
index 1854b0075..4211d3611 100644
--- a/src/os_cpu/linux_x86/vm/assembler_linux_x86_32.cpp
+++ b/src/os_cpu/linux_x86/vm/assembler_linux_x86.cpp
@@ -23,8 +23,9 @@
*/
#include "incls/_precompiled.incl"
-#include "incls/_assembler_linux_x86_32.cpp.incl"
+#include "incls/_assembler_linux_x86.cpp.incl"
+#ifndef _LP64
void MacroAssembler::int3() {
call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
}
@@ -39,3 +40,45 @@ void MacroAssembler::get_thread(Register thread) {
movptr(thread, tls);
}
+#else
+void MacroAssembler::int3() {
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
+}
+
+void MacroAssembler::get_thread(Register thread) {
+ // call pthread_getspecific
+ // void * pthread_getspecific(pthread_key_t key);
+ if (thread != rax) {
+ push(rax);
+ }
+ push(rdi);
+ push(rsi);
+ push(rdx);
+ push(rcx);
+ push(r8);
+ push(r9);
+ push(r10);
+ // XXX
+ mov(r10, rsp);
+ andq(rsp, -16);
+ push(r10);
+ push(r11);
+
+ movl(rdi, ThreadLocalStorage::thread_index());
+ call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
+
+ pop(r11);
+ pop(rsp);
+ pop(r10);
+ pop(r9);
+ pop(r8);
+ pop(rcx);
+ pop(rdx);
+ pop(rsi);
+ pop(rdi);
+ if (thread != rax) {
+ mov(thread, rax);
+ pop(rax);
+ }
+}
+#endif
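The merged Linux file keeps the 64-bit get_thread() that calls into libc; the r10/r11 sequence around the call is worth spelling out. Commentary-only sketch of the same instructions (the reasoning about alignment is an editorial gloss, not quoted from the sources):

    mov(r10, rsp);      // remember the original rsp; the andq below destroys it
    andq(rsp, -16);     // the SysV AMD64 ABI wants a 16-byte aligned stack at the call
    push(r10);          // 8 bytes ...
    push(r11);          // ... plus 8 more keeps the alignment and preserves r11
    // call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
    pop(r11);
    pop(rsp);           // restores the exact pre-alignment stack pointer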
diff --git a/src/os_cpu/linux_x86/vm/assembler_linux_x86_64.cpp b/src/os_cpu/linux_x86/vm/assembler_linux_x86_64.cpp
deleted file mode 100644
index 24a4dce09..000000000
--- a/src/os_cpu/linux_x86/vm/assembler_linux_x86_64.cpp
+++ /dev/null
@@ -1,67 +0,0 @@
-/*
- * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_assembler_linux_x86_64.cpp.incl"
-
-void MacroAssembler::int3() {
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MacroAssembler::get_thread(Register thread) {
- // call pthread_getspecific
- // void * pthread_getspecific(pthread_key_t key);
- if (thread != rax) {
- pushq(rax);
- }
- pushq(rdi);
- pushq(rsi);
- pushq(rdx);
- pushq(rcx);
- pushq(r8);
- pushq(r9);
- pushq(r10);
- // XXX
- movq(r10, rsp);
- andq(rsp, -16);
- pushq(r10);
- pushq(r11);
-
- movl(rdi, ThreadLocalStorage::thread_index());
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, pthread_getspecific)));
-
- popq(r11);
- popq(rsp);
- popq(r10);
- popq(r9);
- popq(r8);
- popq(rcx);
- popq(rdx);
- popq(rsi);
- popq(rdi);
- if (thread != rax) {
- movq(thread, rax);
- popq(rax);
- }
-}
diff --git a/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_32.cpp b/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp
index bce611c11..9fd017b41 100644
--- a/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_32.cpp
+++ b/src/os_cpu/solaris_x86/vm/assembler_solaris_x86.cpp
@@ -23,59 +23,111 @@
*/
#include "incls/_precompiled.incl"
-#include "incls/_assembler_solaris_x86_32.cpp.incl"
+#include "incls/_assembler_solaris_x86.cpp.incl"
void MacroAssembler::int3() {
- pushl(rax);
- pushl(rdx);
- pushl(rcx);
+ push(rax);
+ push(rdx);
+ push(rcx);
call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
- popl(rcx);
- popl(rdx);
- popl(rax);
+ pop(rcx);
+ pop(rdx);
+ pop(rax);
}
+#define __ _masm->
+#ifndef _LP64
+static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
+
+ // slow call to of thr_getspecific
+ // int thr_getspecific(thread_key_t key, void **value);
+ // Consider using pthread_getspecific instead.
+
+__ push(0); // allocate space for return value
+ if (thread != rax) __ push(rax); // save rax, if caller still wants it
+__ push(rcx); // save caller save
+__ push(rdx); // save caller save
+ if (thread != rax) {
+__ lea(thread, Address(rsp, 3 * sizeof(int))); // address of return value
+ } else {
+__ lea(thread, Address(rsp, 2 * sizeof(int))); // address of return value
+ }
+__ push(thread); // and pass the address
+__ push(ThreadLocalStorage::thread_index()); // the key
+__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
+__ increment(rsp, 2 * wordSize);
+__ pop(rdx);
+__ pop(rcx);
+ if (thread != rax) __ pop(rax);
+__ pop(thread);
+
+}
+#else
+static void slow_call_thr_specific(MacroAssembler* _masm, Register thread) {
+  // slow call to thr_getspecific
+ // int thr_getspecific(thread_key_t key, void **value);
+ // Consider using pthread_getspecific instead.
+
+ if (thread != rax) {
+__ push(rax);
+ }
+__ push(0); // space for return value
+__ push(rdi);
+__ push(rsi);
+__ lea(rsi, Address(rsp, 16)); // pass return value address
+__ push(rdx);
+__ push(rcx);
+__ push(r8);
+__ push(r9);
+__ push(r10);
+ // XXX
+__ mov(r10, rsp);
+__ andptr(rsp, -16);
+__ push(r10);
+__ push(r11);
+
+__ movl(rdi, ThreadLocalStorage::thread_index());
+__ call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
+
+__ pop(r11);
+__ pop(rsp);
+__ pop(r10);
+__ pop(r9);
+__ pop(r8);
+__ pop(rcx);
+__ pop(rdx);
+__ pop(rsi);
+__ pop(rdi);
+__ pop(thread); // load return value
+ if (thread != rax) {
+__ pop(rax);
+ }
+}
+#endif //LP64
+
void MacroAssembler::get_thread(Register thread) {
+ int segment = NOT_LP64(Assembler::GS_segment) LP64_ONLY(Assembler::FS_segment);
// Try to emit a Solaris-specific fast TSD/TLS accessor.
- ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode () ;
+ ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode ();
if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) { // T1
// Use thread as a temporary: mov r, gs:[0]; mov r, [r+tlsOffset]
- emit_byte (Assembler::GS_segment) ;
+ emit_byte (segment);
// ExternalAddress doesn't work because it can't take NULL
AddressLiteral null(0, relocInfo::none);
movptr (thread, null);
- movl (thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ;
+ movptr(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset())) ;
return ;
} else
if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) { // T2
// mov r, gs:[tlsOffset]
- emit_byte (Assembler::GS_segment) ;
- AddressLiteral tls((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none);
- movptr (thread, tls);
+ emit_byte (segment);
+ AddressLiteral tls_off((address)ThreadLocalStorage::pd_getTlsOffset(), relocInfo::none);
+ movptr (thread, tls_off);
return ;
}
- // slow call to of thr_getspecific
- // int thr_getspecific(thread_key_t key, void **value);
- // Consider using pthread_getspecific instead.
+ slow_call_thr_specific(this, thread);
- pushl(0); // allocate space for return value
- if (thread != rax) pushl(rax); // save rax, if caller still wants it
- pushl(rcx); // save caller save
- pushl(rdx); // save caller save
- if (thread != rax) {
- leal(thread, Address(rsp, 3 * sizeof(int))); // address of return value
- } else {
- leal(thread, Address(rsp, 2 * sizeof(int))); // address of return value
- }
- pushl(thread); // and pass the address
- pushl(ThreadLocalStorage::thread_index()); // the key
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
- increment(rsp, 2 * wordSize);
- popl(rdx);
- popl(rcx);
- if (thread != rax) popl(rax);
- popl(thread);
}
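The merged Solaris file selects the segment prefix with the NOT_LP64/LP64_ONLY pair: each macro expands to its argument on the matching build and to nothing on the other, so one expression yields GS_segment on 32-bit and FS_segment on 64-bit. Their conventional definitions, for reference (sketch):

    #ifdef _LP64
    #define LP64_ONLY(code) code
    #define NOT_LP64(code)
    #else
    #define LP64_ONLY(code)
    #define NOT_LP64(code) code
    #endif

    int segment = NOT_LP64(Assembler::GS_segment) LP64_ONLY(Assembler::FS_segment);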
diff --git a/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_64.cpp b/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_64.cpp
deleted file mode 100644
index 2ccae8a68..000000000
--- a/src/os_cpu/solaris_x86/vm/assembler_solaris_x86_64.cpp
+++ /dev/null
@@ -1,87 +0,0 @@
-/*
- * Copyright 2004-2008 Sun Microsystems, Inc. All Rights Reserved.
- * DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
- *
- * This code is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License version 2 only, as
- * published by the Free Software Foundation.
- *
- * This code is distributed in the hope that it will be useful, but WITHOUT
- * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
- * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License
- * version 2 for more details (a copy is included in the LICENSE file that
- * accompanied this code).
- *
- * You should have received a copy of the GNU General Public License version
- * 2 along with this work; if not, write to the Free Software Foundation,
- * Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA.
- *
- * Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
- * CA 95054 USA or visit www.sun.com if you need additional information or
- * have any questions.
- *
- */
-
-#include "incls/_precompiled.incl"
-#include "incls/_assembler_solaris_x86_64.cpp.incl"
-
-void MacroAssembler::int3() {
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
-}
-
-void MacroAssembler::get_thread(Register thread) {
- // Try to emit a Solaris-specific fast TSD/TLS accessor.
- ThreadLocalStorage::pd_tlsAccessMode tlsMode = ThreadLocalStorage::pd_getTlsAccessMode();
- if (tlsMode == ThreadLocalStorage::pd_tlsAccessIndirect) { // T1
- // Use thread as a temporary: mov r, fs:[0]; mov r, [r+tlsOffset]
- emit_byte(Assembler::FS_segment);
- movq(thread, Address(NULL, relocInfo::none));
- movq(thread, Address(thread, ThreadLocalStorage::pd_getTlsOffset()));
- return;
- } else if (tlsMode == ThreadLocalStorage::pd_tlsAccessDirect) { // T2
- // mov r, fs:[tlsOffset]
- emit_byte(Assembler::FS_segment);
- ExternalAddress tls_off((address) ThreadLocalStorage::pd_getTlsOffset());
- movptr(thread, tls_off);
- return;
- }
-
- // slow call to of thr_getspecific
- // int thr_getspecific(thread_key_t key, void **value);
- // Consider using pthread_getspecific instead.
-
- if (thread != rax) {
- pushq(rax);
- }
- pushq(0); // space for return value
- pushq(rdi);
- pushq(rsi);
- leaq(rsi, Address(rsp, 16)); // pass return value address
- pushq(rdx);
- pushq(rcx);
- pushq(r8);
- pushq(r9);
- pushq(r10);
- // XXX
- movq(r10, rsp);
- andq(rsp, -16);
- pushq(r10);
- pushq(r11);
-
- movl(rdi, ThreadLocalStorage::thread_index());
- call(RuntimeAddress(CAST_FROM_FN_PTR(address, thr_getspecific)));
-
- popq(r11);
- popq(rsp);
- popq(r10);
- popq(r9);
- popq(r8);
- popq(rcx);
- popq(rdx);
- popq(rsi);
- popq(rdi);
- popq(thread); // load return value
- if (thread != rax) {
- popq(rax);
- }
-}
diff --git a/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad b/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad
index 0409fb55d..549f7b048 100644
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.ad
@@ -62,13 +62,13 @@ encode %{
enc_class solaris_breakpoint %{
MacroAssembler* masm = new MacroAssembler(&cbuf);
// Really need to fix this
- masm->pushl(rax);
- masm->pushl(rcx);
- masm->pushl(rdx);
+ masm->push(rax);
+ masm->push(rcx);
+ masm->push(rdx);
masm->call(RuntimeAddress(CAST_FROM_FN_PTR(address, os::breakpoint)));
- masm->popl(rdx);
- masm->popl(rcx);
- masm->popl(rax);
+ masm->pop(rdx);
+ masm->pop(rcx);
+ masm->pop(rax);
%}
enc_class call_epilog %{
diff --git a/src/os_cpu/windows_x86/vm/assembler_windows_x86_32.cpp b/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp
index 5e91ce654..4d4642143 100644
--- a/src/os_cpu/windows_x86/vm/assembler_windows_x86_32.cpp
+++ b/src/os_cpu/windows_x86/vm/assembler_windows_x86.cpp
@@ -23,13 +23,14 @@
*/
#include "incls/_precompiled.incl"
-#include "incls/_assembler_windows_x86_32.cpp.incl"
+#include "incls/_assembler_windows_x86.cpp.incl"
void MacroAssembler::int3() {
emit_byte(0xCC);
}
+#ifndef _LP64
// The current scheme to accelerate access to the thread
// pointer is to store the current thread in the os_exception_wrapper
// and reference the current thread from stubs and compiled code
@@ -58,3 +59,40 @@ void MacroAssembler::get_thread(Register thread) {
"Thread Pointer Offset has not been initialized");
movl(thread, Address(thread, ThreadLocalStorage::get_thread_ptr_offset()));
}
+#else
+// call (Thread*)TlsGetValue(thread_index());
+void MacroAssembler::get_thread(Register thread) {
+ if (thread != rax) {
+ push(rax);
+ }
+ push(rdi);
+ push(rsi);
+ push(rdx);
+ push(rcx);
+ push(r8);
+ push(r9);
+ push(r10);
+ // XXX
+ mov(r10, rsp);
+ andq(rsp, -16);
+ push(r10);
+ push(r11);
+
+ movl(c_rarg0, ThreadLocalStorage::thread_index());
+ call(RuntimeAddress((address)TlsGetValue));
+
+ pop(r11);
+ pop(rsp);
+ pop(r10);
+ pop(r9);
+ pop(r8);
+ pop(rcx);
+ pop(rdx);
+ pop(rsi);
+ pop(rdi);
+ if (thread != rax) {
+ mov(thread, rax);
+ pop(rax);
+ }
+}
+#endif
diff --git a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
index a25040459..ff8cdd6ff 100644
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
@@ -369,7 +369,7 @@ frame os::current_frame() {
// apparently _asm not supported on windows amd64
typedef intptr_t* get_fp_func ();
get_fp_func* func = CAST_TO_FN_PTR(get_fp_func*,
- StubRoutines::amd64::get_previous_fp_entry());
+ StubRoutines::x86::get_previous_fp_entry());
if (func == NULL) return frame(NULL, NULL, NULL);
intptr_t* fp = (*func)();
#else
diff --git a/src/share/vm/adlc/output_h.cpp b/src/share/vm/adlc/output_h.cpp
index 78f160783..04cddcd28 100644
--- a/src/share/vm/adlc/output_h.cpp
+++ b/src/share/vm/adlc/output_h.cpp
@@ -1848,6 +1848,19 @@ void ArchDesc::declareClasses(FILE *fp) {
fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveP\n",
offset, offset+1, offset+1);
}
+ else if( instr->_matrule && instr->_matrule->_rChild && !strcmp(instr->_matrule->_rChild->_opType,"CMoveN") ) {
+ int offset = 1;
+ // Special special hack to see if the Cmp? has been incorporated in the conditional move
+ MatchNode *rl = instr->_matrule->_rChild->_lChild;
+ if( rl && !strcmp(rl->_opType, "Binary") ) {
+ MatchNode *rlr = rl->_rChild;
+ if (rlr && strncmp(rlr->_opType, "Cmp", 3) == 0)
+ offset = 2;
+ }
+ // Special hack for ideal CMoveN; ideal type depends on inputs
+ fprintf(fp," const Type *bottom_type() const { const Type *t = in(oper_input_base()+%d)->bottom_type(); return (req() <= oper_input_base()+%d) ? t : t->meet(in(oper_input_base()+%d)->bottom_type()); } // CMoveN\n",
+ offset, offset+1, offset+1);
+ }
else if( instr->needs_base_oop_edge(_globalNames) ) {
// Special hack for ideal AddP. Bottom type is an oop IFF it has a
// legal base-pointer input. Otherwise it is NOT an oop.
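Substituting offset = 2 into the fprintf above (the folded-Cmp case), the adlc would emit roughly this accessor into the generated CMoveN node class; the body is reconstructed from the format string, so treat it as indicative:

    const Type *bottom_type() const {
      const Type *t = in(oper_input_base()+2)->bottom_type();
      return (req() <= oper_input_base()+3)
                 ? t
                 : t->meet(in(oper_input_base()+3)->bottom_type());
    } // CMoveN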
diff --git a/src/share/vm/c1/c1_FrameMap.cpp b/src/share/vm/c1/c1_FrameMap.cpp
index 31e29bed0..465b6d133 100644
--- a/src/share/vm/c1/c1_FrameMap.cpp
+++ b/src/share/vm/c1/c1_FrameMap.cpp
@@ -278,7 +278,7 @@ ByteSize FrameMap::sp_offset_for_spill(const int index) const {
ByteSize FrameMap::sp_offset_for_monitor_base(const int index) const {
int end_of_spills = round_to(first_available_sp_in_frame + _reserved_argument_area_size, sizeof(double)) +
_num_spills * spill_slot_size_in_bytes;
- int offset = round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
+ int offset = (int) round_to(end_of_spills, HeapWordSize) + index * sizeof(BasicObjectLock);
return in_ByteSize(offset);
}
diff --git a/src/share/vm/c1/c1_LIR.cpp b/src/share/vm/c1/c1_LIR.cpp
index 7335abb07..0c0f1eda6 100644
--- a/src/share/vm/c1/c1_LIR.cpp
+++ b/src/share/vm/c1/c1_LIR.cpp
@@ -37,7 +37,7 @@ Register LIR_OprDesc::as_register_hi() const {
return FrameMap::cpu_rnr2reg(cpu_regnrHi());
}
-#ifdef IA32
+#if defined(X86)
XMMRegister LIR_OprDesc::as_xmm_float_reg() const {
return FrameMap::nr2xmmreg(xmm_regnr());
@@ -48,7 +48,7 @@ XMMRegister LIR_OprDesc::as_xmm_double_reg() const {
return FrameMap::nr2xmmreg(xmm_regnrLo());
}
-#endif
+#endif // X86
#ifdef SPARC
@@ -81,7 +81,7 @@ LIR_Opr LIR_OprFact::value_type(ValueType* type) {
case floatTag : return LIR_OprFact::floatConst(type->as_FloatConstant()->value());
case longTag : return LIR_OprFact::longConst(type->as_LongConstant()->value());
case doubleTag : return LIR_OprFact::doubleConst(type->as_DoubleConstant()->value());
- default: ShouldNotReachHere();
+ default: ShouldNotReachHere(); return LIR_OprFact::intConst(-1);
}
}
@@ -94,7 +94,7 @@ LIR_Opr LIR_OprFact::dummy_value_type(ValueType* type) {
case floatTag: return LIR_OprFact::floatConst(0.0);
case longTag: return LIR_OprFact::longConst(0);
case doubleTag: return LIR_OprFact::doubleConst(0.0);
- default: ShouldNotReachHere();
+ default: ShouldNotReachHere(); return LIR_OprFact::intConst(-1);
}
return illegalOpr;
}
@@ -162,6 +162,7 @@ char LIR_OprDesc::type_char(BasicType t) {
default:
ShouldNotReachHere();
+ return '?';
}
}
@@ -1374,7 +1375,7 @@ void LIR_OprDesc::print(outputStream* out) const {
} else if (is_double_cpu()) {
out->print(as_register_hi()->name());
out->print(as_register_lo()->name());
-#ifdef IA32
+#if defined(X86)
} else if (is_single_xmm()) {
out->print(as_xmm_float_reg()->name());
} else if (is_double_xmm()) {
diff --git a/src/share/vm/c1/c1_LIR.hpp b/src/share/vm/c1/c1_LIR.hpp
index 6612604ca..15a095d44 100644
--- a/src/share/vm/c1/c1_LIR.hpp
+++ b/src/share/vm/c1/c1_LIR.hpp
@@ -135,6 +135,13 @@ class LIR_Const: public LIR_OprPtr {
return as_jint_hi();
}
}
+ jlong as_jlong_bits() const {
+ if (type() == T_DOUBLE) {
+ return jlong_cast(_value.get_jdouble());
+ } else {
+ return as_jlong();
+ }
+ }
virtual void print_value_on(outputStream* out) const PRODUCT_RETURN;
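as_jlong_bits() returns the raw bit pattern of a double constant (via jlong_cast) rather than a numeric conversion, which is what a 64-bit backend wants when it materializes the constant through an integer register. A hypothetical use on x86_64 (the names c, dest, rscratch1 and the movdq helper are assumptions here):

    jlong bits = c->as_jlong_bits();                  // reinterpret, do not convert
    __ mov64(rscratch1, bits);                        // bit pattern into a GPR
    __ movdq(dest->as_xmm_double_reg(), rscratch1);   // then into the XMM destination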
@@ -302,6 +309,7 @@ class LIR_OprDesc: public CompilationResourceObj {
default:
ShouldNotReachHere();
+ return single_size;
}
}
@@ -417,12 +425,12 @@ class LIR_OprDesc: public CompilationResourceObj {
return as_register();
}
-#ifdef IA32
+#ifdef X86
XMMRegister as_xmm_float_reg() const;
XMMRegister as_xmm_double_reg() const;
// for compatibility with RInfo
int fpu () const { return lo_reg_half(); }
-#endif
+#endif // X86
#ifdef SPARC
FloatRegister as_float_reg () const;
@@ -503,14 +511,14 @@ class LIR_Address: public LIR_OprPtr {
, _type(type)
, _disp(disp) { verify(); }
-#ifdef IA32
+#ifdef X86
LIR_Address(LIR_Opr base, LIR_Opr index, Scale scale, int disp, BasicType type):
_base(base)
, _index(index)
, _scale(scale)
, _type(type)
, _disp(disp) { verify(); }
-#endif
+#endif // X86
LIR_Opr base() const { return _base; }
LIR_Opr index() const { return _index; }
@@ -535,31 +543,93 @@ class LIR_OprFact: public AllStatic {
static LIR_Opr illegalOpr;
- static LIR_Opr single_cpu(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::int_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
- static LIR_Opr single_cpu_oop(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
- static LIR_Opr double_cpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::long_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size); }
+ static LIR_Opr single_cpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::int_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
+ static LIR_Opr single_cpu_oop(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size); }
+ static LIR_Opr double_cpu(int reg1, int reg2) {
+ LP64_ONLY(assert(reg1 == reg2, "must be identical"));
+ return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
+ (reg2 << LIR_OprDesc::reg2_shift) |
+ LIR_OprDesc::long_type |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::double_size);
+ }
- static LIR_Opr single_fpu(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size); }
+ static LIR_Opr single_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+ LIR_OprDesc::float_type |
+ LIR_OprDesc::fpu_register |
+ LIR_OprDesc::single_size); }
#ifdef SPARC
- static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)((reg1 << LIR_OprDesc::reg1_shift) | (reg2 << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
-#endif
-#ifdef IA32
- static LIR_Opr double_fpu(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | (reg << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size); }
- static LIR_Opr single_xmm(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::is_xmm_mask); }
- static LIR_Opr double_xmm(int reg) { return (LIR_Opr)((reg << LIR_OprDesc::reg1_shift) | (reg << LIR_OprDesc::reg2_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::is_xmm_mask); }
+ static LIR_Opr double_fpu(int reg1, int reg2) { return (LIR_Opr)(intptr_t)((reg1 << LIR_OprDesc::reg1_shift) |
+ (reg2 << LIR_OprDesc::reg2_shift) |
+ LIR_OprDesc::double_type |
+ LIR_OprDesc::fpu_register |
+ LIR_OprDesc::double_size); }
#endif
+#ifdef X86
+ static LIR_Opr double_fpu(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+ (reg << LIR_OprDesc::reg2_shift) |
+ LIR_OprDesc::double_type |
+ LIR_OprDesc::fpu_register |
+ LIR_OprDesc::double_size); }
+
+ static LIR_Opr single_xmm(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+ LIR_OprDesc::float_type |
+ LIR_OprDesc::fpu_register |
+ LIR_OprDesc::single_size |
+ LIR_OprDesc::is_xmm_mask); }
+ static LIR_Opr double_xmm(int reg) { return (LIR_Opr)(intptr_t)((reg << LIR_OprDesc::reg1_shift) |
+ (reg << LIR_OprDesc::reg2_shift) |
+ LIR_OprDesc::double_type |
+ LIR_OprDesc::fpu_register |
+ LIR_OprDesc::double_size |
+ LIR_OprDesc::is_xmm_mask); }
+#endif // X86
static LIR_Opr virtual_register(int index, BasicType type) {
LIR_Opr res;
switch (type) {
case T_OBJECT: // fall through
- case T_ARRAY: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
- case T_INT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type | LIR_OprDesc::cpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
- case T_LONG: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type | LIR_OprDesc::cpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break;
- case T_FLOAT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type | LIR_OprDesc::fpu_register | LIR_OprDesc::single_size | LIR_OprDesc::virtual_mask); break;
- case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::fpu_register | LIR_OprDesc::double_size | LIR_OprDesc::virtual_mask); break;
+ case T_ARRAY:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::object_type |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::single_size |
+ LIR_OprDesc::virtual_mask);
+ break;
+
+ case T_INT:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::int_type |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::single_size |
+ LIR_OprDesc::virtual_mask);
+ break;
+
+ case T_LONG:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::long_type |
+ LIR_OprDesc::cpu_register |
+ LIR_OprDesc::double_size |
+ LIR_OprDesc::virtual_mask);
+ break;
+
+ case T_FLOAT:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::float_type |
+ LIR_OprDesc::fpu_register |
+ LIR_OprDesc::single_size |
+ LIR_OprDesc::virtual_mask);
+ break;
+
+      case T_DOUBLE:
+        res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::double_type |
+ LIR_OprDesc::fpu_register |
+ LIR_OprDesc::double_size |
+ LIR_OprDesc::virtual_mask);
+ break;
default: ShouldNotReachHere(); res = illegalOpr;
}
@@ -572,8 +642,8 @@ class LIR_OprFact: public AllStatic {
// old-style calculation; check if old and new method are equal
LIR_OprDesc::OprType t = as_OprType(type);
- LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | t |
- ((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
+ LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) | t |
+ ((type == T_FLOAT || type == T_DOUBLE) ? LIR_OprDesc::fpu_register : LIR_OprDesc::cpu_register) |
LIR_OprDesc::size_for(type) | LIR_OprDesc::virtual_mask);
assert(res == old_res, "old and new method not equal");
#endif
@@ -588,11 +658,39 @@ class LIR_OprFact: public AllStatic {
LIR_Opr res;
switch (type) {
case T_OBJECT: // fall through
- case T_ARRAY: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::object_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
- case T_INT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::int_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
- case T_LONG: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::long_type | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break;
- case T_FLOAT: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::float_type | LIR_OprDesc::stack_value | LIR_OprDesc::single_size); break;
- case T_DOUBLE: res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::double_type | LIR_OprDesc::stack_value | LIR_OprDesc::double_size); break;
+ case T_ARRAY:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::object_type |
+ LIR_OprDesc::stack_value |
+ LIR_OprDesc::single_size);
+ break;
+
+ case T_INT:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::int_type |
+ LIR_OprDesc::stack_value |
+ LIR_OprDesc::single_size);
+ break;
+
+ case T_LONG:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::long_type |
+ LIR_OprDesc::stack_value |
+ LIR_OprDesc::double_size);
+ break;
+
+ case T_FLOAT:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::float_type |
+ LIR_OprDesc::stack_value |
+ LIR_OprDesc::single_size);
+ break;
+ case T_DOUBLE:
+ res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::double_type |
+ LIR_OprDesc::stack_value |
+ LIR_OprDesc::double_size);
+ break;
default: ShouldNotReachHere(); res = illegalOpr;
}
@@ -601,7 +699,10 @@ class LIR_OprFact: public AllStatic {
assert(index >= 0, "index must be positive");
assert(index <= (max_jint >> LIR_OprDesc::data_shift), "index is too big");
- LIR_Opr old_res = (LIR_Opr)((index << LIR_OprDesc::data_shift) | LIR_OprDesc::stack_value | as_OprType(type) | LIR_OprDesc::size_for(type));
+ LIR_Opr old_res = (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
+ LIR_OprDesc::stack_value |
+ as_OprType(type) |
+ LIR_OprDesc::size_for(type));
assert(res == old_res, "old and new method not equal");
#endif
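Every factory method in this block gains an (intptr_t) in its cast chain because LIR_Opr is a tagged pointer: on an LP64 build, casting a 32-bit integer expression directly to a pointer type draws conversion warnings, so the value is widened first. The shape being protected, in miniature (the helper name is invented):

    typedef class LIR_OprDesc* LIR_Opr;   // pointer-sized tagged handle

    inline LIR_Opr make_stack_slot(int index, int type_bits, int size_bits) {
      return (LIR_Opr)(intptr_t)((index << LIR_OprDesc::data_shift) |
                                 type_bits | LIR_OprDesc::stack_value | size_bits);
    }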
diff --git a/src/share/vm/c1/c1_LIRAssembler.cpp b/src/share/vm/c1/c1_LIRAssembler.cpp
index a8ed3da18..1a0dac769 100644
--- a/src/share/vm/c1/c1_LIRAssembler.cpp
+++ b/src/share/vm/c1/c1_LIRAssembler.cpp
@@ -215,7 +215,7 @@ void LIR_Assembler::emit_block(BlockBegin* block) {
#endif /* PRODUCT */
assert(block->lir() != NULL, "must have LIR");
- IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
+ X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
#ifndef PRODUCT
if (CommentedAssembly) {
@@ -227,7 +227,7 @@ void LIR_Assembler::emit_block(BlockBegin* block) {
emit_lir_list(block->lir());
- IA32_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
+ X86_ONLY(assert(_masm->rsp_offset() == 0, "frame size should be fixed"));
}
@@ -434,7 +434,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
break;
default: ShouldNotReachHere();
}
-#if defined(IA32) && defined(TIERED)
+#if defined(X86) && defined(TIERED)
// C2 leave fpu stack dirty clean it
if (UseSSE < 2) {
int i;
@@ -445,7 +445,7 @@ void LIR_Assembler::emit_call(LIR_OpJavaCall* op) {
ffree(0);
}
}
-#endif // IA32 && TIERED
+#endif // X86 && TIERED
}
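The s/IA32/X86/ substitutions in the C1 files assume a single umbrella define that is set for both the 32- and 64-bit x86 ports, with _LP64 reserved for places where the word size actually matters, plus the ONLY/NOT companions used by the X86_ONLY asserts above. Sketch of the assumed definitions:

    #if defined(IA32) || defined(AMD64)
    #define X86
    #define X86_ONLY(code) code
    #define NOT_X86(code)
    #else
    #define X86_ONLY(code)
    #define NOT_X86(code) code
    #endif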
diff --git a/src/share/vm/c1/c1_LIRAssembler.hpp b/src/share/vm/c1/c1_LIRAssembler.hpp
index 3a64fe678..12adf1bac 100644
--- a/src/share/vm/c1/c1_LIRAssembler.hpp
+++ b/src/share/vm/c1/c1_LIRAssembler.hpp
@@ -75,9 +75,9 @@ class LIR_Assembler: public CompilationResourceObj {
void emit_stubs(CodeStubList* stub_list);
// addresses
- static Address as_Address(LIR_Address* addr);
- static Address as_Address_lo(LIR_Address* addr);
- static Address as_Address_hi(LIR_Address* addr);
+ Address as_Address(LIR_Address* addr);
+ Address as_Address_lo(LIR_Address* addr);
+ Address as_Address_hi(LIR_Address* addr);
// debug information
void add_call_info(int pc_offset, CodeEmitInfo* cinfo);
diff --git a/src/share/vm/c1/c1_LIRGenerator.cpp b/src/share/vm/c1/c1_LIRGenerator.cpp
index eb90b3fd2..6fe213ed1 100644
--- a/src/share/vm/c1/c1_LIRGenerator.cpp
+++ b/src/share/vm/c1/c1_LIRGenerator.cpp
@@ -1717,7 +1717,7 @@ void LIRGenerator::do_UnsafeGetRaw(UnsafeGetRaw* x) {
assert(log2_scale == 0, "must not have a scale");
addr = new LIR_Address(base_op, index_op->as_jint(), dst_type);
} else {
-#ifdef IA32
+#ifdef X86
addr = new LIR_Address(base_op, index_op, LIR_Address::Scale(log2_scale), 0, dst_type);
#else
if (index_op->is_illegal() || log2_scale == 0) {
diff --git a/src/share/vm/c1/c1_LinearScan.cpp b/src/share/vm/c1/c1_LinearScan.cpp
index 0d733b7e6..22246148d 100644
--- a/src/share/vm/c1/c1_LinearScan.cpp
+++ b/src/share/vm/c1/c1_LinearScan.cpp
@@ -80,7 +80,7 @@ LinearScan::LinearScan(IR* ir, LIRGenerator* gen, FrameMap* frame_map)
, _scope_value_cache(0) // initialized later with correct length
, _interval_in_loop(0, 0) // initialized later with correct length
, _cached_blocks(*ir->linear_scan_order())
-#ifdef IA32
+#ifdef X86
, _fpu_stack_allocator(NULL)
#endif
{
@@ -116,7 +116,7 @@ int LinearScan::reg_num(LIR_Opr opr) {
return opr->cpu_regnr();
} else if (opr->is_double_cpu()) {
return opr->cpu_regnrLo();
-#ifdef IA32
+#ifdef X86
} else if (opr->is_single_xmm()) {
return opr->fpu_regnr() + pd_first_xmm_reg;
} else if (opr->is_double_xmm()) {
@@ -128,6 +128,7 @@ int LinearScan::reg_num(LIR_Opr opr) {
return opr->fpu_regnrLo() + pd_first_fpu_reg;
} else {
ShouldNotReachHere();
+ return -1;
}
}
@@ -140,7 +141,7 @@ int LinearScan::reg_numHi(LIR_Opr opr) {
return -1;
} else if (opr->is_double_cpu()) {
return opr->cpu_regnrHi();
-#ifdef IA32
+#ifdef X86
} else if (opr->is_single_xmm()) {
return -1;
} else if (opr->is_double_xmm()) {
@@ -152,6 +153,7 @@ int LinearScan::reg_numHi(LIR_Opr opr) {
return opr->fpu_regnrHi() + pd_first_fpu_reg;
} else {
ShouldNotReachHere();
+ return -1;
}
}
@@ -1063,7 +1065,7 @@ IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
}
-#ifdef IA32
+#ifdef X86
if (op->code() == lir_cmove) {
// conditional moves can handle stack operands
assert(op->result_opr()->is_register(), "result must always be in a register");
@@ -1128,7 +1130,7 @@ IntervalUseKind LinearScan::use_kind_of_input_operand(LIR_Op* op, LIR_Opr opr) {
}
}
}
-#endif // IA32
+#endif // X86
// all other operands require a register
return mustHaveRegister;
@@ -1261,7 +1263,7 @@ void LinearScan::build_intervals() {
// virtual fpu operands. Otherwise no allocation for fpu registers is
    // performed and so the temp ranges would be useless
if (has_fpu_registers()) {
-#ifdef IA32
+#ifdef X86
if (UseSSE < 2) {
#endif
for (i = 0; i < FrameMap::nof_caller_save_fpu_regs; i++) {
@@ -1270,7 +1272,7 @@ void LinearScan::build_intervals() {
assert(reg_numHi(opr) == -1, "missing addition of range for hi-register");
caller_save_registers[num_caller_save_registers++] = reg_num(opr);
}
-#ifdef IA32
+#ifdef X86
}
if (UseSSE > 0) {
for (i = 0; i < FrameMap::nof_caller_save_xmm_regs; i++) {
@@ -1299,8 +1301,8 @@ void LinearScan::build_intervals() {
// Update intervals for registers live at the end of this block;
BitMap live = block->live_out();
- int size = live.size();
- for (int number = live.get_next_one_offset(0, size); number < size; number = live.get_next_one_offset(number + 1, size)) {
+ int size = (int)live.size();
+ for (int number = (int)live.get_next_one_offset(0, size); number < size; number = (int)live.get_next_one_offset(number + 1, size)) {
assert(live.at(number), "should not stop here otherwise");
assert(number >= LIR_OprDesc::vreg_base, "fixed intervals must not be live on block bounds");
TRACE_LINEAR_SCAN(2, tty->print_cr("live in %d to %d", number, block_to + 2));
@@ -1654,7 +1656,7 @@ void LinearScan::resolve_collect_mappings(BlockBegin* from_block, BlockBegin* to
const BitMap live_at_edge = to_block->live_in();
// visit all registers where the live_at_edge bit is set
- for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) {
+ for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
    assert(r < num_regs, "live information set for non-existing interval");
assert(from_block->live_out().at(r) && to_block->live_in().at(r), "interval not live at this edge");
@@ -1824,7 +1826,7 @@ void LinearScan::resolve_exception_entry(BlockBegin* block, MoveResolver &move_r
// visit all registers where the live_in bit is set
int size = live_set_size();
- for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) {
+ for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
resolve_exception_entry(block, r, move_resolver);
}
@@ -1898,7 +1900,7 @@ void LinearScan::resolve_exception_edge(XHandler* handler, int throwing_op_id, M
// visit all registers where the live_in bit is set
BlockBegin* block = handler->entry_block();
int size = live_set_size();
- for (int r = block->live_in().get_next_one_offset(0, size); r < size; r = block->live_in().get_next_one_offset(r + 1, size)) {
+ for (int r = (int)block->live_in().get_next_one_offset(0, size); r < size; r = (int)block->live_in().get_next_one_offset(r + 1, size)) {
resolve_exception_edge(handler, throwing_op_id, r, NULL, move_resolver);
}
@@ -2032,19 +2034,19 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
assert(assigned_reg % 2 == 0 && assigned_reg + 1 == assigned_regHi, "must be sequential and even");
}
-#ifdef SPARC
#ifdef _LP64
return LIR_OprFact::double_cpu(assigned_reg, assigned_reg);
#else
+#ifdef SPARC
return LIR_OprFact::double_cpu(assigned_regHi, assigned_reg);
-#endif
#else
return LIR_OprFact::double_cpu(assigned_reg, assigned_regHi);
-#endif
+#endif // SPARC
+#endif // LP64
}
case T_FLOAT: {
-#ifdef IA32
+#ifdef X86
if (UseSSE >= 1) {
assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register");
@@ -2058,7 +2060,7 @@ LIR_Opr LinearScan::calc_operand_for_interval(const Interval* interval) {
}
case T_DOUBLE: {
-#ifdef IA32
+#ifdef X86
if (UseSSE >= 2) {
assert(assigned_reg >= pd_first_xmm_reg && assigned_reg <= pd_last_xmm_reg, "no xmm register");
assert(interval->assigned_regHi() == any_reg, "must not have hi register (double xmm values are stored in one register)");
@@ -2122,7 +2124,7 @@ LIR_Opr LinearScan::color_lir_opr(LIR_Opr opr, int op_id, LIR_OpVisitState::OprM
LIR_Opr res = operand_for_interval(interval);
-#ifdef IA32
+#ifdef X86
// new semantic for is_last_use: not only set on definite end of interval,
// but also before hole
// This may still miss some cases (e.g. for dead values), but it is not necessary that the
@@ -2475,6 +2477,7 @@ int LinearScan::append_scope_value_for_constant(LIR_Opr opr, GrowableArray<Scope
default:
ShouldNotReachHere();
+ return -1;
}
}
@@ -2515,7 +2518,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
scope_values->append(sv);
return 1;
-#ifdef IA32
+#ifdef X86
} else if (opr->is_single_xmm()) {
VMReg rname = opr->as_xmm_float_reg()->as_VMReg();
LocationValue* sv = new LocationValue(Location::new_reg_loc(Location::normal, rname));
@@ -2525,7 +2528,7 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
#endif
} else if (opr->is_single_fpu()) {
-#ifdef IA32
+#ifdef X86
// the exact location of fpu stack values is only known
// during fpu stack allocation, so the stack allocator object
// must be present
@@ -2548,12 +2551,23 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
ScopeValue* second;
if (opr->is_double_stack()) {
+#ifdef _LP64
+ Location loc1;
+ Location::Type loc_type = opr->type() == T_LONG ? Location::lng : Location::dbl;
+ if (!frame_map()->locations_for_slot(opr->double_stack_ix(), loc_type, &loc1, NULL)) {
+ bailout("too large frame");
+ }
+ // Does this reverse on x86 vs. sparc?
+ first = new LocationValue(loc1);
+ second = &_int_0_scope_value;
+#else
Location loc1, loc2;
if (!frame_map()->locations_for_slot(opr->double_stack_ix(), Location::normal, &loc1, &loc2)) {
bailout("too large frame");
}
first = new LocationValue(loc1);
second = new LocationValue(loc2);
+#endif // _LP64
} else if (opr->is_double_cpu()) {
#ifdef _LP64
@@ -2573,9 +2587,10 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
first = new LocationValue(Location::new_reg_loc(Location::normal, rname_first));
second = new LocationValue(Location::new_reg_loc(Location::normal, rname_second));
-#endif
+#endif //_LP64
-#ifdef IA32
+
+#ifdef X86
} else if (opr->is_double_xmm()) {
assert(opr->fpu_regnrLo() == opr->fpu_regnrHi(), "assumed in calculation");
VMReg rname_first = opr->as_xmm_double_reg()->as_VMReg();
@@ -2589,13 +2604,13 @@ int LinearScan::append_scope_value_for_operand(LIR_Opr opr, GrowableArray<ScopeV
} else if (opr->is_double_fpu()) {
// On SPARC, fpu_regnrLo/fpu_regnrHi represents the two halves of
- // the double as float registers in the native ordering. On IA32,
+ // the double as float registers in the native ordering. On X86,
// fpu_regnrLo is a FPU stack slot whose VMReg represents
// the low-order word of the double and fpu_regnrLo + 1 is the
// name for the other half. *first and *second must represent the
// least and most significant words, respectively.
-#ifdef IA32
+#ifdef X86
// the exact location of fpu stack values is only known
// during fpu stack allocation, so the stack allocator object
// must be present
@@ -2865,7 +2880,6 @@ void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
op->verify();
#endif
-#ifndef _LP64
// remove useless moves
if (op->code() == lir_move) {
assert(op->as_Op1() != NULL, "move must be LIR_Op1");
@@ -2879,7 +2893,6 @@ void LinearScan::assign_reg_num(LIR_OpList* instructions, IntervalWalker* iw) {
has_dead = true;
}
}
-#endif
}
if (has_dead) {
@@ -3192,7 +3205,7 @@ void LinearScan::verify_constants() {
BitMap live_at_edge = block->live_in();
// visit all registers where the live_at_edge bit is set
- for (int r = live_at_edge.get_next_one_offset(0, size); r < size; r = live_at_edge.get_next_one_offset(r + 1, size)) {
+ for (int r = (int)live_at_edge.get_next_one_offset(0, size); r < size; r = (int)live_at_edge.get_next_one_offset(r + 1, size)) {
TRACE_LINEAR_SCAN(4, tty->print("checking interval %d of block B%d", r, block->block_id()));
Value value = gen()->instruction_for_vreg(r);
@@ -3438,7 +3451,7 @@ void RegisterVerifier::process_operations(LIR_List* ops, IntervalList* input_sta
state_put(input_state, reg_num(FrameMap::caller_save_fpu_reg_at(j)), NULL);
}
-#ifdef IA32
+#ifdef X86
for (j = 0; j < FrameMap::nof_caller_save_xmm_regs; j++) {
state_put(input_state, reg_num(FrameMap::caller_save_xmm_reg_at(j)), NULL);
}
@@ -4357,7 +4370,7 @@ void Interval::print(outputStream* out) const {
opr = LIR_OprFact::single_cpu(assigned_reg());
} else if (assigned_reg() >= pd_first_fpu_reg && assigned_reg() <= pd_last_fpu_reg) {
opr = LIR_OprFact::single_fpu(assigned_reg() - pd_first_fpu_reg);
-#ifdef IA32
+#ifdef X86
} else if (assigned_reg() >= pd_first_xmm_reg && assigned_reg() <= pd_last_xmm_reg) {
opr = LIR_OprFact::single_xmm(assigned_reg() - pd_first_xmm_reg);
#endif
@@ -5435,7 +5448,7 @@ void LinearScanWalker::alloc_locked_reg(Interval* cur) {
}
bool LinearScanWalker::no_allocation_possible(Interval* cur) {
-#ifdef IA32
+#ifdef X86
// fast calculation of intervals that can never get a register because the
// the next instruction is a call that blocks all registers
// Note: this does not work if callee-saved registers are available (e.g. on Sparc)
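Editorial note on the casts added to the live-range loops above: BitMap::get_next_one_offset() returns an unsigned index, so storing it in an int loop variable needs the explicit narrowing cast for the signed `r < size` comparison. A minimal standalone sketch of the same loop shape, with find_next_set as a stand-in for the real BitMap API:

#include <cstdio>
#include <vector>

// Stand-in for BitMap::get_next_one_offset(): index of the next set bit at or
// after 'from', or 'size' if there is none.
static size_t find_next_set(const std::vector<bool>& bits, size_t from, size_t size) {
    for (size_t i = from; i < size; i++) {
        if (bits[i]) return i;
    }
    return size;
}

int main() {
    std::vector<bool> live(8, false);
    live[1] = live[5] = true;
    int size = (int)live.size();
    // The explicit (int) casts mirror the patch: the iterator returns an
    // unsigned index, the register number 'r' is a signed int.
    for (int r = (int)find_next_set(live, 0, size); r < size;
         r = (int)find_next_set(live, r + 1, size)) {
        printf("live register %d\n", r);
    }
    return 0;
}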
diff --git a/src/share/vm/c1/c1_LinearScan.hpp b/src/share/vm/c1/c1_LinearScan.hpp
index bc187fa75..10487b074 100644
--- a/src/share/vm/c1/c1_LinearScan.hpp
+++ b/src/share/vm/c1/c1_LinearScan.hpp
@@ -177,7 +177,7 @@ class LinearScan : public CompilationResourceObj {
bool is_interval_in_loop(int interval, int loop) const { return _interval_in_loop.at(interval, loop); }
// handling of fpu stack allocation (platform dependent, needed for debug information generation)
-#ifdef IA32
+#ifdef X86
FpuStackAllocator* _fpu_stack_allocator;
bool use_fpu_stack_allocation() const { return UseSSE < 2 && has_fpu_registers(); }
#else
diff --git a/src/share/vm/c1/c1_Runtime1.cpp b/src/share/vm/c1/c1_Runtime1.cpp
index e7321ccc3..4a4765099 100644
--- a/src/share/vm/c1/c1_Runtime1.cpp
+++ b/src/share/vm/c1/c1_Runtime1.cpp
@@ -336,21 +336,6 @@ JRT_ENTRY(void, Runtime1::new_multi_array(JavaThread* thread, klassOopDesc* klas
assert(oop(klass)->is_klass(), "not a class");
assert(rank >= 1, "rank must be nonzero");
-#ifdef _LP64
-// In 64 bit mode, the sizes are stored in the top 32 bits
-// of each 64 bit stack entry.
-// dims is actually an intptr_t * because the arguments
-// are pushed onto a 64 bit stack.
-// We must create an array of jints to pass to multi_allocate.
-// We reuse the current stack because it will be popped
-// after this bytecode is completed.
- if ( rank > 1 ) {
- int index;
- for ( index = 1; index < rank; index++ ) { // First size is ok
- dims[index] = dims[index*2];
- }
- }
-#endif
oop obj = arrayKlass::cast(klass)->multi_allocate(rank, dims, CHECK);
thread->set_vm_result(obj);
JRT_END
diff --git a/src/share/vm/ci/ciTypeFlow.hpp b/src/share/vm/ci/ciTypeFlow.hpp
index 2235a90a1..9b5193ba5 100644
--- a/src/share/vm/ci/ciTypeFlow.hpp
+++ b/src/share/vm/ci/ciTypeFlow.hpp
@@ -127,7 +127,7 @@ public:
// Used as a combined index for locals and temps
enum Cell {
- Cell_0
+ Cell_0, Cell_max = INT_MAX
};
// A StateVector summarizes the type information at some
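Editorial note on the Cell change above: adding Cell_max = INT_MAX is the usual trick for forcing the enum's value range (and hence its representation) to cover the whole int range, so Cell can carry arbitrary local/temp indices. A hedged standalone illustration:

#include <climits>

// Without a large enumerator the implementation may pick a narrower underlying
// type; the INT_MAX sentinel guarantees Cell can represent any int-sized index.
enum Cell { Cell_0, Cell_max = INT_MAX };

static_assert(sizeof(Cell) >= sizeof(int), "Cell must hold any int-sized index");

int main() {
    Cell c = (Cell)42;   // arbitrary index values are representable
    return (int)c == 42 ? 0 : 1;
}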
diff --git a/src/share/vm/code/relocInfo.hpp b/src/share/vm/code/relocInfo.hpp
index 8a2867dc0..36006e824 100644
--- a/src/share/vm/code/relocInfo.hpp
+++ b/src/share/vm/code/relocInfo.hpp
@@ -1200,11 +1200,13 @@ class section_word_Relocation : public internal_word_Relocation {
class poll_Relocation : public Relocation {
bool is_data() { return true; }
relocInfo::relocType type() { return relocInfo::poll_type; }
+ void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
};
class poll_return_Relocation : public Relocation {
bool is_data() { return true; }
relocInfo::relocType type() { return relocInfo::poll_return_type; }
+ void fix_relocation_after_move(const CodeBuffer* src, CodeBuffer* dest);
};
diff --git a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp b/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
index 94377a3be..d4c641bf0 100644
--- a/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
+++ b/src/share/vm/gc_implementation/parNew/parGCAllocBuffer.cpp
@@ -229,7 +229,7 @@ void ParGCAllocBufferWithBOT::retire(bool end_of_gc, bool retain) {
HeapWord* first_card_start = _bsa->address_for_index(first_card_index);
if (first_card_start < pre_top) {
HeapWord* second_card_start =
- _bsa->address_for_index(first_card_index + 1);
+ _bsa->inc_by_region_size(first_card_start);
// Ensure enough room to fill with the smallest block
second_card_start = MAX2(second_card_start, pre_top + AlignmentReserve);
diff --git a/src/share/vm/includeDB_compiler1 b/src/share/vm/includeDB_compiler1
index 3509507f1..d3006b094 100644
--- a/src/share/vm/includeDB_compiler1
+++ b/src/share/vm/includeDB_compiler1
@@ -258,6 +258,7 @@ c1_LIRGenerator_<arch>.cpp ciArray.hpp
c1_LIRGenerator_<arch>.cpp ciObjArrayKlass.hpp
c1_LIRGenerator_<arch>.cpp ciTypeArrayKlass.hpp
c1_LIRGenerator_<arch>.cpp sharedRuntime.hpp
+c1_LIRGenerator_<arch>.cpp vmreg_<arch>.inline.hpp
c1_LinearScan.cpp c1_CFGPrinter.hpp
c1_LinearScan.cpp c1_Compilation.hpp
@@ -281,7 +282,7 @@ c1_LinearScan_<arch>.cpp c1_LinearScan.hpp
c1_LinearScan_<arch>.hpp generate_platform_dependent_include
c1_MacroAssembler.hpp assembler.hpp
-c1_MacroAssembler.hpp assembler_<arch_model>.inline.hpp
+c1_MacroAssembler.hpp assembler_<arch>.inline.hpp
c1_MacroAssembler_<arch>.cpp arrayOop.hpp
c1_MacroAssembler_<arch>.cpp biasedLocking.hpp
diff --git a/src/share/vm/includeDB_compiler2 b/src/share/vm/includeDB_compiler2
index 1d2cb6d8b..07ae8a870 100644
--- a/src/share/vm/includeDB_compiler2
+++ b/src/share/vm/includeDB_compiler2
@@ -26,7 +26,7 @@ ad_<arch_model>.cpp adGlobals_<arch_model>.hpp
ad_<arch_model>.cpp ad_<arch_model>.hpp
ad_<arch_model>.cpp allocation.inline.hpp
ad_<arch_model>.cpp assembler.hpp
-ad_<arch_model>.cpp assembler_<arch_model>.inline.hpp
+ad_<arch_model>.cpp assembler_<arch>.inline.hpp
ad_<arch_model>.cpp biasedLocking.hpp
ad_<arch_model>.cpp cfgnode.hpp
ad_<arch_model>.cpp collectedHeap.inline.hpp
@@ -957,7 +957,7 @@ runtime.hpp vframe.hpp
runtime_<arch_model>.cpp adGlobals_<arch_model>.hpp
runtime_<arch_model>.cpp ad_<arch_model>.hpp
runtime_<arch_model>.cpp assembler.hpp
-runtime_<arch_model>.cpp assembler_<arch_model>.inline.hpp
+runtime_<arch_model>.cpp assembler_<arch>.inline.hpp
runtime_<arch_model>.cpp globalDefinitions.hpp
runtime_<arch_model>.cpp interfaceSupport.hpp
runtime_<arch_model>.cpp interpreter.hpp
diff --git a/src/share/vm/includeDB_core b/src/share/vm/includeDB_core
index b9b46dcaf..5b2045617 100644
--- a/src/share/vm/includeDB_core
+++ b/src/share/vm/includeDB_core
@@ -228,7 +228,7 @@ arrayOop.hpp universe.inline.hpp
assembler.cpp assembler.hpp
assembler.cpp assembler.inline.hpp
-assembler.cpp assembler_<arch_model>.inline.hpp
+assembler.cpp assembler_<arch>.inline.hpp
assembler.cpp codeBuffer.hpp
assembler.cpp icache.hpp
assembler.cpp os.hpp
@@ -248,29 +248,29 @@ assembler.inline.hpp codeBuffer.hpp
assembler.inline.hpp disassembler.hpp
assembler.inline.hpp threadLocalStorage.hpp
-assembler_<arch_model>.cpp assembler_<arch_model>.inline.hpp
-assembler_<arch_model>.cpp biasedLocking.hpp
-assembler_<arch_model>.cpp cardTableModRefBS.hpp
-assembler_<arch_model>.cpp collectedHeap.inline.hpp
-assembler_<arch_model>.cpp interfaceSupport.hpp
-assembler_<arch_model>.cpp interpreter.hpp
-assembler_<arch_model>.cpp objectMonitor.hpp
-assembler_<arch_model>.cpp os.hpp
-assembler_<arch_model>.cpp resourceArea.hpp
-assembler_<arch_model>.cpp sharedRuntime.hpp
-assembler_<arch_model>.cpp stubRoutines.hpp
-
-assembler_<arch_model>.hpp generate_platform_dependent_include
-
-assembler_<arch_model>.inline.hpp assembler.inline.hpp
-assembler_<arch_model>.inline.hpp codeBuffer.hpp
-assembler_<arch_model>.inline.hpp codeCache.hpp
-assembler_<arch_model>.inline.hpp handles.inline.hpp
-
-assembler_<os_arch_model>.cpp assembler.hpp
-assembler_<os_arch_model>.cpp assembler_<arch_model>.inline.hpp
-assembler_<os_arch_model>.cpp os.hpp
-assembler_<os_arch_model>.cpp threadLocalStorage.hpp
+assembler_<arch>.cpp assembler_<arch>.inline.hpp
+assembler_<arch>.cpp biasedLocking.hpp
+assembler_<arch>.cpp cardTableModRefBS.hpp
+assembler_<arch>.cpp collectedHeap.inline.hpp
+assembler_<arch>.cpp interfaceSupport.hpp
+assembler_<arch>.cpp interpreter.hpp
+assembler_<arch>.cpp objectMonitor.hpp
+assembler_<arch>.cpp os.hpp
+assembler_<arch>.cpp resourceArea.hpp
+assembler_<arch>.cpp sharedRuntime.hpp
+assembler_<arch>.cpp stubRoutines.hpp
+
+assembler_<arch>.hpp generate_platform_dependent_include
+
+assembler_<arch>.inline.hpp assembler.inline.hpp
+assembler_<arch>.inline.hpp codeBuffer.hpp
+assembler_<arch>.inline.hpp codeCache.hpp
+assembler_<arch>.inline.hpp handles.inline.hpp
+
+assembler_<os_arch>.cpp assembler.hpp
+assembler_<os_arch>.cpp assembler_<arch>.inline.hpp
+assembler_<os_arch>.cpp os.hpp
+assembler_<os_arch>.cpp threadLocalStorage.hpp
atomic.cpp atomic.hpp
atomic.cpp atomic_<os_arch>.inline.hpp
@@ -1926,7 +1926,7 @@ hpi_<os_family>.cpp os.hpp
hpi_imported.h jni.h
-icBuffer.cpp assembler_<arch_model>.inline.hpp
+icBuffer.cpp assembler_<arch>.inline.hpp
icBuffer.cpp collectedHeap.inline.hpp
icBuffer.cpp compiledIC.hpp
icBuffer.cpp icBuffer.hpp
@@ -1947,7 +1947,7 @@ icBuffer.hpp bytecodes.hpp
icBuffer.hpp stubs.hpp
icBuffer_<arch>.cpp assembler.hpp
-icBuffer_<arch>.cpp assembler_<arch_model>.inline.hpp
+icBuffer_<arch>.cpp assembler_<arch>.inline.hpp
icBuffer_<arch>.cpp bytecodes.hpp
icBuffer_<arch>.cpp collectedHeap.inline.hpp
icBuffer_<arch>.cpp icBuffer.hpp
@@ -1962,7 +1962,7 @@ icache.cpp resourceArea.hpp
icache.hpp allocation.hpp
icache.hpp stubCodeGenerator.hpp
-icache_<arch>.cpp assembler_<arch_model>.inline.hpp
+icache_<arch>.cpp assembler_<arch>.inline.hpp
icache_<arch>.cpp icache.hpp
icache_<arch>.hpp generate_platform_dependent_include
@@ -2095,7 +2095,7 @@ interp_masm_<arch_model>.cpp sharedRuntime.hpp
interp_masm_<arch_model>.cpp synchronizer.hpp
interp_masm_<arch_model>.cpp thread_<os_family>.inline.hpp
-interp_masm_<arch_model>.hpp assembler_<arch_model>.inline.hpp
+interp_masm_<arch_model>.hpp assembler_<arch>.inline.hpp
interp_masm_<arch_model>.hpp invocationCounter.hpp
interpreter.cpp allocation.inline.hpp
@@ -2402,7 +2402,7 @@ jniFastGetField.cpp jniFastGetField.hpp
jniFastGetField.hpp allocation.hpp
jniFastGetField.hpp jvm_misc.hpp
-jniFastGetField_<arch_model>.cpp assembler_<arch_model>.inline.hpp
+jniFastGetField_<arch_model>.cpp assembler_<arch>.inline.hpp
jniFastGetField_<arch_model>.cpp jniFastGetField.hpp
jniFastGetField_<arch_model>.cpp jvm_misc.hpp
jniFastGetField_<arch_model>.cpp resourceArea.hpp
@@ -2905,7 +2905,7 @@ mutex_<os_family>.inline.hpp interfaceSupport.hpp
mutex_<os_family>.inline.hpp os_<os_family>.inline.hpp
mutex_<os_family>.inline.hpp thread_<os_family>.inline.hpp
-nativeInst_<arch>.cpp assembler_<arch_model>.inline.hpp
+nativeInst_<arch>.cpp assembler_<arch>.inline.hpp
nativeInst_<arch>.cpp handles.hpp
nativeInst_<arch>.cpp nativeInst_<arch>.hpp
nativeInst_<arch>.cpp oop.hpp
@@ -3174,7 +3174,7 @@ os.hpp top.hpp
os_<os_arch>.cpp allocation.inline.hpp
os_<os_arch>.cpp arguments.hpp
-os_<os_arch>.cpp assembler_<arch_model>.inline.hpp
+os_<os_arch>.cpp assembler_<arch>.inline.hpp
os_<os_arch>.cpp classLoader.hpp
os_<os_arch>.cpp events.hpp
os_<os_arch>.cpp extendedPC.hpp
@@ -3208,7 +3208,7 @@ os_<os_arch>.hpp generate_platform_dependent_include
os_<os_family>.cpp allocation.inline.hpp
os_<os_family>.cpp arguments.hpp
-os_<os_family>.cpp assembler_<arch_model>.inline.hpp
+os_<os_family>.cpp assembler_<arch>.inline.hpp
os_<os_family>.cpp attachListener.hpp
os_<os_family>.cpp classLoader.hpp
os_<os_family>.cpp compileBroker.hpp
@@ -3267,7 +3267,7 @@ osThread.hpp javaFrameAnchor.hpp
osThread.hpp objectMonitor.hpp
osThread.hpp top.hpp
-osThread_<os_family>.cpp assembler_<arch_model>.inline.hpp
+osThread_<os_family>.cpp assembler_<arch>.inline.hpp
osThread_<os_family>.cpp atomic.hpp
osThread_<os_family>.cpp handles.inline.hpp
osThread_<os_family>.cpp mutexLocker.hpp
@@ -3480,7 +3480,7 @@ register_definitions_<arch>.cpp interp_masm_<arch_model>.hpp
register_definitions_<arch>.cpp register.hpp
register_definitions_<arch>.cpp register_<arch>.hpp
-relocInfo.cpp assembler_<arch_model>.inline.hpp
+relocInfo.cpp assembler_<arch>.inline.hpp
relocInfo.cpp compiledIC.hpp
relocInfo.cpp copy.hpp
relocInfo.cpp nativeInst_<arch>.hpp
@@ -3493,7 +3493,7 @@ relocInfo.hpp allocation.hpp
relocInfo.hpp top.hpp
relocInfo_<arch>.cpp assembler.inline.hpp
-relocInfo_<arch>.cpp assembler_<arch_model>.inline.hpp
+relocInfo_<arch>.cpp assembler_<arch>.inline.hpp
relocInfo_<arch>.cpp nativeInst_<arch>.hpp
relocInfo_<arch>.cpp oop.inline.hpp
relocInfo_<arch>.cpp relocInfo.hpp
@@ -3676,7 +3676,7 @@ sharedRuntime.hpp resourceArea.hpp
sharedRuntime.hpp threadLocalStorage.hpp
sharedRuntime_<arch_model>.cpp assembler.hpp
-sharedRuntime_<arch_model>.cpp assembler_<arch_model>.inline.hpp
+sharedRuntime_<arch_model>.cpp assembler_<arch>.inline.hpp
sharedRuntime_<arch_model>.cpp compiledICHolderOop.hpp
sharedRuntime_<arch_model>.cpp debugInfoRec.hpp
sharedRuntime_<arch_model>.cpp icBuffer.hpp
@@ -3819,7 +3819,7 @@ statSampler.cpp vm_version_<arch_model>.hpp
statSampler.hpp perfData.hpp
statSampler.hpp task.hpp
-stubCodeGenerator.cpp assembler_<arch_model>.inline.hpp
+stubCodeGenerator.cpp assembler_<arch>.inline.hpp
stubCodeGenerator.cpp disassembler.hpp
stubCodeGenerator.cpp forte.hpp
stubCodeGenerator.cpp oop.inline.hpp
@@ -3830,7 +3830,7 @@ stubCodeGenerator.hpp allocation.hpp
stubCodeGenerator.hpp assembler.hpp
stubGenerator_<arch_model>.cpp assembler.hpp
-stubGenerator_<arch_model>.cpp assembler_<arch_model>.inline.hpp
+stubGenerator_<arch_model>.cpp assembler_<arch>.inline.hpp
stubGenerator_<arch_model>.cpp frame.inline.hpp
stubGenerator_<arch_model>.cpp handles.inline.hpp
stubGenerator_<arch_model>.cpp instanceOop.hpp
@@ -4562,7 +4562,7 @@ vm_version.cpp vm_version_<arch_model>.hpp
vm_version.hpp allocation.hpp
vm_version.hpp ostream.hpp
-vm_version_<arch_model>.cpp assembler_<arch_model>.inline.hpp
+vm_version_<arch_model>.cpp assembler_<arch>.inline.hpp
vm_version_<arch_model>.cpp java.hpp
vm_version_<arch_model>.cpp os_<os_family>.inline.hpp
vm_version_<arch_model>.cpp resourceArea.hpp
@@ -4603,7 +4603,7 @@ vtableStubs.cpp vtune.hpp
vtableStubs.hpp allocation.hpp
vtableStubs_<arch_model>.cpp assembler.hpp
-vtableStubs_<arch_model>.cpp assembler_<arch_model>.inline.hpp
+vtableStubs_<arch_model>.cpp assembler_<arch>.inline.hpp
vtableStubs_<arch_model>.cpp instanceKlass.hpp
vtableStubs_<arch_model>.cpp interp_masm_<arch_model>.hpp
vtableStubs_<arch_model>.cpp klassVtable.hpp
diff --git a/src/share/vm/includeDB_features b/src/share/vm/includeDB_features
index 88f4c2936..6effcca57 100644
--- a/src/share/vm/includeDB_features
+++ b/src/share/vm/includeDB_features
@@ -57,7 +57,7 @@ dump.cpp systemDictionary.hpp
dump.cpp vmThread.hpp
dump.cpp vm_operations.hpp
-dump_<arch_model>.cpp assembler_<arch_model>.inline.hpp
+dump_<arch_model>.cpp assembler_<arch>.inline.hpp
dump_<arch_model>.cpp compactingPermGenGen.hpp
forte.cpp collectedHeap.inline.hpp
diff --git a/src/share/vm/memory/blockOffsetTable.hpp b/src/share/vm/memory/blockOffsetTable.hpp
index fc76fc920..be9ec01bf 100644
--- a/src/share/vm/memory/blockOffsetTable.hpp
+++ b/src/share/vm/memory/blockOffsetTable.hpp
@@ -199,6 +199,12 @@ public:
// "index" in "_offset_array".
HeapWord* address_for_index(size_t index) const;
+ // Return the address "p" incremented by the size of
+ // a region. This method does not align the address
+ // returned to the start of a region. It is a simple
+ // primitive.
+ HeapWord* inc_by_region_size(HeapWord* p) const { return p + N_words; }
+
// Shared space support
void serialize(SerializeOopClosure* soc, HeapWord* start, HeapWord* end);
};
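Editorial note: the parGCAllocBuffer change earlier in this patch pairs with this new helper; instead of converting a card index back into an address, the retire path simply advances the first card's start by one BOT region. A toy sketch of the idea, where HeapWord and N_words are stand-ins for the real type and constant:

#include <cstdio>
#include <cstdint>

typedef uintptr_t HeapWord;          // stand-in for HotSpot's opaque word type
static const size_t N_words = 64;    // assumed words covered by one BOT entry

// Simple pointer bump; no alignment to a region boundary is performed.
static inline HeapWord* inc_by_region_size(HeapWord* p) { return p + N_words; }

int main() {
    HeapWord heap[256] = {0};
    HeapWord* first_card_start  = heap;
    HeapWord* second_card_start = inc_by_region_size(first_card_start);
    printf("delta = %zu words\n", (size_t)(second_card_start - first_card_start));
    return 0;
}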
diff --git a/src/share/vm/opto/addnode.cpp b/src/share/vm/opto/addnode.cpp
index e5cc05b6b..066ec6d71 100644
--- a/src/share/vm/opto/addnode.cpp
+++ b/src/share/vm/opto/addnode.cpp
@@ -573,8 +573,6 @@ const Type *AddPNode::bottom_type() const {
intptr_t txoffset = Type::OffsetBot;
if (tx->is_con()) { // Left input is an add of a constant?
txoffset = tx->get_con();
- if (txoffset != (int)txoffset)
- txoffset = Type::OffsetBot; // oops: add_offset will choke on it
}
return tp->add_offset(txoffset);
}
@@ -595,8 +593,6 @@ const Type *AddPNode::Value( PhaseTransform *phase ) const {
intptr_t p2offset = Type::OffsetBot;
if (p2->is_con()) { // Left input is an add of a constant?
p2offset = p2->get_con();
- if (p2offset != (int)p2offset)
- p2offset = Type::OffsetBot; // oops: add_offset will choke on it
}
return p1->add_offset(p2offset);
}
@@ -675,7 +671,7 @@ const Type *AddPNode::mach_bottom_type( const MachNode* n) {
// Check for any interesting operand info.
// In particular, check for both memory and non-memory operands.
// %%%%% Clean this up: use xadd_offset
- int con = opnd->constant();
+ intptr_t con = opnd->constant();
if ( con == TypePtr::OffsetBot ) goto bottom_out;
offset += con;
con = opnd->constant_disp();
@@ -695,6 +691,8 @@ const Type *AddPNode::mach_bottom_type( const MachNode* n) {
guarantee(tptr == NULL, "must be only one pointer operand");
tptr = et->isa_oopptr();
guarantee(tptr != NULL, "non-int operand must be pointer");
+ if (tptr->higher_equal(tp->add_offset(tptr->offset())))
+ tp = tptr; // Set more precise type for bailout
continue;
}
if ( eti->_hi != eti->_lo ) goto bottom_out;
diff --git a/src/share/vm/opto/block.cpp b/src/share/vm/opto/block.cpp
index c6b94a45a..be861b643 100644
--- a/src/share/vm/opto/block.cpp
+++ b/src/share/vm/opto/block.cpp
@@ -467,6 +467,10 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
// get successor block succ_no
assert(succ_no < in->_num_succs, "illegal successor number");
Block* out = in->_succs[succ_no];
+ // Compute frequency of the new block. Do this before inserting
+ // the new block in case succ_prob() needs to infer the probability from
+ // surrounding blocks.
+ float freq = in->_freq * in->succ_prob(succ_no);
// get ProjNode corresponding to the succ_no'th successor of the in block
ProjNode* proj = in->_nodes[in->_nodes.size() - in->_num_succs + succ_no]->as_Proj();
// create region for basic block
@@ -491,6 +495,8 @@ void PhaseCFG::insert_goto_at(uint block_no, uint succ_no) {
}
// remap predecessor's successor to new block
in->_succs.map(succ_no, block);
+ // Set the frequency of the new block
+ block->_freq = freq;
// add new basic block to basic block list
_blocks.insert(block_no + 1, block);
_num_blocks++;
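Editorial note on the ordering above: the new block's frequency is derived from the edge being split before the CFG is rewired, because the probability may be inferred from the very blocks about to change. A toy, self-contained model of that ordering (types and helpers are illustrative only, not the HotSpot classes):

#include <cstdio>

struct Block { float freq; };

static float succ_prob(const Block& from, const Block& to) {
    return to.freq / from.freq;                 // crude estimate from frequencies
}

int main() {
    Block in  = { 8.0f };
    Block out = { 2.0f };
    float freq = in.freq * succ_prob(in, out);  // 1) compute first
    Block inserted = { 0.0f };                  // 2) then create the new block
    inserted.freq = freq;                       // 3) finally assign
    printf("new block freq = %.2f\n", inserted.freq);
    return 0;
}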
diff --git a/src/share/vm/opto/callGenerator.cpp b/src/share/vm/opto/callGenerator.cpp
index 3131cf6b3..72b04c5d0 100644
--- a/src/share/vm/opto/callGenerator.cpp
+++ b/src/share/vm/opto/callGenerator.cpp
@@ -464,6 +464,12 @@ JVMState* PredictedCallGenerator::generate(JVMState* jvms) {
}
}
+ if (kit.stopped()) {
+ // Instance does not exactly match the desired type.
+ kit.set_jvms(slow_jvms);
+ return kit.transfer_exceptions_into_jvms();
+ }
+
// fall through if the instance exactly matches the desired type
kit.replace_in_map(receiver, exact_receiver);
diff --git a/src/share/vm/opto/callnode.cpp b/src/share/vm/opto/callnode.cpp
index 389db14fe..29b5f9990 100644
--- a/src/share/vm/opto/callnode.cpp
+++ b/src/share/vm/opto/callnode.cpp
@@ -829,9 +829,7 @@ SafePointNode* SafePointNode::next_exception() const {
//------------------------------Ideal------------------------------------------
// Skip over any collapsed Regions
Node *SafePointNode::Ideal(PhaseGVN *phase, bool can_reshape) {
- if (remove_dead_region(phase, can_reshape)) return this;
-
- return NULL;
+ return remove_dead_region(phase, can_reshape) ? this : NULL;
}
//------------------------------Identity---------------------------------------
diff --git a/src/share/vm/opto/chaitin.cpp b/src/share/vm/opto/chaitin.cpp
index 5d7efdb76..74a0fce90 100644
--- a/src/share/vm/opto/chaitin.cpp
+++ b/src/share/vm/opto/chaitin.cpp
@@ -43,7 +43,7 @@ void LRG::dump( ) const {
if( _degree_valid ) tty->print( "%d ", _eff_degree );
else tty->print("? ");
- if( _def == NodeSentinel ) {
+ if( is_multidef() ) {
tty->print("MultiDef ");
if (_defs != NULL) {
tty->print("(");
@@ -765,7 +765,7 @@ void PhaseChaitin::gather_lrg_masks( bool after_aggressive ) {
// if the LRG is an unaligned pair, we will have to spill
// so clear the LRG's register mask if it is not already spilled
if ( !n->is_SpillCopy() &&
- (lrg._def == NULL || lrg._def == NodeSentinel || !lrg._def->is_SpillCopy()) &&
+ (lrg._def == NULL || lrg.is_multidef() || !lrg._def->is_SpillCopy()) &&
lrgmask.is_misaligned_Pair()) {
lrg.Clear();
}
@@ -1282,7 +1282,7 @@ uint PhaseChaitin::Select( ) {
// Live range is live and no colors available
else {
assert( lrg->alive(), "" );
- assert( !lrg->_fat_proj || lrg->_def == NodeSentinel ||
+ assert( !lrg->_fat_proj || lrg->is_multidef() ||
lrg->_def->outcnt() > 0, "fat_proj cannot spill");
assert( !orig_mask.is_AllStack(), "All Stack does not spill" );
diff --git a/src/share/vm/opto/chaitin.hpp b/src/share/vm/opto/chaitin.hpp
index 273fb0320..9c7cc593e 100644
--- a/src/share/vm/opto/chaitin.hpp
+++ b/src/share/vm/opto/chaitin.hpp
@@ -156,6 +156,8 @@ public:
// Alive if non-zero, dead if zero
bool alive() const { return _def != NULL; }
+ bool is_multidef() const { return _def == NodeSentinel; }
+ bool is_singledef() const { return _def != NodeSentinel; }
#ifndef PRODUCT
void dump( ) const;
@@ -320,7 +322,8 @@ class PhaseChaitin : public PhaseRegAlloc {
uint split_DEF( Node *def, Block *b, int loc, uint max, Node **Reachblock, Node **debug_defs, GrowableArray<uint> splits, int slidx );
uint split_USE( Node *def, Block *b, Node *use, uint useidx, uint max, bool def_down, bool cisc_sp, GrowableArray<uint> splits, int slidx );
int clone_projs( Block *b, uint idx, Node *con, Node *copy, uint &maxlrg );
- Node *split_Rematerialize( Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits, int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru );
+ Node *split_Rematerialize(Node *def, Block *b, uint insidx, uint &maxlrg, GrowableArray<uint> splits,
+ int slidx, uint *lrg2reach, Node **Reachblock, bool walkThru);
// True if lidx is used before any real register is def'd in the block
bool prompt_use( Block *b, uint lidx );
Node *get_spillcopy_wide( Node *def, Node *use, uint uidx );
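Editorial note: the new LRG predicates replace scattered `_def == NodeSentinel` comparisons in chaitin, coalesce, ifg, and reg_split. A minimal standalone sketch of the same pattern; Node and NodeSentinel here are stand-ins, not the HotSpot definitions:

#include <cassert>
#include <cstdint>

struct Node;
static Node* const NodeSentinel = reinterpret_cast<Node*>(intptr_t(-1));

struct LRG {
    Node* _def;
    bool is_multidef()  const { return _def == NodeSentinel; }
    bool is_singledef() const { return _def != NodeSentinel; }
};

int main() {
    LRG lrg = { NodeSentinel };
    assert(lrg.is_multidef() && !lrg.is_singledef());
    return 0;
}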
diff --git a/src/share/vm/opto/coalesce.cpp b/src/share/vm/opto/coalesce.cpp
index 20e9bd179..b7e8a85dc 100644
--- a/src/share/vm/opto/coalesce.cpp
+++ b/src/share/vm/opto/coalesce.cpp
@@ -604,8 +604,8 @@ void PhaseConservativeCoalesce::union_helper( Node *lr1_node, Node *lr2_node, ui
// If both are single def, then src_def powers one live range
// and def_copy powers the other. After merging, src_def powers
// the combined live range.
- lrgs(lr1)._def = (lrgs(lr1)._def == NodeSentinel ||
- lrgs(lr2)._def == NodeSentinel )
+ lrgs(lr1)._def = (lrgs(lr1).is_multidef() ||
+ lrgs(lr2).is_multidef() )
? NodeSentinel : src_def;
lrgs(lr2)._def = NULL; // No def for lrg 2
lrgs(lr2).Clear(); // Force empty mask for LRG 2
diff --git a/src/share/vm/opto/compile.cpp b/src/share/vm/opto/compile.cpp
index ec28d0799..18114bafc 100644
--- a/src/share/vm/opto/compile.cpp
+++ b/src/share/vm/opto/compile.cpp
@@ -2111,6 +2111,7 @@ static void final_graph_reshaping_impl( Node *n, Final_Reshape_Counts &fpu ) {
n->subsume_by( cmpN );
}
}
+ break;
#endif
case Op_ModI:
diff --git a/src/share/vm/opto/connode.cpp b/src/share/vm/opto/connode.cpp
index 8b441c994..ceebd76a6 100644
--- a/src/share/vm/opto/connode.cpp
+++ b/src/share/vm/opto/connode.cpp
@@ -101,6 +101,8 @@ matter ever).
// Move constants to the right.
Node *CMoveNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
+ // Don't bother trying to transform a dead node
+ if( in(0) && in(0)->is_top() ) return NULL;
assert( !phase->eqv(in(Condition), this) &&
!phase->eqv(in(IfFalse), this) &&
!phase->eqv(in(IfTrue), this), "dead loop in CMoveNode::Ideal" );
diff --git a/src/share/vm/opto/divnode.cpp b/src/share/vm/opto/divnode.cpp
index 90e198d07..c1c2b5df4 100644
--- a/src/share/vm/opto/divnode.cpp
+++ b/src/share/vm/opto/divnode.cpp
@@ -264,8 +264,14 @@ static Node *long_by_long_mulhi( PhaseGVN *phase, Node *dividend, jlong magic_co
Node *t1 = phase->transform(new (phase->C, 3) URShiftLNode(lolo_product, phase->intcon(N / 2)));
Node *t2 = phase->transform(new (phase->C, 3) AddLNode(hilo_product, t1));
- Node *t3 = phase->transform(new (phase->C, 3) RShiftLNode(t2, phase->intcon(N / 2)));
- Node *t4 = phase->transform(new (phase->C, 3) AndLNode(t2, phase->longcon(0xFFFFFFFF)));
+
+ // Construct both t3 and t4 before transforming so t2 doesn't go dead
+ // prematurely.
+ Node *t3 = new (phase->C, 3) RShiftLNode(t2, phase->intcon(N / 2));
+ Node *t4 = new (phase->C, 3) AndLNode(t2, phase->longcon(0xFFFFFFFF));
+ t3 = phase->transform(t3);
+ t4 = phase->transform(t4);
+
Node *t5 = phase->transform(new (phase->C, 3) AddLNode(t4, lohi_product));
Node *t6 = phase->transform(new (phase->C, 3) RShiftLNode(t5, phase->intcon(N / 2)));
Node *t7 = phase->transform(new (phase->C, 3) AddLNode(t3, hihi_product));
@@ -396,6 +402,8 @@ Node *DivINode::Identity( PhaseTransform *phase ) {
// Divides can be changed to multiplies and/or shifts
Node *DivINode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
+ // Don't bother trying to transform a dead node
+ if( in(0) && in(0)->is_top() ) return NULL;
const Type *t = phase->type( in(2) );
if( t == TypeInt::ONE ) // Identity?
@@ -493,6 +501,8 @@ Node *DivLNode::Identity( PhaseTransform *phase ) {
// Dividing by a power of 2 is a shift.
Node *DivLNode::Ideal( PhaseGVN *phase, bool can_reshape) {
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
+ // Don't bother trying to transform a dead node
+ if( in(0) && in(0)->is_top() ) return NULL;
const Type *t = phase->type( in(2) );
if( t == TypeLong::ONE ) // Identity?
@@ -634,6 +644,8 @@ Node *DivFNode::Identity( PhaseTransform *phase ) {
//------------------------------Idealize---------------------------------------
Node *DivFNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
+ // Don't bother trying to transform a dead node
+ if( in(0) && in(0)->is_top() ) return NULL;
const Type *t2 = phase->type( in(2) );
if( t2 == TypeF::ONE ) // Identity?
@@ -719,6 +731,8 @@ Node *DivDNode::Identity( PhaseTransform *phase ) {
//------------------------------Idealize---------------------------------------
Node *DivDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
if (in(0) && remove_dead_region(phase, can_reshape)) return this;
+ // Don't bother trying to transform a dead node
+ if( in(0) && in(0)->is_top() ) return NULL;
const Type *t2 = phase->type( in(2) );
if( t2 == TypeD::ONE ) // Identity?
@@ -754,7 +768,9 @@ Node *DivDNode::Ideal(PhaseGVN *phase, bool can_reshape) {
//------------------------------Idealize---------------------------------------
Node *ModINode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for dead control input
- if( remove_dead_region(phase, can_reshape) ) return this;
+ if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
+ // Don't bother trying to transform a dead node
+ if( in(0) && in(0)->is_top() ) return NULL;
// Get the modulus
const Type *t = phase->type( in(2) );
@@ -923,7 +939,9 @@ const Type *ModINode::Value( PhaseTransform *phase ) const {
//------------------------------Idealize---------------------------------------
Node *ModLNode::Ideal(PhaseGVN *phase, bool can_reshape) {
// Check for dead control input
- if( remove_dead_region(phase, can_reshape) ) return this;
+ if( in(0) && remove_dead_region(phase, can_reshape) ) return this;
+ // Don't bother trying to transform a dead node
+ if( in(0) && in(0)->is_top() ) return NULL;
// Get the modulus
const Type *t = phase->type( in(2) );
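Editorial note for context on what long_by_long_mulhi() builds: the high 64 bits of a 64x64-bit product assembled from four 32x32 partial products, the same t1..t7 combination the patch reorders so t2 stays live. A standalone unsigned sketch of the decomposition (the HotSpot node graph operates on signed jlongs):

#include <cstdint>
#include <cstdio>

static uint64_t mulhi_u64(uint64_t a, uint64_t b) {
    uint64_t a_lo = a & 0xFFFFFFFFu, a_hi = a >> 32;
    uint64_t b_lo = b & 0xFFFFFFFFu, b_hi = b >> 32;

    uint64_t lolo = a_lo * b_lo;
    uint64_t hilo = a_hi * b_lo;
    uint64_t lohi = a_lo * b_hi;
    uint64_t hihi = a_hi * b_hi;

    uint64_t t2 = hilo + (lolo >> 32);        // carry from the low product
    uint64_t t5 = lohi + (t2 & 0xFFFFFFFFu);  // middle column
    return hihi + (t2 >> 32) + (t5 >> 32);    // high 64 bits of the product
}

int main() {
    // (2^64 - 1) * 2 = 2^65 - 2, whose high 64 bits are 1.
    printf("%llu\n", (unsigned long long)mulhi_u64(0xFFFFFFFFFFFFFFFFull, 2));
    return 0;
}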
diff --git a/src/share/vm/opto/escape.cpp b/src/share/vm/opto/escape.cpp
index fece33098..1c1d5d822 100644
--- a/src/share/vm/opto/escape.cpp
+++ b/src/share/vm/opto/escape.cpp
@@ -492,22 +492,41 @@ static Node* find_second_addp(Node* addp, Node* n) {
// Adjust the type and inputs of an AddP which computes the
// address of a field of an instance
//
-void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
+bool ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
const TypeOopPtr *base_t = igvn->type(base)->isa_oopptr();
assert(base_t != NULL && base_t->is_known_instance(), "expecting instance oopptr");
const TypeOopPtr *t = igvn->type(addp)->isa_oopptr();
if (t == NULL) {
// We are computing a raw address for a store captured by an Initialize
- // compute an appropriate address type.
+ // compute an appropriate address type (cases #3 and #5).
assert(igvn->type(addp) == TypeRawPtr::NOTNULL, "must be raw pointer");
assert(addp->in(AddPNode::Address)->is_Proj(), "base of raw address must be result projection from allocation");
- int offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
+ intptr_t offs = (int)igvn->find_intptr_t_con(addp->in(AddPNode::Offset), Type::OffsetBot);
assert(offs != Type::OffsetBot, "offset must be a constant");
t = base_t->add_offset(offs)->is_oopptr();
}
int inst_id = base_t->instance_id();
assert(!t->is_known_instance() || t->instance_id() == inst_id,
"old type must be non-instance or match new type");
+
+ // The type 't' could be a subclass of 'base_t'.
+ // As a result, t->offset() could be larger than base_t's size and it will
+ // cause a failure in add_offset() with narrow oops since the TypeOopPtr()
+ // constructor verifies correctness of the offset.
+ //
+ // It could happen on a subclass's branch (from the type profiling
+ // inlining) which was not eliminated during parsing since the exactness
+ // of the allocation type was not propagated to the subclass type check.
+ //
+ // Do nothing for such an AddP node and don't process its users since
+ // this code branch will go away.
+ //
+ if (!t->is_known_instance() &&
+ !t->klass()->equals(base_t->klass()) &&
+ t->klass()->is_subtype_of(base_t->klass())) {
+ return false; // bail out
+ }
+
const TypeOopPtr *tinst = base_t->add_offset(t->offset())->is_oopptr();
// Do NOT remove the next call: ensure an new alias index is allocated
// for the instance type
@@ -542,6 +561,7 @@ void ConnectionGraph::split_AddP(Node *addp, Node *base, PhaseGVN *igvn) {
}
// Put on IGVN worklist since at least addp's type was changed above.
record_for_optimizer(addp);
+ return true;
}
//
@@ -969,7 +989,7 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
if (elem == _phantom_object)
continue; // Assume the value was set outside this method.
Node *base = get_map(elem); // CheckCastPP node
- split_AddP(n, base, igvn);
+ if (!split_AddP(n, base, igvn)) continue; // wrong type
tinst = igvn->type(base)->isa_oopptr();
} else if (n->is_Phi() ||
n->is_CheckCastPP() ||
@@ -1012,6 +1032,8 @@ void ConnectionGraph::split_unique_types(GrowableArray<Node *> &alloc_worklist)
tn->set_type(tn_type);
igvn->hash_insert(tn);
record_for_optimizer(n);
+ } else {
+ continue; // wrong type
}
}
} else {
@@ -1788,6 +1810,7 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
} else if (call_analyzer->is_return_local()) {
// determine whether any arguments are returned
set_escape_state(call_idx, PointsToNode::NoEscape);
+ bool ret_arg = false;
for (uint i = TypeFunc::Parms; i < d->cnt(); i++) {
const Type* at = d->field_at(i);
@@ -1795,6 +1818,7 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
Node *arg = call->in(i)->uncast();
if (call_analyzer->is_arg_returned(i - TypeFunc::Parms)) {
+ ret_arg = true;
PointsToNode *arg_esp = ptnode_adr(arg->_idx);
if (arg_esp->node_type() == PointsToNode::UnknownType)
done = false;
@@ -1806,6 +1830,11 @@ void ConnectionGraph::process_call_result(ProjNode *resproj, PhaseTransform *pha
}
}
}
+ if (done && !ret_arg) {
+ // Returns an unknown object.
+ set_escape_state(call_idx, PointsToNode::GlobalEscape);
+ add_pointsto_edge(resproj_idx, _phantom_object);
+ }
copy_dependencies = true;
} else {
set_escape_state(call_idx, PointsToNode::GlobalEscape);
@@ -2212,7 +2241,9 @@ void ConnectionGraph::build_connection_graph(Node *n, PhaseTransform *phase) {
if (in->is_top() || in == n)
continue; // ignore top or inputs which go back this node
int ti = in->_idx;
- if (ptnode_adr(in->_idx)->node_type() == PointsToNode::JavaObject) {
+ PointsToNode::NodeType nt = ptnode_adr(ti)->node_type();
+ assert(nt != PointsToNode::UnknownType, "all nodes should be known");
+ if (nt == PointsToNode::JavaObject) {
add_pointsto_edge(n_idx, ti);
} else {
add_deferred_edge(n_idx, ti);
diff --git a/src/share/vm/opto/escape.hpp b/src/share/vm/opto/escape.hpp
index 93cfd0ec8..1ce0cc9cf 100644
--- a/src/share/vm/opto/escape.hpp
+++ b/src/share/vm/opto/escape.hpp
@@ -286,7 +286,7 @@ private:
// MemNode - new memory input for this node
// ChecCastPP - allocation that this is a cast of
// allocation - CheckCastPP of the allocation
- void split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
+ bool split_AddP(Node *addp, Node *base, PhaseGVN *igvn);
PhiNode *create_split_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn, bool &new_created);
PhiNode *split_memory_phi(PhiNode *orig_phi, int alias_idx, GrowableArray<PhiNode *> &orig_phi_worklist, PhaseGVN *igvn);
Node *find_mem(Node *mem, int alias_idx, PhaseGVN *igvn);
diff --git a/src/share/vm/opto/gcm.cpp b/src/share/vm/opto/gcm.cpp
index 0627e5050..b65101197 100644
--- a/src/share/vm/opto/gcm.cpp
+++ b/src/share/vm/opto/gcm.cpp
@@ -1609,7 +1609,30 @@ void CFGLoop::compute_freq() {
float Block::succ_prob(uint i) {
int eidx = end_idx();
Node *n = _nodes[eidx]; // Get ending Node
- int op = n->is_Mach() ? n->as_Mach()->ideal_Opcode() : n->Opcode();
+
+ int op = n->Opcode();
+ if (n->is_Mach()) {
+ if (n->is_MachNullCheck()) {
+ // Can only reach here if called after lcm. The original Op_If is gone,
+ // so we attempt to infer the probability from one or both of the
+ // successor blocks.
+ assert(_num_succs == 2, "expecting 2 successors of a null check");
+ // If either successor has only one predecessor, then the
+ // probabiltity estimate can be derived using the
+ // relative frequency of the successor and this block.
+ if (_succs[i]->num_preds() == 2) {
+ return _succs[i]->_freq / _freq;
+ } else if (_succs[1-i]->num_preds() == 2) {
+ return 1 - (_succs[1-i]->_freq / _freq);
+ } else {
+ // Estimate using both successor frequencies
+ float freq = _succs[i]->_freq;
+ return freq / (freq + _succs[1-i]->_freq);
+ }
+ }
+ op = n->as_Mach()->ideal_Opcode();
+ }
+
// Switch on branch type
switch( op ) {
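Editorial note: the MachNullCheck branch above recovers a branch probability from block frequencies once the original If node is gone. A hedged standalone sketch of the three cases (all names are illustrative, not the HotSpot API):

#include <cstdio>

// Probability of taking successor i of a two-way branch, inferred from block
// frequencies. 'single_pred' means that successor has this block as its only
// predecessor, so its frequency is an exact share of ours.
static float infer_prob(float this_freq, float taken_freq, float other_freq,
                        bool taken_single_pred, bool other_single_pred) {
    if (taken_single_pred) return taken_freq / this_freq;        // exact share
    if (other_single_pred) return 1.0f - other_freq / this_freq; // complement
    return taken_freq / (taken_freq + other_freq);               // ratio estimate
}

int main() {
    printf("%.2f\n", infer_prob(10.0f, 9.0f, 1.0f, true,  false)); // 0.90
    printf("%.2f\n", infer_prob(10.0f, 9.0f, 1.0f, false, false)); // 0.90
    return 0;
}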
diff --git a/src/share/vm/opto/ifg.cpp b/src/share/vm/opto/ifg.cpp
index 2c6cd665f..e7f55aa5b 100644
--- a/src/share/vm/opto/ifg.cpp
+++ b/src/share/vm/opto/ifg.cpp
@@ -594,7 +594,7 @@ uint PhaseChaitin::build_ifg_physical( ResourceArea *a ) {
// Insure high score for immediate-use spill copies so they get a color
if( n->is_SpillCopy()
- && lrgs(r)._def != NodeSentinel // MultiDef live range can still split
+ && lrgs(r).is_singledef() // MultiDef live range can still split
&& n->outcnt() == 1 // and use must be in this block
&& _cfg._bbs[n->unique_out()->_idx] == b ) {
// All single-use MachSpillCopy(s) that immediately precede their
diff --git a/src/share/vm/opto/loopnode.cpp b/src/share/vm/opto/loopnode.cpp
index c411cdc34..7e853e6a1 100644
--- a/src/share/vm/opto/loopnode.cpp
+++ b/src/share/vm/opto/loopnode.cpp
@@ -2625,9 +2625,11 @@ void PhaseIdealLoop::build_loop_late_post( Node *n, const PhaseIdealLoop *verify
case Op_LoadF:
case Op_LoadI:
case Op_LoadKlass:
+ case Op_LoadNKlass:
case Op_LoadL:
case Op_LoadS:
case Op_LoadP:
+ case Op_LoadN:
case Op_LoadRange:
case Op_LoadD_unaligned:
case Op_LoadL_unaligned:
diff --git a/src/share/vm/opto/loopopts.cpp b/src/share/vm/opto/loopopts.cpp
index 3ac320cbf..06aab460d 100644
--- a/src/share/vm/opto/loopopts.cpp
+++ b/src/share/vm/opto/loopopts.cpp
@@ -96,6 +96,10 @@ Node *PhaseIdealLoop::split_thru_phi( Node *n, Node *region, int policy ) {
// our new node, even though we may throw the node away.
// (Note: This tweaking with igvn only works because x is a new node.)
_igvn.set_type(x, t);
+ // If x is a TypeNode, capture any more-precise type permanently into the Node;
+ // otherwise it will not be updated during igvn->transform since
+ // igvn->type(x) is set to x->Value() already.
+ x->raise_bottom_type(t);
Node *y = x->Identity(&_igvn);
if( y != x ) {
wins++;
@@ -464,11 +468,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
case T_FLOAT:
case T_DOUBLE:
case T_ADDRESS: // (RawPtr)
- case T_NARROWOOP:
cost++;
break;
+ case T_NARROWOOP: // Fall through
case T_OBJECT: { // Base oops are OK, but not derived oops
- const TypeOopPtr *tp = phi->type()->isa_oopptr();
+ const TypeOopPtr *tp = phi->type()->make_ptr()->isa_oopptr();
// Derived pointers are Bad (tm): what's the Base (for GC purposes) of a
// CMOVE'd derived pointer? It's a CMOVE'd derived base. Thus
// CMOVE'ing a derived pointer requires we also CMOVE the base. If we
@@ -499,11 +503,11 @@ Node *PhaseIdealLoop::conditional_move( Node *region ) {
return NULL; // Too much speculative goo
}
}
- // See if the Phi is used by a Cmp. This will likely Split-If, a
- // higher-payoff operation.
+ // See if the Phi is used by a Cmp or Narrow oop Decode/Encode.
+ // This will likely Split-If, a higher-payoff operation.
for (DUIterator_Fast kmax, k = phi->fast_outs(kmax); k < kmax; k++) {
Node* use = phi->fast_out(k);
- if( use->is_Cmp() )
+ if( use->is_Cmp() || use->is_DecodeN() || use->is_EncodeP() )
return NULL;
}
}
diff --git a/src/share/vm/opto/macro.cpp b/src/share/vm/opto/macro.cpp
index c496c8fd9..f8aea8e36 100644
--- a/src/share/vm/opto/macro.cpp
+++ b/src/share/vm/opto/macro.cpp
@@ -594,7 +594,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
// Scan object's fields adding an input to the safepoint for each field.
for (int j = 0; j < nfields; j++) {
- int offset;
+ intptr_t offset;
ciField* field = NULL;
if (iklass != NULL) {
field = iklass->nonstatic_field_at(j);
@@ -602,7 +602,7 @@ bool PhaseMacroExpand::scalar_replacement(AllocateNode *alloc, GrowableArray <Sa
elem_type = field->type();
basic_elem_type = field->layout_type();
} else {
- offset = array_base + j * element_size;
+ offset = array_base + j * (intptr_t)element_size;
}
const Type *field_type;
diff --git a/src/share/vm/opto/matcher.cpp b/src/share/vm/opto/matcher.cpp
index 1640b28e3..f8ff59632 100644
--- a/src/share/vm/opto/matcher.cpp
+++ b/src/share/vm/opto/matcher.cpp
@@ -1450,6 +1450,8 @@ MachNode *Matcher::ReduceInst( State *s, int rule, Node *&mem ) {
Node *leaf = s->_leaf;
// Check for instruction or instruction chain rule
if( rule >= _END_INST_CHAIN_RULE || rule < _BEGIN_INST_CHAIN_RULE ) {
+ assert(C->node_arena()->contains(s->_leaf) || !has_new_node(s->_leaf),
+ "duplicating node that's already been matched");
// Instruction
mach->add_req( leaf->in(0) ); // Set initial control
// Reduce interior of complex instruction
@@ -1872,6 +1874,12 @@ void Matcher::find_shared( Node *n ) {
// Clone addressing expressions as they are "free" in most instructions
if( mem_op && i == MemNode::Address && mop == Op_AddP ) {
+ if (m->in(AddPNode::Base)->Opcode() == Op_DecodeN) {
+ // Bases used in addresses must be shared, but since
+ // they are shared through a DecodeN they may appear
+ // to have a single use, so force sharing here.
+ set_shared(m->in(AddPNode::Base)->in(1));
+ }
Node *off = m->in(AddPNode::Offset);
if( off->is_Con() ) {
set_visited(m); // Flag as visited now
diff --git a/src/share/vm/opto/memnode.cpp b/src/share/vm/opto/memnode.cpp
index edbadc5a0..56f7ef736 100644
--- a/src/share/vm/opto/memnode.cpp
+++ b/src/share/vm/opto/memnode.cpp
@@ -214,6 +214,9 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
Node *ctl = in(MemNode::Control);
if (ctl && remove_dead_region(phase, can_reshape))
return this;
+ ctl = in(MemNode::Control);
+ // Don't bother trying to transform a dead node
+ if( ctl && ctl->is_top() ) return NodeSentinel;
// Ignore if memory is dead, or self-loop
Node *mem = in(MemNode::Memory);
@@ -244,6 +247,7 @@ Node *MemNode::Ideal_common(PhaseGVN *phase, bool can_reshape) {
if (mem != old_mem) {
set_req(MemNode::Memory, mem);
+ if (phase->type( mem ) == Type::TOP) return NodeSentinel;
return this;
}
@@ -1231,6 +1235,10 @@ Node *LoadNode::split_through_phi(PhaseGVN *phase) {
// our new node, even though we may throw the node away.
// (This tweaking with igvn only works because x is a new node.)
igvn->set_type(x, t);
+ // If x is a TypeNode, capture any more-precise type permanently into the Node;
+ // otherwise it will not be updated during igvn->transform since
+ // igvn->type(x) is set to x->Value() already.
+ x->raise_bottom_type(t);
Node *y = x->Identity(igvn);
if( y != x ) {
wins++;
@@ -1312,6 +1320,7 @@ Node *LoadNode::Ideal(PhaseGVN *phase, bool can_reshape) {
Node* opt_mem = MemNode::optimize_memory_chain(mem, addr_t, phase);
if (opt_mem != mem) {
set_req(MemNode::Memory, opt_mem);
+ if (phase->type( opt_mem ) == Type::TOP) return NULL;
return this;
}
const TypeOopPtr *t_oop = addr_t->isa_oopptr();
@@ -1409,7 +1418,7 @@ const Type *LoadNode::Value( PhaseTransform *phase ) const {
// had an original form like p1:(AddP x x (LShiftL quux 3)), where the
// expression (LShiftL quux 3) independently optimized to the constant 8.
if ((t->isa_int() == NULL) && (t->isa_long() == NULL)
- && Opcode() != Op_LoadKlass) {
+ && Opcode() != Op_LoadKlass && Opcode() != Op_LoadNKlass) {
// t might actually be lower than _type, if _type is a unique
// concrete subclass of abstract class t.
// Make sure the reference is not into the header, by comparing
@@ -2443,8 +2452,7 @@ MemBarNode* MemBarNode::make(Compile* C, int opcode, int atp, Node* pn) {
// Return a node which is more "ideal" than the current node. Strip out
// control copies
Node *MemBarNode::Ideal(PhaseGVN *phase, bool can_reshape) {
- if (remove_dead_region(phase, can_reshape)) return this;
- return NULL;
+ return remove_dead_region(phase, can_reshape) ? this : NULL;
}
//------------------------------Value------------------------------------------
diff --git a/src/share/vm/opto/node.cpp b/src/share/vm/opto/node.cpp
index df3e56c2d..9130403ed 100644
--- a/src/share/vm/opto/node.cpp
+++ b/src/share/vm/opto/node.cpp
@@ -1166,16 +1166,15 @@ bool Node::dominates(Node* sub, Node_List &nlist) {
// using it dead as well. This will happen normally via the usual IterGVN
// worklist but this call is more efficient. Do not update use-def info
// inside the dead region, just at the borders.
-static bool kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
+static void kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
// Con's are a popular node to re-hit in the hash table again.
- if( dead->is_Con() ) return false;
+ if( dead->is_Con() ) return;
// Can't put ResourceMark here since igvn->_worklist uses the same arena
// for verify pass with +VerifyOpto and we add/remove elements in it here.
Node_List nstack(Thread::current()->resource_area());
Node *top = igvn->C->top();
- bool progress = false;
nstack.push(dead);
while (nstack.size() > 0) {
@@ -1214,7 +1213,6 @@ static bool kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
for (uint i=0; i < dead->req(); i++) {
Node *n = dead->in(i); // Get input to dead guy
if (n != NULL && !n->is_top()) { // Input is valid?
- progress = true;
dead->set_req(i, top); // Smash input away
if (n->outcnt() == 0) { // Input also goes dead?
if (!n->is_Con())
@@ -1233,7 +1231,7 @@ static bool kill_dead_code( Node *dead, PhaseIterGVN *igvn ) {
}
} // (dead->outcnt() == 0)
} // while (nstack.size() > 0) for outputs
- return progress;
+ return;
}
//------------------------------remove_dead_region-----------------------------
@@ -1243,7 +1241,8 @@ bool Node::remove_dead_region(PhaseGVN *phase, bool can_reshape) {
// Lost control into this guy? I.e., it became unreachable?
// Aggressively kill all unreachable code.
if (can_reshape && n->is_top()) {
- return kill_dead_code(this, phase->is_IterGVN());
+ kill_dead_code(this, phase->is_IterGVN());
+ return false; // Node is dead.
}
if( n->is_Region() && n->as_Region()->is_copy() ) {
diff --git a/src/share/vm/opto/phaseX.cpp b/src/share/vm/opto/phaseX.cpp
index 45690469d..484629a90 100644
--- a/src/share/vm/opto/phaseX.cpp
+++ b/src/share/vm/opto/phaseX.cpp
@@ -986,7 +986,9 @@ Node *PhaseIterGVN::transform_old( Node *n ) {
// Apply the Ideal call in a loop until it no longer applies
Node *k = n;
DEBUG_ONLY(dead_loop_check(k);)
+ DEBUG_ONLY(bool is_new = (k->outcnt() == 0);)
Node *i = k->Ideal(this, /*can_reshape=*/true);
+ assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
#ifndef PRODUCT
if( VerifyIterativeGVN )
verify_step(k);
@@ -1024,7 +1026,9 @@ Node *PhaseIterGVN::transform_old( Node *n ) {
}
DEBUG_ONLY(dead_loop_check(k);)
// Try idealizing again
+ DEBUG_ONLY(is_new = (k->outcnt() == 0);)
i = k->Ideal(this, /*can_reshape=*/true);
+ assert(i != k || is_new || i->outcnt() > 0, "don't return dead nodes");
#ifndef PRODUCT
if( VerifyIterativeGVN )
verify_step(k);
diff --git a/src/share/vm/opto/reg_split.cpp b/src/share/vm/opto/reg_split.cpp
index 5101eb2e7..a562eacc2 100644
--- a/src/share/vm/opto/reg_split.cpp
+++ b/src/share/vm/opto/reg_split.cpp
@@ -284,7 +284,7 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
// Check for single-def (LRG cannot redefined)
uint lidx = n2lidx(in);
if( lidx >= _maxlrg ) continue; // Value is a recent spill-copy
- if( lrgs(lidx)._def != NodeSentinel ) continue;
+ if (lrgs(lidx).is_singledef()) continue;
Block *b_def = _cfg._bbs[def->_idx];
int idx_def = b_def->find_node(def);
@@ -311,12 +311,20 @@ Node *PhaseChaitin::split_Rematerialize( Node *def, Block *b, uint insidx, uint
uint lidx = Find_id(in);
// Walk backwards thru spill copy node intermediates
- if( walkThru )
+ if (walkThru) {
while ( in->is_SpillCopy() && lidx >= _maxlrg ) {
in = in->in(1);
lidx = Find_id(in);
}
+ if (lidx < _maxlrg && lrgs(lidx).is_multidef()) {
+ // walkThru found a multidef LRG, which is unsafe to use, so
+ // just keep the original def used in the clone.
+ in = spill->in(i);
+ lidx = Find_id(in);
+ }
+ }
+
if( lidx < _maxlrg && lrgs(lidx).reg() >= LRG::SPILL_REG ) {
Node *rdef = Reachblock[lrg2reach[lidx]];
if( rdef ) spill->set_req(i,rdef);
@@ -505,7 +513,7 @@ uint PhaseChaitin::Split( uint maxlrg ) {
// Do not bother splitting or putting in Phis for single-def
// rematerialized live ranges. This happens a lot to constants
// with long live ranges.
- if( lrgs(lidx)._def != NodeSentinel &&
+ if( lrgs(lidx).is_singledef() &&
lrgs(lidx)._def->rematerialize() ) {
// reset the Reaches & UP entries
Reachblock[slidx] = lrgs(lidx)._def;
diff --git a/src/share/vm/opto/subnode.cpp b/src/share/vm/opto/subnode.cpp
index 90ad08700..774aff9f4 100644
--- a/src/share/vm/opto/subnode.cpp
+++ b/src/share/vm/opto/subnode.cpp
@@ -633,20 +633,31 @@ const Type *CmpPNode::sub( const Type *t1, const Type *t2 ) const {
kps != 1 && // both or neither are klass pointers
!klass0->is_interface() && // do not trust interfaces
!klass1->is_interface()) {
+ bool unrelated_classes = false;
// See if neither subclasses the other, or if the class on top
- // is precise. In either of these cases, the compare must fail.
+ // is precise. In either of these cases, the compare is known
+ // to fail if at least one of the pointers is provably not null.
if (klass0->equals(klass1) || // if types are unequal but klasses are
!klass0->is_java_klass() || // types not part of Java language?
!klass1->is_java_klass()) { // types not part of Java language?
// Do nothing; we know nothing for imprecise types
} else if (klass0->is_subtype_of(klass1)) {
- // If klass1's type is PRECISE, then we can fail.
- if (xklass1) return TypeInt::CC_GT;
+ // If klass1's type is PRECISE, then classes are unrelated.
+ unrelated_classes = xklass1;
} else if (klass1->is_subtype_of(klass0)) {
- // If klass0's type is PRECISE, then we can fail.
- if (xklass0) return TypeInt::CC_GT;
+ // If klass0's type is PRECISE, then classes are unrelated.
+ unrelated_classes = xklass0;
} else { // Neither subtypes the other
- return TypeInt::CC_GT; // ...so always fail
+ unrelated_classes = true;
+ }
+ if (unrelated_classes) {
+ // The oops' classes are known to be unrelated. If the join of the two
+ // oops' PTRs is neither Null nor Bottom, then at least one of the two
+ // oops is known to be non-null, and the comparison will always fail.
+ TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
+ if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
+ return TypeInt::CC_GT;
+ }
}
}
}
@@ -681,7 +692,11 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
// Now check for LoadKlass on left.
Node* ldk1 = in(1);
- if (ldk1->Opcode() != Op_LoadKlass)
+ if (ldk1->is_DecodeN()) {
+ ldk1 = ldk1->in(1);
+ if (ldk1->Opcode() != Op_LoadNKlass )
+ return NULL;
+ } else if (ldk1->Opcode() != Op_LoadKlass )
return NULL;
// Take apart the address of the LoadKlass:
Node* adr1 = ldk1->in(MemNode::Address);
@@ -702,7 +717,11 @@ Node *CmpPNode::Ideal( PhaseGVN *phase, bool can_reshape ) {
// Check for a LoadKlass from primary supertype array.
// Any nested loadklass from loadklass+con must be from the p.s. array.
- if (ldk2->Opcode() != Op_LoadKlass)
+ if (ldk2->is_DecodeN()) {
+ // Keep ldk2 as DecodeN since it could be used in CmpP below.
+ if (ldk2->in(1)->Opcode() != Op_LoadNKlass )
+ return NULL;
+ } else if (ldk2->Opcode() != Op_LoadKlass)
return NULL;
// Verify that we understand the situation
@@ -769,20 +788,31 @@ const Type *CmpNNode::sub( const Type *t1, const Type *t2 ) const {
kps != 1 && // both or neither are klass pointers
!klass0->is_interface() && // do not trust interfaces
!klass1->is_interface()) {
+ bool unrelated_classes = false;
// See if neither subclasses the other, or if the class on top
- // is precise. In either of these cases, the compare must fail.
+ // is precise. In either of these cases, the compare is known
+ // to fail if at least one of the pointers is provably not null.
if (klass0->equals(klass1) || // if types are unequal but klasses are
!klass0->is_java_klass() || // types not part of Java language?
!klass1->is_java_klass()) { // types not part of Java language?
// Do nothing; we know nothing for imprecise types
} else if (klass0->is_subtype_of(klass1)) {
- // If klass1's type is PRECISE, then we can fail.
- if (xklass1) return TypeInt::CC_GT;
+ // If klass1's type is PRECISE, then classes are unrelated.
+ unrelated_classes = xklass1;
} else if (klass1->is_subtype_of(klass0)) {
- // If klass0's type is PRECISE, then we can fail.
- if (xklass0) return TypeInt::CC_GT;
+ // If klass0's type is PRECISE, then classes are unrelated.
+ unrelated_classes = xklass0;
} else { // Neither subtypes the other
- return TypeInt::CC_GT; // ...so always fail
+ unrelated_classes = true;
+ }
+ if (unrelated_classes) {
+ // The oops' classes are known to be unrelated. If the join of the two
+ // oops' PTRs is neither Null nor Bottom, then at least one of the two
+ // oops is known to be non-null, and the comparison will always fail.
+ TypePtr::PTR jp = r0->join_ptr(r1->_ptr);
+ if (jp != TypePtr::Null && jp != TypePtr::BotPTR) {
+ return TypeInt::CC_GT;
+ }
}
}
}
diff --git a/src/share/vm/opto/type.cpp b/src/share/vm/opto/type.cpp
index 3a77c9da4..243b44c42 100644
--- a/src/share/vm/opto/type.cpp
+++ b/src/share/vm/opto/type.cpp
@@ -804,6 +804,7 @@ const Type *TypeF::xmeet( const Type *t ) const {
case InstPtr:
case KlassPtr:
case AryPtr:
+ case NarrowOop:
case Int:
case Long:
case DoubleTop:
@@ -1955,14 +1956,25 @@ const Type *TypePtr::xdual() const {
return new TypePtr( AnyPtr, dual_ptr(), dual_offset() );
}
+//------------------------------xadd_offset------------------------------------
+int TypePtr::xadd_offset( intptr_t offset ) const {
+ // Adding to 'TOP' offset? Return 'TOP'!
+ if( _offset == OffsetTop || offset == OffsetTop ) return OffsetTop;
+ // Adding to 'BOTTOM' offset? Return 'BOTTOM'!
+ if( _offset == OffsetBot || offset == OffsetBot ) return OffsetBot;
+ // Addition overflows or "accidentally" equals OffsetTop? Return 'BOTTOM'!
+ offset += (intptr_t)_offset;
+ if (offset != (int)offset || offset == OffsetTop) return OffsetBot;
+
+ // assert( _offset >= 0 && _offset+offset >= 0, "" );
+ // It is possible to construct a negative offset during PhaseCCP
+
+ return (int)offset; // Sum valid offsets
+}
+
//------------------------------add_offset-------------------------------------
-const TypePtr *TypePtr::add_offset( int offset ) const {
- if( offset == 0 ) return this; // No change
- if( _offset == OffsetBot ) return this;
- if( offset == OffsetBot ) offset = OffsetBot;
- else if( _offset == OffsetTop || offset == OffsetTop ) offset = OffsetTop;
- else offset += _offset;
- return make( AnyPtr, _ptr, offset );
+const TypePtr *TypePtr::add_offset( intptr_t offset ) const {
+ return make( AnyPtr, _ptr, xadd_offset(offset) );
}
//------------------------------eq---------------------------------------------
@@ -2095,7 +2107,7 @@ const Type *TypeRawPtr::xdual() const {
}
//------------------------------add_offset-------------------------------------
-const TypePtr *TypeRawPtr::add_offset( int offset ) const {
+const TypePtr *TypeRawPtr::add_offset( intptr_t offset ) const {
if( offset == OffsetTop ) return BOTTOM; // Undefined offset-> undefined pointer
if( offset == OffsetBot ) return BOTTOM; // Unknown offset-> unknown pointer
if( offset == 0 ) return this; // No change
@@ -2263,6 +2275,7 @@ const Type *TypeOopPtr::xmeet( const Type *t ) const {
case DoubleTop:
case DoubleCon:
case DoubleBot:
+ case NarrowOop:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -2543,21 +2556,8 @@ bool TypeOopPtr::singleton(void) const {
return (_offset == 0) && !below_centerline(_ptr);
}
-//------------------------------xadd_offset------------------------------------
-int TypeOopPtr::xadd_offset( int offset ) const {
- // Adding to 'TOP' offset? Return 'TOP'!
- if( _offset == OffsetTop || offset == OffsetTop ) return OffsetTop;
- // Adding to 'BOTTOM' offset? Return 'BOTTOM'!
- if( _offset == OffsetBot || offset == OffsetBot ) return OffsetBot;
-
- // assert( _offset >= 0 && _offset+offset >= 0, "" );
- // It is possible to construct a negative offset during PhaseCCP
-
- return _offset+offset; // Sum valid offsets
-}
-
//------------------------------add_offset-------------------------------------
-const TypePtr *TypeOopPtr::add_offset( int offset ) const {
+const TypePtr *TypeOopPtr::add_offset( intptr_t offset ) const {
return make( _ptr, xadd_offset(offset) );
}
@@ -3074,7 +3074,7 @@ void TypeInstPtr::dump2( Dict &d, uint depth, outputStream *st ) const {
#endif
//------------------------------add_offset-------------------------------------
-const TypePtr *TypeInstPtr::add_offset( int offset ) const {
+const TypePtr *TypeInstPtr::add_offset( intptr_t offset ) const {
return make( _ptr, klass(), klass_is_exact(), const_oop(), xadd_offset(offset), _instance_id );
}
@@ -3425,7 +3425,7 @@ bool TypeAryPtr::empty(void) const {
}
//------------------------------add_offset-------------------------------------
-const TypePtr *TypeAryPtr::add_offset( int offset ) const {
+const TypePtr *TypeAryPtr::add_offset( intptr_t offset ) const {
return make( _ptr, _const_oop, _ary, _klass, _klass_is_exact, xadd_offset(offset), _instance_id );
}
@@ -3465,7 +3465,7 @@ bool TypeNarrowOop::empty(void) const {
return _ooptype->empty();
}
-//------------------------------meet-------------------------------------------
+//------------------------------xmeet------------------------------------------
// Compute the MEET of two types. It returns a new Type object.
const Type *TypeNarrowOop::xmeet( const Type *t ) const {
// Perform a fast test for common case; meeting the same types together.
@@ -3483,6 +3483,13 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const {
case DoubleTop:
case DoubleCon:
case DoubleBot:
+ case AnyPtr:
+ case RawPtr:
+ case OopPtr:
+ case InstPtr:
+ case KlassPtr:
+ case AryPtr:
+
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
@@ -3499,16 +3506,9 @@ const Type *TypeNarrowOop::xmeet( const Type *t ) const {
default: // All else is a mistake
typerr(t);
- case RawPtr:
- case AnyPtr:
- case OopPtr:
- case InstPtr:
- case KlassPtr:
- case AryPtr:
- typerr(t);
- return Type::BOTTOM;
-
} // End of switch
+
+ return this;
}
const Type *TypeNarrowOop::xdual() const { // Compute dual right now.
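
With this rewrite, meeting a TypeNarrowOop with the ordinary pointer types no longer goes through typerr(t); those cases now fall into the group that simply meets to Type::BOTTOM, and the trailing return this only covers whatever falls through the switch. A tiny sketch of that design choice, with invented kind names that are not HotSpot's, shown only to illustrate "incomparable kinds fall to a common bottom element instead of being treated as an error":

#include <cassert>
#include <cstdio>

enum Kind { NarrowKind, PtrKind, IntKind, BottomKind };   // assumed toy kinds

// Toy meet(): the same kind stays put, anything else drops to BOTTOM rather
// than being reported as an illegal combination.
static Kind meet(Kind a, Kind b) {
  if (a == b) return a;
  return BottomKind;
}

int main() {
  assert(meet(NarrowKind, PtrKind) == BottomKind);
  assert(meet(IntKind, IntKind) == IntKind);
  std::printf("toy meet behaves as sketched\n");
  return 0;
}
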
@@ -3652,7 +3652,7 @@ ciKlass* TypeAryPtr::klass() const {
//------------------------------add_offset-------------------------------------
// Access internals of klass object
-const TypePtr *TypeKlassPtr::add_offset( int offset ) const {
+const TypePtr *TypeKlassPtr::add_offset( intptr_t offset ) const {
return make( _ptr, klass(), xadd_offset(offset) );
}
@@ -3702,6 +3702,7 @@ const Type *TypeKlassPtr::xmeet( const Type *t ) const {
case DoubleTop:
case DoubleCon:
case DoubleBot:
+ case NarrowOop:
case Bottom: // Ye Olde Default
return Type::BOTTOM;
case Top:
diff --git a/src/share/vm/opto/type.hpp b/src/share/vm/opto/type.hpp
index 7dbcf0962..68366edca 100644
--- a/src/share/vm/opto/type.hpp
+++ b/src/share/vm/opto/type.hpp
@@ -581,7 +581,8 @@ public:
virtual intptr_t get_con() const;
- virtual const TypePtr *add_offset( int offset ) const;
+ int xadd_offset( intptr_t offset ) const;
+ virtual const TypePtr *add_offset( intptr_t offset ) const;
virtual bool singleton(void) const; // TRUE if type is a singleton
virtual bool empty(void) const; // TRUE if type is vacuous
@@ -632,7 +633,7 @@ public:
virtual intptr_t get_con() const;
- virtual const TypePtr *add_offset( int offset ) const;
+ virtual const TypePtr *add_offset( intptr_t offset ) const;
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
@@ -659,7 +660,6 @@ public:
};
protected:
- int xadd_offset( int offset ) const;
// Oop is NULL, unless this is a constant oop.
ciObject* _const_oop; // Constant oop
// If _klass is NULL, then so is _sig. This is an unloaded klass.
@@ -724,7 +724,7 @@ public:
// corresponding pointer to klass, for a given instance
const TypeKlassPtr* as_klass_type() const;
- virtual const TypePtr *add_offset( int offset ) const;
+ virtual const TypePtr *add_offset( intptr_t offset ) const;
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
@@ -793,7 +793,7 @@ class TypeInstPtr : public TypeOopPtr {
virtual const TypeOopPtr *cast_to_instance_id(int instance_id) const;
- virtual const TypePtr *add_offset( int offset ) const;
+ virtual const TypePtr *add_offset( intptr_t offset ) const;
virtual const Type *xmeet( const Type *t ) const;
virtual const TypeInstPtr *xmeet_unloaded( const TypeInstPtr *t ) const;
@@ -842,7 +842,7 @@ public:
virtual const TypeAryPtr* cast_to_size(const TypeInt* size) const;
virtual bool empty(void) const; // TRUE if type is vacuous
- virtual const TypePtr *add_offset( int offset ) const;
+ virtual const TypePtr *add_offset( intptr_t offset ) const;
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
@@ -896,7 +896,7 @@ public:
// corresponding pointer to instance, for a given class
const TypeOopPtr* as_instance_type() const;
- virtual const TypePtr *add_offset( int offset ) const;
+ virtual const TypePtr *add_offset( intptr_t offset ) const;
virtual const Type *xmeet( const Type *t ) const;
virtual const Type *xdual() const; // Compute dual right now.
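
Every add_offset() declaration in type.hpp changes in lockstep with the base class because these are virtuals: once TypePtr::add_offset() takes intptr_t, a subclass that kept the old int parameter would silently stop overriding it and become an unrelated overload. A short sketch of that pitfall, with invented class names (this is not HotSpot code, just the C++ rule the hunks are respecting):

#include <stdint.h>
#include <cstdio>

struct Base {
  virtual ~Base() {}
  virtual const char* add_offset(intptr_t) const { return "Base::add_offset"; }
};

struct Stale : Base {
  // int parameter: this is a new overload, not an override of the virtual.
  const char* add_offset(int) const { return "Stale::add_offset"; }
};

struct Updated : Base {
  const char* add_offset(intptr_t) const { return "Updated::add_offset"; }
};

int main() {
  Stale s; Updated u;
  const Base& bs = s;
  const Base& bu = u;
  std::printf("%s\n", bs.add_offset(1));   // Base::add_offset -- Stale lost the override
  std::printf("%s\n", bu.add_offset(1));   // Updated::add_offset
  return 0;
}
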
diff --git a/src/share/vm/runtime/globals.hpp b/src/share/vm/runtime/globals.hpp
index 51f59293b..f06fa3cef 100644
--- a/src/share/vm/runtime/globals.hpp
+++ b/src/share/vm/runtime/globals.hpp
@@ -1443,7 +1443,7 @@ class CommandLineFlags {
"CMSPrecleanNumerator:CMSPrecleanDenominator yields convergence" \
" ratio") \
\
- product(bool, CMSPrecleanRefLists1, true, \
+ product(bool, CMSPrecleanRefLists1, false, \
"Preclean ref lists during (initial) preclean phase") \
\
product(bool, CMSPrecleanRefLists2, false, \
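
This hunk flips the default of CMSPrecleanRefLists1 from true to false, so reference lists are no longer precleaned during the initial preclean phase unless explicitly requested. Since it is declared as a product bool flag, the old behaviour can still be selected on the command line; a sketch of such an invocation (the application class name is a placeholder):

java -XX:+UseConcMarkSweepGC -XX:+CMSPrecleanRefLists1 MyApp
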
diff --git a/src/share/vm/runtime/vmStructs.cpp b/src/share/vm/runtime/vmStructs.cpp
index cb38e15d0..0c0dee130 100644
--- a/src/share/vm/runtime/vmStructs.cpp
+++ b/src/share/vm/runtime/vmStructs.cpp
@@ -583,7 +583,7 @@ static inline uint64_t cast_uint64_t(size_t x)
/***********************************/ \
\
static_field(StubRoutines, _call_stub_return_address, address) \
- IA32_ONLY(static_field(StubRoutines::i486,_call_stub_compiled_return, address)) \
+ IA32_ONLY(static_field(StubRoutines::x86,_call_stub_compiled_return, address)) \
\
/***************************************/ \
/* PcDesc and other compiled code info */ \
@@ -1107,7 +1107,7 @@ static inline uint64_t cast_uint64_t(size_t x)
\
declare_toplevel_type(StubQueue) \
declare_toplevel_type(StubRoutines) \
- IA32_ONLY(declare_toplevel_type(StubRoutines::i486)) \
+ IA32_ONLY(declare_toplevel_type(StubRoutines::x86)) \
declare_toplevel_type(Stub) \
declare_type(InterpreterCodelet, Stub) \
\
diff --git a/src/share/vm/utilities/macros.hpp b/src/share/vm/utilities/macros.hpp
index 2f495efac..7d7bc654a 100644
--- a/src/share/vm/utilities/macros.hpp
+++ b/src/share/vm/utilities/macros.hpp
@@ -144,6 +144,14 @@
#define NOT_WINDOWS(code) code
#endif
+#if defined(IA32) || defined(AMD64)
+#define X86
+#define X86_ONLY(code) code
+#else
+#undef X86
+#define X86_ONLY(code)
+#endif
+
#ifdef IA32
#define IA32_ONLY(code) code
#define NOT_IA32(code)
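
macros.hpp now defines X86 and an X86_ONLY(code) wrapper that keeps its argument only when IA32 or AMD64 is defined, mirroring the existing IA32_ONLY / NOT_IA32 pattern below it. A small self-contained sketch of how such a guard macro is typically used; the helper function and the -DAMD64 build flag are illustrative assumptions, not part of the HotSpot build:

// Compile with e.g.  g++ -DAMD64 guard.cpp  to see the x86-only call survive.
#include <cstdio>

#if defined(IA32) || defined(AMD64)
#define X86_ONLY(code) code     // x86, 32- or 64-bit: keep the code
#else
#define X86_ONLY(code)          // any other CPU: compile it away
#endif

static void report_x86_setup() {            // hypothetical helper
  std::printf("running x86-specific setup\n");
}

int main() {
  X86_ONLY(report_x86_setup();)             // expands to nothing off x86
  std::printf("running common setup\n");
  return 0;
}
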
diff --git a/src/os_cpu/windows_x86/vm/assembler_windows_x86_64.cpp b/test/compiler/6741738/Tester.java
index 7ff190fb2..922f5e2f4 100644
--- a/src/os_cpu/windows_x86/vm/assembler_windows_x86_64.cpp
+++ b/test/compiler/6741738/Tester.java
@@ -1,5 +1,5 @@
/*
- * Copyright 2003-2008 Sun Microsystems, Inc. All Rights Reserved.
+ * Copyright 2008 Sun Microsystems, Inc. All Rights Reserved.
* DO NOT ALTER OR REMOVE COPYRIGHT NOTICES OR THIS FILE HEADER.
*
* This code is free software; you can redistribute it and/or modify it
@@ -19,49 +19,32 @@
* Please contact Sun Microsystems, Inc., 4150 Network Circle, Santa Clara,
* CA 95054 USA or visit www.sun.com if you need additional information or
* have any questions.
- *
*/
-#include "incls/_precompiled.incl"
-#include "incls/_assembler_windows_x86_64.cpp.incl"
-
-
-void MacroAssembler::int3() {
- emit_byte(0xCC);
-}
+/*
+ * @test
+ * @bug 6741738
+ * @summary TypePtr::add_offset() set incorrect offset when the add overflows
+ * @run main/othervm -Xcomp -XX:CompileOnly=Tester.foo Tester
+ */
-// call (Thread*)TlsGetValue(thread_index());
-void MacroAssembler::get_thread(Register thread) {
- if (thread != rax) {
- pushq(rax);
- }
- pushq(rdi);
- pushq(rsi);
- pushq(rdx);
- pushq(rcx);
- pushq(r8);
- pushq(r9);
- pushq(r10);
- // XXX
- movq(r10, rsp);
- andq(rsp, -16);
- pushq(r10);
- pushq(r11);
+public class Tester {
+ private String[] values;
+ private int count;
- movl(c_rarg0, ThreadLocalStorage::thread_index());
- call(RuntimeAddress((address)TlsGetValue));
+ String foo() {
+ int i = Integer.MAX_VALUE-1;
+ String s;
+ try {
+ s = values[i];
+ } catch (Throwable e) {
+ s = "";
+ }
+ return s;
+ }
- popq(r11);
- popq(rsp);
- popq(r10);
- popq(r9);
- popq(r8);
- popq(rcx);
- popq(rdx);
- popq(rsi);
- popq(rdi);
- if (thread != rax) {
- movq(thread, rax);
- popq(rax);
- }
+ public static void main(String[] args) {
+ Tester t = new Tester();
+ String s = t.foo();
+ }
}
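
The new regression test forces foo() to be compiled (-Xcomp -XX:CompileOnly=Tester.foo) and indexes values[] with Integer.MAX_VALUE - 1. At run time the access simply throws and the catch block returns ""; the point of the test is that the compiler must type the address of that element while compiling foo(), and the byte offset involved is what the old add_offset() wrapped. A back-of-the-envelope check of the magnitude, with an assumed 16-byte array header and 4-byte reference elements (both numbers are illustrative, not taken from the VM):

#include <climits>
#include <stdint.h>
#include <cstdio>

int main() {
  const int64_t header = 16;                    // assumed array header size
  const int64_t scale  = 4;                     // assumed reference size
  const int64_t index  = (int64_t)INT_MAX - 1;  // the index used by foo()

  int64_t byte_offset = header + index * scale; // roughly 8.6 * 10^9
  std::printf("byte offset = %lld\n", (long long)byte_offset);
  std::printf("INT_MAX     = %d\n", INT_MAX);
  std::printf("fits in 32-bit int: %s\n",
              byte_offset <= INT_MAX ? "yes" : "no -- this is where the old code wrapped");
  return 0;
}
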