author     amurillo <none@none>    2013-07-18 09:25:06 -0700
committer  amurillo <none@none>    2013-07-18 09:25:06 -0700
commit     cd4853e29e04aa92714f5726c659d95e3eab01db (patch)
tree       d3a999b64457667a1e5fd907d66e32c25a41a656
parent     72557d09f27611b070aba92b8419a4a742165afd (diff)
parent     b2544266c60e316f0a8a59a062360ed7e2c1e724 (diff)
-rw-r--r--  make/hotspot_version                                      2
-rw-r--r--  make/linux/makefiles/vm.make                              3
-rw-r--r--  src/cpu/sparc/vm/stubGenerator_sparc.cpp                 53
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_32.cpp                  41
-rw-r--r--  src/cpu/x86/vm/stubGenerator_x86_64.cpp                  46
-rw-r--r--  src/os/windows/vm/os_windows.cpp                          5
-rw-r--r--  src/os_cpu/bsd_x86/vm/bsd_x86_32.s                       18
-rw-r--r--  src/os_cpu/bsd_x86/vm/bsd_x86_64.s                       22
-rw-r--r--  src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp                     19
-rw-r--r--  src/os_cpu/linux_sparc/vm/linux_sparc.s                  36
-rw-r--r--  src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp             13
-rw-r--r--  src/os_cpu/linux_x86/vm/linux_x86_32.s                   18
-rw-r--r--  src/os_cpu/linux_x86/vm/linux_x86_64.s                   22
-rw-r--r--  src/os_cpu/linux_x86/vm/os_linux_x86.cpp                 19
-rw-r--r--  src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp         20
-rw-r--r--  src/os_cpu/solaris_sparc/vm/solaris_sparc.s              41
-rw-r--r--  src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp             20
-rw-r--r--  src/os_cpu/solaris_x86/vm/solaris_x86_32.s               14
-rw-r--r--  src/os_cpu/solaris_x86/vm/solaris_x86_64.s               52
-rw-r--r--  src/os_cpu/windows_x86/vm/os_windows_x86.cpp             18
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp   4
-rw-r--r--  src/share/vm/runtime/os.hpp                               4
-rw-r--r--  src/share/vm/runtime/stubRoutines.cpp                     7
-rw-r--r--  src/share/vm/runtime/stubRoutines.hpp                    47
-rw-r--r--  src/share/vm/services/memTracker.cpp                      8
-rw-r--r--  src/share/vm/utilities/globalDefinitions.hpp              8
26 files changed, 244 insertions, 316 deletions
diff --git a/make/hotspot_version b/make/hotspot_version
index 8d98d13fb..86018ca9e 100644
--- a/make/hotspot_version
+++ b/make/hotspot_version
@@ -35,7 +35,7 @@ HOTSPOT_VM_COPYRIGHT=Copyright 2013
HS_MAJOR_VER=25
HS_MINOR_VER=0
-HS_BUILD_NUMBER=41
+HS_BUILD_NUMBER=42
JDK_MAJOR_VER=1
JDK_MINOR_VER=8
diff --git a/make/linux/makefiles/vm.make b/make/linux/makefiles/vm.make
index 3c3bd2e2d..4c984be2c 100644
--- a/make/linux/makefiles/vm.make
+++ b/make/linux/makefiles/vm.make
@@ -46,6 +46,7 @@ ifeq ($(findstring true, $(JVM_VARIANT_ZERO) $(JVM_VARIANT_ZEROSHARK)), true)
include $(MAKEFILES_DIR)/zeroshark.make
else
include $(MAKEFILES_DIR)/$(BUILDARCH).make
+ -include $(HS_ALT_MAKE)/$(Platform_os_family)/makefiles/$(BUILDARCH).make
endif
# set VPATH so make knows where to look for source files
@@ -380,4 +381,4 @@ build: $(LIBJVM) $(LAUNCHER) $(LIBJSIG) $(LIBJVM_DB) $(BUILDLIBSAPROC) dtraceChe
install: install_jvm install_jsig install_saproc
-.PHONY: default build install install_jvm
+.PHONY: default build install install_jvm $(HS_ALT_MAKE)/$(Platform_os_family)/makefiles/$(BUILDARCH).make
diff --git a/src/cpu/sparc/vm/stubGenerator_sparc.cpp b/src/cpu/sparc/vm/stubGenerator_sparc.cpp
index 494c1bc40..214940cdb 100644
--- a/src/cpu/sparc/vm/stubGenerator_sparc.cpp
+++ b/src/cpu/sparc/vm/stubGenerator_sparc.cpp
@@ -410,6 +410,51 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
+ // Safefetch stubs.
+ void generate_safefetch(const char* name, int size, address* entry,
+ address* fault_pc, address* continuation_pc) {
+ // safefetch signatures:
+ // int SafeFetch32(int* adr, int errValue);
+ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
+ //
+ // arguments:
+ // o0 = adr
+ // o1 = errValue
+ //
+ // result:
+ // o0 = *adr or errValue
+
+ StubCodeMark mark(this, "StubRoutines", name);
+
+ // Entry point, pc or function descriptor.
+ __ align(CodeEntryAlignment);
+ *entry = __ pc();
+
+ __ mov(O0, G1); // g1 = o0
+ __ mov(O1, O0); // o0 = o1
+ // Load *adr into O0, may fault.
+ *fault_pc = __ pc();
+ switch (size) {
+ case 4:
+ // int32_t
+ __ ldsw(G1, 0, O0); // o0 = [g1]
+ break;
+ case 8:
+ // int64_t
+ __ ldx(G1, 0, O0); // o0 = [g1]
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ // return errValue or *adr
+ *continuation_pc = __ pc();
+ // By convention with the trap handler we ensure there is a non-CTI
+ // instruction in the trap shadow.
+ __ nop();
+ __ retl();
+ __ delayed()->nop();
+ }
//------------------------------------------------------------------------------------------------------------------------
// Continuation point for throwing of implicit exceptions that are not handled in
@@ -3315,6 +3360,14 @@ class StubGenerator: public StubCodeGenerator {
// Don't initialize the platform math functions since sparc
// doesn't have intrinsics for these operations.
+
+ // Safefetch stubs.
+ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
+ &StubRoutines::_safefetch32_fault_pc,
+ &StubRoutines::_safefetch32_continuation_pc);
+ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
+ &StubRoutines::_safefetchN_fault_pc,
+ &StubRoutines::_safefetchN_continuation_pc);
}
diff --git a/src/cpu/x86/vm/stubGenerator_x86_32.cpp b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
index 82e4183ef..a8abfea6b 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_32.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_32.cpp
@@ -2766,6 +2766,39 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
+ // Safefetch stubs.
+ void generate_safefetch(const char* name, int size, address* entry,
+ address* fault_pc, address* continuation_pc) {
+ // safefetch signatures:
+ // int SafeFetch32(int* adr, int errValue);
+ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
+
+ StubCodeMark mark(this, "StubRoutines", name);
+
+ // Entry point, pc or function descriptor.
+ *entry = __ pc();
+
+ __ movl(rax, Address(rsp, 0x8));
+ __ movl(rcx, Address(rsp, 0x4));
+ // Load *adr into eax, may fault.
+ *fault_pc = __ pc();
+ switch (size) {
+ case 4:
+ // int32_t
+ __ movl(rax, Address(rcx, 0));
+ break;
+ case 8:
+ // int64_t
+ Unimplemented();
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+
+ // Return errValue or *adr.
+ *continuation_pc = __ pc();
+ __ ret(0);
+ }
public:
// Information about frame layout at time of blocking runtime call.
@@ -2978,6 +3011,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt();
}
+
+ // Safefetch stubs.
+ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
+ &StubRoutines::_safefetch32_fault_pc,
+ &StubRoutines::_safefetch32_continuation_pc);
+ StubRoutines::_safefetchN_entry = StubRoutines::_safefetch32_entry;
+ StubRoutines::_safefetchN_fault_pc = StubRoutines::_safefetch32_fault_pc;
+ StubRoutines::_safefetchN_continuation_pc = StubRoutines::_safefetch32_continuation_pc;
}
diff --git a/src/cpu/x86/vm/stubGenerator_x86_64.cpp b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
index 2d94642f8..c80f18079 100644
--- a/src/cpu/x86/vm/stubGenerator_x86_64.cpp
+++ b/src/cpu/x86/vm/stubGenerator_x86_64.cpp
@@ -3357,7 +3357,45 @@ class StubGenerator: public StubCodeGenerator {
return start;
}
+ // Safefetch stubs.
+ void generate_safefetch(const char* name, int size, address* entry,
+ address* fault_pc, address* continuation_pc) {
+ // safefetch signatures:
+ // int SafeFetch32(int* adr, int errValue);
+ // intptr_t SafeFetchN (intptr_t* adr, intptr_t errValue);
+ //
+ // arguments:
+ // c_rarg0 = adr
+ // c_rarg1 = errValue
+ //
+ // result:
+ // rax = *adr or errValue
+
+ StubCodeMark mark(this, "StubRoutines", name);
+
+ // Entry point, pc or function descriptor.
+ *entry = __ pc();
+
+ // Load *adr into c_rarg1, may fault.
+ *fault_pc = __ pc();
+ switch (size) {
+ case 4:
+ // int32_t
+ __ movl(c_rarg1, Address(c_rarg0, 0));
+ break;
+ case 8:
+ // int64_t
+ __ movq(c_rarg1, Address(c_rarg0, 0));
+ break;
+ default:
+ ShouldNotReachHere();
+ }
+ // return errValue or *adr
+ *continuation_pc = __ pc();
+ __ movq(rax, c_rarg1);
+ __ ret(0);
+ }
// This is a version of CBC/AES Decrypt which does 4 blocks in a loop at a time
// to hide instruction latency
@@ -3833,6 +3871,14 @@ class StubGenerator: public StubCodeGenerator {
StubRoutines::_cipherBlockChaining_encryptAESCrypt = generate_cipherBlockChaining_encryptAESCrypt();
StubRoutines::_cipherBlockChaining_decryptAESCrypt = generate_cipherBlockChaining_decryptAESCrypt_Parallel();
}
+
+ // Safefetch stubs.
+ generate_safefetch("SafeFetch32", sizeof(int), &StubRoutines::_safefetch32_entry,
+ &StubRoutines::_safefetch32_fault_pc,
+ &StubRoutines::_safefetch32_continuation_pc);
+ generate_safefetch("SafeFetchN", sizeof(intptr_t), &StubRoutines::_safefetchN_entry,
+ &StubRoutines::_safefetchN_fault_pc,
+ &StubRoutines::_safefetchN_continuation_pc);
}
public:
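
Note: the generated stub above is functionally equivalent to the C++ sketch below (illustrative only, not the emitted assembly). The point of generating it as a stub is that the load at *fault_pc may trap; the platform fault handler then resumes execution at *continuation_pc with the error value still in the result register.

    // Illustrative sketch only -- SafeFetch32 is really the generated stub.
    static int SafeFetch32_sketch(int* adr, int errValue) {
      int result = errValue;   // on x86_64 errValue arrives in c_rarg1
      // *fault_pc: this load may trap if adr is unmapped
      result = *adr;
      // *continuation_pc: reached normally, or via the fault handler,
      // which leaves errValue untouched in the result register
      return result;           // moved to rax before ret
    }
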
diff --git a/src/os/windows/vm/os_windows.cpp b/src/os/windows/vm/os_windows.cpp
index 23d2efb36..3b00e86a2 100644
--- a/src/os/windows/vm/os_windows.cpp
+++ b/src/os/windows/vm/os_windows.cpp
@@ -2323,6 +2323,11 @@ LONG WINAPI topLevelExceptionFilter(struct _EXCEPTION_POINTERS* exceptionInfo) {
#endif
Thread* t = ThreadLocalStorage::get_thread_slow(); // slow & steady
+ // Handle SafeFetch32 and SafeFetchN exceptions.
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ return Handle_Exception(exceptionInfo, StubRoutines::continuation_for_safefetch_fault(pc));
+ }
+
#ifndef _WIN64
// Execution protection violation - win32 running on AMD64 only
// Handled first to avoid misdiagnosis as a "normal" access violation;
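
On Windows the continuation is installed by Handle_Exception; conceptually it rewrites the faulting context's instruction pointer and resumes, roughly as in the sketch below (assumed shape for illustration, not the actual helper in os_windows.cpp).

    #include <windows.h>

    // Assumed shape of the continuation step on Windows: redirect the
    // faulting context to the stub's continuation pc and retry.
    static LONG resume_at(EXCEPTION_POINTERS* info, unsigned char* continuation) {
    #ifdef _WIN64
      info->ContextRecord->Rip = (DWORD64) continuation;
    #else
      info->ContextRecord->Eip = (DWORD)   continuation;
    #endif
      return EXCEPTION_CONTINUE_EXECUTION;  // resume at the new pc
    }
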
diff --git a/src/os_cpu/bsd_x86/vm/bsd_x86_32.s b/src/os_cpu/bsd_x86/vm/bsd_x86_32.s
index 402c8da11..3275996f0 100644
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_32.s
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_32.s
@@ -63,24 +63,6 @@ SYMBOL(fixcw):
popl %eax
ret
- .globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume)
- .globl SYMBOL(SafeFetchN)
- ## TODO: avoid exposing Fetch32PFI and Fetch32Resume.
- ## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP)
- ## routine to vet the address. If the address is the faulting LD then
- ## SafeFetchTriage() would return the resume-at EIP, otherwise null.
- ELF_TYPE(SafeFetch32,@function)
- .p2align 4,,15
-SYMBOL(SafeFetch32):
-SYMBOL(SafeFetchN):
- movl 0x8(%esp), %eax
- movl 0x4(%esp), %ecx
-SYMBOL(Fetch32PFI):
- movl (%ecx), %eax
-SYMBOL(Fetch32Resume):
- ret
-
-
.globl SYMBOL(SpinPause)
ELF_TYPE(SpinPause,@function)
.p2align 4,,15
diff --git a/src/os_cpu/bsd_x86/vm/bsd_x86_64.s b/src/os_cpu/bsd_x86/vm/bsd_x86_64.s
index 65d2db45f..2f70fce77 100644
--- a/src/os_cpu/bsd_x86/vm/bsd_x86_64.s
+++ b/src/os_cpu/bsd_x86/vm/bsd_x86_64.s
@@ -46,28 +46,6 @@
.text
- .globl SYMBOL(SafeFetch32), SYMBOL(Fetch32PFI), SYMBOL(Fetch32Resume)
- .p2align 4,,15
- ELF_TYPE(SafeFetch32,@function)
- // Prototype: int SafeFetch32 (int * Adr, int ErrValue)
-SYMBOL(SafeFetch32):
- movl %esi, %eax
-SYMBOL(Fetch32PFI):
- movl (%rdi), %eax
-SYMBOL(Fetch32Resume):
- ret
-
- .globl SYMBOL(SafeFetchN), SYMBOL(FetchNPFI), SYMBOL(FetchNResume)
- .p2align 4,,15
- ELF_TYPE(SafeFetchN,@function)
- // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
-SYMBOL(SafeFetchN):
- movq %rsi, %rax
-SYMBOL(FetchNPFI):
- movq (%rdi), %rax
-SYMBOL(FetchNResume):
- ret
-
.globl SYMBOL(SpinPause)
.p2align 4,,15
ELF_TYPE(SpinPause,@function)
diff --git a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp
index aa36599ea..55ef24b89 100644
--- a/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp
+++ b/src/os_cpu/bsd_x86/vm/os_bsd_x86.cpp
@@ -385,13 +385,6 @@ enum {
trap_page_fault = 0xE
};
-extern "C" void Fetch32PFI () ;
-extern "C" void Fetch32Resume () ;
-#ifdef AMD64
-extern "C" void FetchNPFI () ;
-extern "C" void FetchNResume () ;
-#endif // AMD64
-
extern "C" JNIEXPORT int
JVM_handle_bsd_signal(int sig,
siginfo_t* info,
@@ -454,16 +447,10 @@ JVM_handle_bsd_signal(int sig,
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Bsd::ucontext_get_pc(uc);
- if (pc == (address) Fetch32PFI) {
- uc->context_pc = intptr_t(Fetch32Resume) ;
- return 1 ;
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ uc->context_pc = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
+ return 1;
}
-#ifdef AMD64
- if (pc == (address) FetchNPFI) {
- uc->context_pc = intptr_t (FetchNResume) ;
- return 1 ;
- }
-#endif // AMD64
// Handle ALL stack overflow variations here
if (sig == SIGSEGV || sig == SIGBUS) {
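
The same dispatch pattern recurs in each of the POSIX handlers below; only the way the interrupted pc is rewritten differs per port. A generic sketch, where ucontext_set_pc() stands in for the port-specific context update (context_pc, gregs[REG_PC], set_cont_address(), ...):

    // Generic sketch of the SafeFetch fault dispatch shared by the POSIX ports.
    static bool handle_safefetch_fault(address pc, ucontext_t* uc) {
      if (StubRoutines::is_safefetch_fault(pc)) {
        // redirect the interrupted thread to the stub's continuation point
        ucontext_set_pc(uc, StubRoutines::continuation_for_safefetch_fault(pc));
        return true;   // fault consumed; execution resumes in the stub
      }
      return false;    // not a SafeFetch fault; continue normal diagnosis
    }
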
diff --git a/src/os_cpu/linux_sparc/vm/linux_sparc.s b/src/os_cpu/linux_sparc/vm/linux_sparc.s
index e04f871f4..d7c2ce874 100644
--- a/src/os_cpu/linux_sparc/vm/linux_sparc.s
+++ b/src/os_cpu/linux_sparc/vm/linux_sparc.s
@@ -21,42 +21,6 @@
# questions.
#
- # Prototype: int SafeFetch32 (int * adr, int ErrValue)
- # The "ld" at Fetch32 is potentially faulting instruction.
- # If the instruction traps the trap handler will arrange
- # for control to resume at Fetch32Resume.
- # By convention with the trap handler we ensure there is a non-CTI
- # instruction in the trap shadow.
-
-
- .globl SafeFetch32, Fetch32PFI, Fetch32Resume
- .globl SafeFetchN
- .align 32
- .type SafeFetch32,@function
-SafeFetch32:
- mov %o0, %g1
- mov %o1, %o0
-Fetch32PFI:
- # <-- Potentially faulting instruction
- ld [%g1], %o0
-Fetch32Resume:
- nop
- retl
- nop
-
- .globl SafeFetchN, FetchNPFI, FetchNResume
- .type SafeFetchN,@function
- .align 32
-SafeFetchN:
- mov %o0, %g1
- mov %o1, %o0
-FetchNPFI:
- ldn [%g1], %o0
-FetchNResume:
- nop
- retl
- nop
-
# Possibilities:
# -- membar
# -- CAS (SP + BIAS, G0, G0)
diff --git a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp
index d97f0e041..2367e2a06 100644
--- a/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp
+++ b/src/os_cpu/linux_sparc/vm/os_linux_sparc.cpp
@@ -366,18 +366,9 @@ intptr_t* os::Linux::ucontext_get_fp(ucontext_t *uc) {
// Utility functions
-extern "C" void Fetch32PFI();
-extern "C" void Fetch32Resume();
-extern "C" void FetchNPFI();
-extern "C" void FetchNResume();
-
inline static bool checkPrefetch(sigcontext* uc, address pc) {
- if (pc == (address) Fetch32PFI) {
- set_cont_address(uc, address(Fetch32Resume));
- return true;
- }
- if (pc == (address) FetchNPFI) {
- set_cont_address(uc, address(FetchNResume));
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ set_cont_address(uc, address(StubRoutines::continuation_for_safefetch_fault(pc)));
return true;
}
return false;
diff --git a/src/os_cpu/linux_x86/vm/linux_x86_32.s b/src/os_cpu/linux_x86/vm/linux_x86_32.s
index d29d31df4..7936cbf52 100644
--- a/src/os_cpu/linux_x86/vm/linux_x86_32.s
+++ b/src/os_cpu/linux_x86/vm/linux_x86_32.s
@@ -42,24 +42,6 @@
.text
- .globl SafeFetch32, Fetch32PFI, Fetch32Resume
- .globl SafeFetchN
- ## TODO: avoid exposing Fetch32PFI and Fetch32Resume.
- ## Instead, the signal handler would call a new SafeFetchTriage(FaultingEIP)
- ## routine to vet the address. If the address is the faulting LD then
- ## SafeFetchTriage() would return the resume-at EIP, otherwise null.
- .type SafeFetch32,@function
- .p2align 4,,15
-SafeFetch32:
-SafeFetchN:
- movl 0x8(%esp), %eax
- movl 0x4(%esp), %ecx
-Fetch32PFI:
- movl (%ecx), %eax
-Fetch32Resume:
- ret
-
-
.globl SpinPause
.type SpinPause,@function
.p2align 4,,15
diff --git a/src/os_cpu/linux_x86/vm/linux_x86_64.s b/src/os_cpu/linux_x86/vm/linux_x86_64.s
index 8be68610e..fb688e7a7 100644
--- a/src/os_cpu/linux_x86/vm/linux_x86_64.s
+++ b/src/os_cpu/linux_x86/vm/linux_x86_64.s
@@ -38,28 +38,6 @@
.text
- .globl SafeFetch32, Fetch32PFI, Fetch32Resume
- .align 16
- .type SafeFetch32,@function
- // Prototype: int SafeFetch32 (int * Adr, int ErrValue)
-SafeFetch32:
- movl %esi, %eax
-Fetch32PFI:
- movl (%rdi), %eax
-Fetch32Resume:
- ret
-
- .globl SafeFetchN, FetchNPFI, FetchNResume
- .align 16
- .type SafeFetchN,@function
- // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
-SafeFetchN:
- movq %rsi, %rax
-FetchNPFI:
- movq (%rdi), %rax
-FetchNResume:
- ret
-
.globl SpinPause
.align 16
.type SpinPause,@function
diff --git a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp
index 4fc3b76d2..f7a57773f 100644
--- a/src/os_cpu/linux_x86/vm/os_linux_x86.cpp
+++ b/src/os_cpu/linux_x86/vm/os_linux_x86.cpp
@@ -209,13 +209,6 @@ enum {
trap_page_fault = 0xE
};
-extern "C" void Fetch32PFI () ;
-extern "C" void Fetch32Resume () ;
-#ifdef AMD64
-extern "C" void FetchNPFI () ;
-extern "C" void FetchNResume () ;
-#endif // AMD64
-
extern "C" JNIEXPORT int
JVM_handle_linux_signal(int sig,
siginfo_t* info,
@@ -278,16 +271,10 @@ JVM_handle_linux_signal(int sig,
if (info != NULL && uc != NULL && thread != NULL) {
pc = (address) os::Linux::ucontext_get_pc(uc);
- if (pc == (address) Fetch32PFI) {
- uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
- return 1 ;
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
+ return 1;
}
-#ifdef AMD64
- if (pc == (address) FetchNPFI) {
- uc->uc_mcontext.gregs[REG_PC] = intptr_t (FetchNResume) ;
- return 1 ;
- }
-#endif // AMD64
#ifndef AMD64
// Halt if SI_KERNEL before more crashes get misdiagnosed as Java bugs
diff --git a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
index 939def32f..4257f4e46 100644
--- a/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
+++ b/src/os_cpu/solaris_sparc/vm/os_solaris_sparc.cpp
@@ -303,11 +303,6 @@ bool os::is_allocatable(size_t bytes) {
#endif
}
-extern "C" void Fetch32PFI () ;
-extern "C" void Fetch32Resume () ;
-extern "C" void FetchNPFI () ;
-extern "C" void FetchNResume () ;
-
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
int abort_if_unrecognized) {
@@ -379,17 +374,10 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
npc = (address) uc->uc_mcontext.gregs[REG_nPC];
// SafeFetch() support
- // Implemented with either a fixed set of addresses such
- // as Fetch32*, or with Thread._OnTrap.
- if (uc->uc_mcontext.gregs[REG_PC] == intptr_t(Fetch32PFI)) {
- uc->uc_mcontext.gregs [REG_PC] = intptr_t(Fetch32Resume) ;
- uc->uc_mcontext.gregs [REG_nPC] = intptr_t(Fetch32Resume) + 4 ;
- return true ;
- }
- if (uc->uc_mcontext.gregs[REG_PC] == intptr_t(FetchNPFI)) {
- uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ;
- uc->uc_mcontext.gregs [REG_nPC] = intptr_t(FetchNResume) + 4 ;
- return true ;
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
+ uc->uc_mcontext.gregs[REG_nPC] = uc->uc_mcontext.gregs[REG_PC] + 4;
+ return 1;
}
// Handle ALL stack overflow variations here
diff --git a/src/os_cpu/solaris_sparc/vm/solaris_sparc.s b/src/os_cpu/solaris_sparc/vm/solaris_sparc.s
index aa526a09d..39aaa77f6 100644
--- a/src/os_cpu/solaris_sparc/vm/solaris_sparc.s
+++ b/src/os_cpu/solaris_sparc/vm/solaris_sparc.s
@@ -21,47 +21,6 @@
!! questions.
!!
- !! Prototype: int SafeFetch32 (int * adr, int ErrValue)
- !! The "ld" at Fetch32 is potentially faulting instruction.
- !! If the instruction traps the trap handler will arrange
- !! for control to resume at Fetch32Resume.
- !! By convention with the trap handler we ensure there is a non-CTI
- !! instruction in the trap shadow.
- !!
- !! The reader might be tempted to move this service to .il.
- !! Don't. Sun's CC back-end reads and optimize code emitted
- !! by the .il "call", in some cases optimizing the code, completely eliding it,
- !! or by moving the code from the "call site".
-
- !! ASM better know we may use G6 for our own purposes
- .register %g6, #ignore
-
- .globl SafeFetch32
- .align 32
- .global Fetch32PFI, Fetch32Resume
-SafeFetch32:
- mov %o0, %g1
- mov %o1, %o0
-Fetch32PFI:
- ld [%g1], %o0 !! <-- Potentially faulting instruction
-Fetch32Resume:
- nop
- retl
- nop
-
- .globl SafeFetchN
- .align 32
- .globl FetchNPFI, FetchNResume
-SafeFetchN:
- mov %o0, %g1
- mov %o1, %o0
-FetchNPFI:
- ldn [%g1], %o0
-FetchNResume:
- nop
- retl
- nop
-
!! Possibilities:
!! -- membar
!! -- CAS (SP + BIAS, G0, G0)
diff --git a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
index 4ed094db7..f479ffbb1 100644
--- a/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
+++ b/src/os_cpu/solaris_x86/vm/os_solaris_x86.cpp
@@ -352,13 +352,6 @@ bool os::is_allocatable(size_t bytes) {
}
-extern "C" void Fetch32PFI () ;
-extern "C" void Fetch32Resume () ;
-#ifdef AMD64
-extern "C" void FetchNPFI () ;
-extern "C" void FetchNResume () ;
-#endif // AMD64
-
extern "C" JNIEXPORT int
JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
int abort_if_unrecognized) {
@@ -436,17 +429,10 @@ JVM_handle_solaris_signal(int sig, siginfo_t* info, void* ucVoid,
// factor me: getPCfromContext
pc = (address) uc->uc_mcontext.gregs[REG_PC];
- // SafeFetch32() support
- if (pc == (address) Fetch32PFI) {
- uc->uc_mcontext.gregs[REG_PC] = intptr_t(Fetch32Resume) ;
- return true ;
- }
-#ifdef AMD64
- if (pc == (address) FetchNPFI) {
- uc->uc_mcontext.gregs [REG_PC] = intptr_t(FetchNResume) ;
- return true ;
+ if (StubRoutines::is_safefetch_fault(pc)) {
+ uc->uc_mcontext.gregs[REG_PC] = intptr_t(StubRoutines::continuation_for_safefetch_fault(pc));
+ return true;
}
-#endif // AMD64
// Handle ALL stack overflow variations here
if (sig == SIGSEGV && info->si_code == SEGV_ACCERR) {
diff --git a/src/os_cpu/solaris_x86/vm/solaris_x86_32.s b/src/os_cpu/solaris_x86/vm/solaris_x86_32.s
index 1fac3b25f..19e790b60 100644
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_32.s
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_32.s
@@ -54,20 +54,6 @@ fixcw:
popl %eax
ret
- .align 16
- .globl SafeFetch32
- .globl SafeFetchN
- .globl Fetch32PFI, Fetch32Resume
-SafeFetch32:
-SafeFetchN:
- movl 0x8(%esp), %eax
- movl 0x4(%esp), %ecx
-Fetch32PFI:
- movl (%ecx), %eax
-Fetch32Resume:
- ret
-
-
.align 16
.globl SpinPause
SpinPause:
diff --git a/src/os_cpu/solaris_x86/vm/solaris_x86_64.s b/src/os_cpu/solaris_x86/vm/solaris_x86_64.s
index 95050af24..487b569e5 100644
--- a/src/os_cpu/solaris_x86/vm/solaris_x86_64.s
+++ b/src/os_cpu/solaris_x86/vm/solaris_x86_64.s
@@ -21,54 +21,34 @@
/ questions.
/
- .globl fs_load
- .globl fs_thread
+ .globl fs_load
+ .globl fs_thread
// NOTE WELL! The _Copy functions are called directly
- // from server-compiler-generated code via CallLeafNoFP,
- // which means that they *must* either not use floating
- // point or use it in the same manner as does the server
- // compiler.
+ // from server-compiler-generated code via CallLeafNoFP,
+ // which means that they *must* either not use floating
+ // point or use it in the same manner as does the server
+ // compiler.
.globl _Copy_arrayof_conjoint_bytes
.globl _Copy_conjoint_jshorts_atomic
- .globl _Copy_arrayof_conjoint_jshorts
+ .globl _Copy_arrayof_conjoint_jshorts
.globl _Copy_conjoint_jints_atomic
.globl _Copy_arrayof_conjoint_jints
- .globl _Copy_conjoint_jlongs_atomic
+ .globl _Copy_conjoint_jlongs_atomic
.globl _Copy_arrayof_conjoint_jlongs
- .section .text,"ax"
+ .section .text,"ax"
/ Fast thread accessors, used by threadLS_solaris_amd64.cpp
- .align 16
+ .align 16
fs_load:
- movq %fs:(%rdi),%rax
- ret
-
- .align 16
-fs_thread:
- movq %fs:0x0,%rax
- ret
-
- .globl SafeFetch32, Fetch32PFI, Fetch32Resume
- .align 16
- // Prototype: int SafeFetch32 (int * Adr, int ErrValue)
-SafeFetch32:
- movl %esi, %eax
-Fetch32PFI:
- movl (%rdi), %eax
-Fetch32Resume:
+ movq %fs:(%rdi),%rax
ret
- .globl SafeFetchN, FetchNPFI, FetchNResume
- .align 16
- // Prototype: intptr_t SafeFetchN (intptr_t * Adr, intptr_t ErrValue)
-SafeFetchN:
- movq %rsi, %rax
-FetchNPFI:
- movq (%rdi), %rax
-FetchNResume:
+ .align 16
+fs_thread:
+ movq %fs:0x0,%rax
ret
.globl SpinPause
@@ -78,7 +58,7 @@ SpinPause:
nop
movq $1, %rax
ret
-
+
/ Support for void Copy::arrayof_conjoint_bytes(void* from,
/ void* to,
@@ -340,7 +320,7 @@ aci_CopyLeft:
addq $4,%rdx
jg 1b
ret
-
+
/ Support for void Copy::arrayof_conjoint_jlongs(jlong* from,
/ jlong* to,
/ size_t count)
diff --git a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
index 1ef29f99a..a0f2a7680 100644
--- a/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
+++ b/src/os_cpu/windows_x86/vm/os_windows_x86.cpp
@@ -518,24 +518,6 @@ void os::print_register_info(outputStream *st, void *context) {
st->cr();
}
-extern "C" int SafeFetch32 (int * adr, int Err) {
- int rv = Err ;
- _try {
- rv = *((volatile int *) adr) ;
- } __except(EXCEPTION_EXECUTE_HANDLER) {
- }
- return rv ;
-}
-
-extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t Err) {
- intptr_t rv = Err ;
- _try {
- rv = *((volatile intptr_t *) adr) ;
- } __except(EXCEPTION_EXECUTE_HANDLER) {
- }
- return rv ;
-}
-
extern "C" int SpinPause () {
#ifdef AMD64
return 0 ;
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index 3bd042819..4c6d2bbf8 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -873,7 +873,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
size_t alloc_byte_size = alloc_word_size * HeapWordSize;
if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
- if (gcs_are_young()) {
+ if (gcs_are_young() && !_last_young_gc) {
ergo_verbose5(ErgoConcCycles,
"request concurrent cycle initiation",
ergo_format_reason("occupancy higher than threshold")
@@ -931,7 +931,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
last_pause_included_initial_mark = during_initial_mark_pause();
if (last_pause_included_initial_mark) {
record_concurrent_mark_init_end(0.0);
- } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+ } else if (need_to_start_conc_mark("end of GC")) {
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
diff --git a/src/share/vm/runtime/os.hpp b/src/share/vm/runtime/os.hpp
index 0d0e59efa..6078a2118 100644
--- a/src/share/vm/runtime/os.hpp
+++ b/src/share/vm/runtime/os.hpp
@@ -915,8 +915,6 @@ class os: AllStatic {
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.
-extern "C" int SpinPause () ;
-extern "C" int SafeFetch32 (int * adr, int errValue) ;
-extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;
+extern "C" int SpinPause();
#endif // SHARE_VM_RUNTIME_OS_HPP
diff --git a/src/share/vm/runtime/stubRoutines.cpp b/src/share/vm/runtime/stubRoutines.cpp
index a1179acd5..ff12ca651 100644
--- a/src/share/vm/runtime/stubRoutines.cpp
+++ b/src/share/vm/runtime/stubRoutines.cpp
@@ -136,6 +136,13 @@ double (* StubRoutines::_intrinsic_sin )(double) = NULL;
double (* StubRoutines::_intrinsic_cos )(double) = NULL;
double (* StubRoutines::_intrinsic_tan )(double) = NULL;
+address StubRoutines::_safefetch32_entry = NULL;
+address StubRoutines::_safefetch32_fault_pc = NULL;
+address StubRoutines::_safefetch32_continuation_pc = NULL;
+address StubRoutines::_safefetchN_entry = NULL;
+address StubRoutines::_safefetchN_fault_pc = NULL;
+address StubRoutines::_safefetchN_continuation_pc = NULL;
+
// Initialization
//
// Note: to break cycle with universe initialization, stubs are generated in two phases.
diff --git a/src/share/vm/runtime/stubRoutines.hpp b/src/share/vm/runtime/stubRoutines.hpp
index b8d61ea0c..e43e3ab0e 100644
--- a/src/share/vm/runtime/stubRoutines.hpp
+++ b/src/share/vm/runtime/stubRoutines.hpp
@@ -221,6 +221,14 @@ class StubRoutines: AllStatic {
static double (*_intrinsic_cos)(double);
static double (*_intrinsic_tan)(double);
+ // Safefetch stubs.
+ static address _safefetch32_entry;
+ static address _safefetch32_fault_pc;
+ static address _safefetch32_continuation_pc;
+ static address _safefetchN_entry;
+ static address _safefetchN_fault_pc;
+ static address _safefetchN_continuation_pc;
+
public:
// Initialization/Testing
static void initialize1(); // must happen before universe::genesis
@@ -382,6 +390,34 @@ class StubRoutines: AllStatic {
}
//
+ // Safefetch stub support
+ //
+
+ typedef int (*SafeFetch32Stub)(int* adr, int errValue);
+ typedef intptr_t (*SafeFetchNStub) (intptr_t* adr, intptr_t errValue);
+
+ static SafeFetch32Stub SafeFetch32_stub() { return CAST_TO_FN_PTR(SafeFetch32Stub, _safefetch32_entry); }
+ static SafeFetchNStub SafeFetchN_stub() { return CAST_TO_FN_PTR(SafeFetchNStub, _safefetchN_entry); }
+
+ static bool is_safefetch_fault(address pc) {
+ return pc != NULL &&
+ (pc == _safefetch32_fault_pc ||
+ pc == _safefetchN_fault_pc);
+ }
+
+ static address continuation_for_safefetch_fault(address pc) {
+ assert(_safefetch32_continuation_pc != NULL &&
+ _safefetchN_continuation_pc != NULL,
+ "not initialized");
+
+ if (pc == _safefetch32_fault_pc) return _safefetch32_continuation_pc;
+ if (pc == _safefetchN_fault_pc) return _safefetchN_continuation_pc;
+
+ ShouldNotReachHere();
+ return NULL;
+ }
+
+ //
// Default versions of the above arraycopy functions for platforms which do
// not have specialized versions
//
@@ -400,4 +436,15 @@ class StubRoutines: AllStatic {
static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);
};
+// SafeFetch allows a value to be loaded from a location that is not known
+// to be valid. If the load causes a fault, the error value is returned.
+inline int SafeFetch32(int* adr, int errValue) {
+ assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated");
+ return StubRoutines::SafeFetch32_stub()(adr, errValue);
+}
+inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
+ assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated");
+ return StubRoutines::SafeFetchN_stub()(adr, errValue);
+}
+
#endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP
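
Typical use of the new inline wrappers is to probe memory that may be unmapped, for example during error reporting or stack walking. A minimal usage sketch; is_readable_int() is a hypothetical helper, not part of this change:

    // Hypothetical helper: true if *p can be read without faulting.
    // Two different sentinels disambiguate a fault from a word that
    // happens to contain the error value.
    static bool is_readable_int(int* p) {
      const int s1 = 0x0BADCAFE;   // arbitrary error value
      const int s2 = 0x0DEADBEE;   // second, different error value
      return SafeFetch32(p, s1) != s1 || SafeFetch32(p, s2) != s2;
    }
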
diff --git a/src/share/vm/services/memTracker.cpp b/src/share/vm/services/memTracker.cpp
index e0a1b29a8..353e6a17f 100644
--- a/src/share/vm/services/memTracker.cpp
+++ b/src/share/vm/services/memTracker.cpp
@@ -81,13 +81,13 @@ void MemTracker::init_tracking_options(const char* option_line) {
} else if (strcmp(option_line, "=detail") == 0) {
// detail relies on a stack-walking ability that may not
// be available depending on platform and/or compiler flags
- if (PLATFORM_NMT_DETAIL_SUPPORTED) {
+#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
_tracking_level = NMT_detail;
- } else {
+#else
jio_fprintf(defaultStream::error_stream(),
- "NMT detail is not supported on this platform. Using NMT summary instead.");
+ "NMT detail is not supported on this platform. Using NMT summary instead.\n");
_tracking_level = NMT_summary;
- }
+#endif
} else if (strcmp(option_line, "=off") != 0) {
vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
}
diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
index e7f3f3f70..181e80a08 100644
--- a/src/share/vm/utilities/globalDefinitions.hpp
+++ b/src/share/vm/utilities/globalDefinitions.hpp
@@ -381,12 +381,12 @@ const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlass
#endif
/*
- * If a platform does not support NMT_detail
+ * If a platform does not support native stack walking
* the platform specific globalDefinitions (above)
- * can set PLATFORM_NMT_DETAIL_SUPPORTED to false
+ * can set PLATFORM_NATIVE_STACK_WALKING_SUPPORTED to 0
*/
-#ifndef PLATFORM_NMT_DETAIL_SUPPORTED
-#define PLATFORM_NMT_DETAIL_SUPPORTED true
+#ifndef PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+#define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 1
#endif
// The byte alignment to be used by Arena::Amalloc. See bugid 4169348.
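
A platform-specific globalDefinitions header can opt out before this default applies; a hypothetical example (header and platform name are illustrative only):

    // In a hypothetical platform header included ahead of the block above:
    #define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 0
    // memTracker.cpp then compiles the NMT-summary fallback instead of NMT detail.
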