Diffstat (limited to 'src/share/vm')
-rw-r--r--  src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp  |  4
-rw-r--r--  src/share/vm/runtime/os.hpp                              |  4
-rw-r--r--  src/share/vm/runtime/stubRoutines.cpp                    |  7
-rw-r--r--  src/share/vm/runtime/stubRoutines.hpp                    | 47
-rw-r--r--  src/share/vm/services/memTracker.cpp                     |  8
-rw-r--r--  src/share/vm/utilities/globalDefinitions.hpp             |  8
6 files changed, 65 insertions(+), 13 deletions(-)
diff --git a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
index 3bd042819..4c6d2bbf8 100644
--- a/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
+++ b/src/share/vm/gc_implementation/g1/g1CollectorPolicy.cpp
@@ -873,7 +873,7 @@ bool G1CollectorPolicy::need_to_start_conc_mark(const char* source, size_t alloc
size_t alloc_byte_size = alloc_word_size * HeapWordSize;
if ((cur_used_bytes + alloc_byte_size) > marking_initiating_used_threshold) {
- if (gcs_are_young()) {
+ if (gcs_are_young() && !_last_young_gc) {
ergo_verbose5(ErgoConcCycles,
"request concurrent cycle initiation",
ergo_format_reason("occupancy higher than threshold")
@@ -931,7 +931,7 @@ void G1CollectorPolicy::record_collection_pause_end(double pause_time_ms, Evacua
last_pause_included_initial_mark = during_initial_mark_pause();
if (last_pause_included_initial_mark) {
record_concurrent_mark_init_end(0.0);
- } else if (!_last_young_gc && need_to_start_conc_mark("end of GC")) {
+ } else if (need_to_start_conc_mark("end of GC")) {
// Note: this might have already been set, if during the last
// pause we decided to start a cycle but at the beginning of
// this pause we decided to postpone it. That's OK.
diff --git a/src/share/vm/runtime/os.hpp b/src/share/vm/runtime/os.hpp
index 0d0e59efa..6078a2118 100644
--- a/src/share/vm/runtime/os.hpp
+++ b/src/share/vm/runtime/os.hpp
@@ -915,8 +915,6 @@ class os: AllStatic {
// of the global SpinPause() with C linkage.
// It'd also be eligible for inlining on many platforms.
-extern "C" int SpinPause () ;
-extern "C" int SafeFetch32 (int * adr, int errValue) ;
-extern "C" intptr_t SafeFetchN (intptr_t * adr, intptr_t errValue) ;
+extern "C" int SpinPause();
#endif // SHARE_VM_RUNTIME_OS_HPP
diff --git a/src/share/vm/runtime/stubRoutines.cpp b/src/share/vm/runtime/stubRoutines.cpp
index a1179acd5..ff12ca651 100644
--- a/src/share/vm/runtime/stubRoutines.cpp
+++ b/src/share/vm/runtime/stubRoutines.cpp
@@ -136,6 +136,13 @@ double (* StubRoutines::_intrinsic_sin )(double) = NULL;
double (* StubRoutines::_intrinsic_cos )(double) = NULL;
double (* StubRoutines::_intrinsic_tan )(double) = NULL;
+address StubRoutines::_safefetch32_entry = NULL;
+address StubRoutines::_safefetch32_fault_pc = NULL;
+address StubRoutines::_safefetch32_continuation_pc = NULL;
+address StubRoutines::_safefetchN_entry = NULL;
+address StubRoutines::_safefetchN_fault_pc = NULL;
+address StubRoutines::_safefetchN_continuation_pc = NULL;
+
// Initialization
//
// Note: to break cycle with universe initialization, stubs are generated in two phases.
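The six new entry points default to NULL and are only filled in when the platform stub generator runs during the two-phase initialization noted above, so nothing may probe memory before then. A minimal sketch of an availability guard, using only the accessor added below in stubRoutines.hpp (can_use_safefetch32 is a hypothetical name, not part of this patch):

// Hypothetical guard: before stub generation the entry points are
// still NULL, so SafeFetch32() must not be called yet.
static bool can_use_safefetch32() {
  return StubRoutines::SafeFetch32_stub() != NULL;
}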
diff --git a/src/share/vm/runtime/stubRoutines.hpp b/src/share/vm/runtime/stubRoutines.hpp
index b8d61ea0c..e43e3ab0e 100644
--- a/src/share/vm/runtime/stubRoutines.hpp
+++ b/src/share/vm/runtime/stubRoutines.hpp
@@ -221,6 +221,14 @@ class StubRoutines: AllStatic {
static double (*_intrinsic_cos)(double);
static double (*_intrinsic_tan)(double);
+ // Safefetch stubs.
+ static address _safefetch32_entry;
+ static address _safefetch32_fault_pc;
+ static address _safefetch32_continuation_pc;
+ static address _safefetchN_entry;
+ static address _safefetchN_fault_pc;
+ static address _safefetchN_continuation_pc;
+
public:
// Initialization/Testing
static void initialize1(); // must happen before universe::genesis
@@ -382,6 +390,34 @@ class StubRoutines: AllStatic {
}
//
+ // Safefetch stub support
+ //
+
+ typedef int (*SafeFetch32Stub)(int* adr, int errValue);
+ typedef intptr_t (*SafeFetchNStub) (intptr_t* adr, intptr_t errValue);
+
+ static SafeFetch32Stub SafeFetch32_stub() { return CAST_TO_FN_PTR(SafeFetch32Stub, _safefetch32_entry); }
+ static SafeFetchNStub SafeFetchN_stub() { return CAST_TO_FN_PTR(SafeFetchNStub, _safefetchN_entry); }
+
+ static bool is_safefetch_fault(address pc) {
+ return pc != NULL &&
+ (pc == _safefetch32_fault_pc ||
+ pc == _safefetchN_fault_pc);
+ }
+
+ static address continuation_for_safefetch_fault(address pc) {
+ assert(_safefetch32_continuation_pc != NULL &&
+ _safefetchN_continuation_pc != NULL,
+ "not initialized");
+
+ if (pc == _safefetch32_fault_pc) return _safefetch32_continuation_pc;
+ if (pc == _safefetchN_fault_pc) return _safefetchN_continuation_pc;
+
+ ShouldNotReachHere();
+ return NULL;
+ }
+
+ //
// Default versions of the above arraycopy functions for platforms which do
// not have specialized versions
//
@@ -400,4 +436,15 @@ class StubRoutines: AllStatic {
static void arrayof_oop_copy_uninit(HeapWord* src, HeapWord* dest, size_t count);
};
+// SafeFetch allows loading a value from a location that is not known
+// to be valid. If the load causes a fault, the error value is returned.
+inline int SafeFetch32(int* adr, int errValue) {
+ assert(StubRoutines::SafeFetch32_stub(), "stub not yet generated");
+ return StubRoutines::SafeFetch32_stub()(adr, errValue);
+}
+inline intptr_t SafeFetchN(intptr_t* adr, intptr_t errValue) {
+ assert(StubRoutines::SafeFetchN_stub(), "stub not yet generated");
+ return StubRoutines::SafeFetchN_stub()(adr, errValue);
+}
+
#endif // SHARE_VM_RUNTIME_STUBROUTINES_HPP
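For these stubs to work, the platform fault handler has to recognize a fault at the recorded fault PC and resume execution at the continuation PC. A minimal sketch of that integration, assuming a POSIX-style handler; fetch_pc_from_context() and set_pc_in_context() are hypothetical stand-ins for the platform-specific ucontext accessors that live in the per-platform os_<os>_<cpu>.cpp files:

#include <signal.h>

typedef unsigned char* address;  // HotSpot's address type

// Hypothetical platform accessors for the interrupted PC.
static address fetch_pc_from_context(const void* ucontext);
static void    set_pc_in_context(void* ucontext, address pc);

extern "C" void fault_handler(int sig, siginfo_t* info, void* ucontext) {
  address pc = fetch_pc_from_context(ucontext);
  if (StubRoutines::is_safefetch_fault(pc)) {
    // The load inside the SafeFetch stub faulted: jump to the
    // continuation point, which returns the caller-supplied errValue.
    set_pc_in_context(ucontext, StubRoutines::continuation_for_safefetch_fault(pc));
    return;
  }
  // ... regular crash handling ...
}

With that wiring in place, SafeFetch32(addr, -1) returns *addr when the location is readable and -1 when the access faults, which is what makes it safe for probing arbitrary addresses, e.g. from error-reporting code.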
diff --git a/src/share/vm/services/memTracker.cpp b/src/share/vm/services/memTracker.cpp
index e0a1b29a8..353e6a17f 100644
--- a/src/share/vm/services/memTracker.cpp
+++ b/src/share/vm/services/memTracker.cpp
@@ -81,13 +81,13 @@ void MemTracker::init_tracking_options(const char* option_line) {
} else if (strcmp(option_line, "=detail") == 0) {
// detail relies on a stack-walking ability that may not
// be available depending on platform and/or compiler flags
- if (PLATFORM_NMT_DETAIL_SUPPORTED) {
+#if PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
_tracking_level = NMT_detail;
- } else {
+#else
jio_fprintf(defaultStream::error_stream(),
- "NMT detail is not supported on this platform. Using NMT summary instead.");
+ "NMT detail is not supported on this platform. Using NMT summary instead.\n");
_tracking_level = NMT_summary;
- }
+#endif
} else if (strcmp(option_line, "=off") != 0) {
vm_exit_during_initialization("Syntax error, expecting -XX:NativeMemoryTracking=[off|summary|detail]", NULL);
}
diff --git a/src/share/vm/utilities/globalDefinitions.hpp b/src/share/vm/utilities/globalDefinitions.hpp
index e7f3f3f70..181e80a08 100644
--- a/src/share/vm/utilities/globalDefinitions.hpp
+++ b/src/share/vm/utilities/globalDefinitions.hpp
@@ -381,12 +381,12 @@ const uint64_t KlassEncodingMetaspaceMax = (uint64_t(max_juint) + 1) << LogKlass
#endif
/*
- * If a platform does not support NMT_detail
+ * If a platform does not support native stack walking
* the platform specific globalDefinitions (above)
- * can set PLATFORM_NMT_DETAIL_SUPPORTED to false
+ * can set PLATFORM_NATIVE_STACK_WALKING_SUPPORTED to 0
*/
-#ifndef PLATFORM_NMT_DETAIL_SUPPORTED
-#define PLATFORM_NMT_DETAIL_SUPPORTED true
+#ifndef PLATFORM_NATIVE_STACK_WALKING_SUPPORTED
+#define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 1
#endif
// The byte alignment to be used by Arena::Amalloc. See bugid 4169348.
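Per the comment in the hunk above, a platform that cannot walk native stacks opts out before this default applies. A minimal sketch, assuming the override sits in that platform's globalDefinitions_<toolchain>.hpp (the exact file is platform-specific):

// In the platform-specific globalDefinitions header, which is
// included before the default above takes effect:
#define PLATFORM_NATIVE_STACK_WALKING_SUPPORTED 0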