author    zgu <none@none>  2012-11-09 19:24:31 -0500
committer zgu <none@none>  2012-11-09 19:24:31 -0500
commit    f8294c05cd106ae1e28effe3b26d3b33c6cbefaf (patch)
tree      26692b1d8c3e5de2a76b0af036b02b3f5037224d /src/share
parent    ee4c432d3f765b0e4fdd9badaa3054ce0c9881eb (diff)
8001592: NMT: assertion failed: assert(_amount >= amt) failed: Just check: memBaseline.hpp:180
Summary: Fixed NMT miscounting of arena memory when an arena is used as a value or stack object.
Reviewed-by: acorn, coleenp
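Background for the fix below: a HotSpot Arena's backing memory is tracked by NMT through a separate "arena size" record, but the Arena object itself is not necessarily malloc'ed, so there may be no arena allocation record to pair the size record with. A minimal standalone sketch of the two usage patterns named in the summary; the types here are illustrative stand-ins, not HotSpot code:

#include <cstddef>

struct Arena {             // stand-in for HotSpot's Arena
  size_t _size_in_bytes;   // what the "arena size" record tracks
  Arena() : _size_in_bytes(0) {}
};

struct CompilerScratch {   // hypothetical owner type
  Arena _arena;            // arena as a value object: embedded in another
};                         // object, never individually malloc'ed

void do_work() {
  Arena scratch;           // arena as a stack object: again, no malloc
  (void)scratch;           // record for the Arena itself, only its size record
}

int main() {
  CompilerScratch cs;      // NMT would see cs's allocation, not _arena's
  (void)cs;
  do_work();
  return 0;
}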
Diffstat (limited to 'src/share')
-rw-r--r--  src/share/vm/services/memBaseline.cpp  |  32
-rw-r--r--  src/share/vm/services/memPtr.hpp       |  10
-rw-r--r--  src/share/vm/services/memSnapshot.cpp  | 117
-rw-r--r--  src/share/vm/services/memSnapshot.hpp  |  83
-rw-r--r--  src/share/vm/services/memTracker.hpp   |   6
5 files changed, 135 insertions, 113 deletions
diff --git a/src/share/vm/services/memBaseline.cpp b/src/share/vm/services/memBaseline.cpp
index 57bdb0138..9127e00f9 100644
--- a/src/share/vm/services/memBaseline.cpp
+++ b/src/share/vm/services/memBaseline.cpp
@@ -115,17 +115,25 @@ bool MemBaseline::baseline_malloc_summary(const MemPointerArray* malloc_records)
  while (malloc_ptr != NULL) {
    index = flag2index(FLAGS_TO_MEMORY_TYPE(malloc_ptr->flags()));
    size_t size = malloc_ptr->size();
-    _total_malloced += size;
-    _malloc_data[index].inc(size);
-    if (MemPointerRecord::is_arena_record(malloc_ptr->flags())) {
-      // see if arena size record present
-      MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
-      if (MemPointerRecord::is_arena_size_record(next_malloc_ptr->flags())) {
-        assert(next_malloc_ptr->is_size_record_of_arena(malloc_ptr), "arena records do not match");
-        size = next_malloc_ptr->size();
-        _arena_data[index].inc(size);
-        used_arena_size += size;
-        malloc_itr.next();
+    if (malloc_ptr->is_arena_memory_record()) {
+      // We do have anonymous arenas: they are either used as value objects,
+      // which are embedded inside other objects, or used as stack objects.
+      _arena_data[index].inc(size);
+      used_arena_size += size;
+    } else {
+      _total_malloced += size;
+      _malloc_data[index].inc(size);
+      if (malloc_ptr->is_arena_record()) {
+        // see if an arena memory record is present
+        MemPointerRecord* next_malloc_ptr = (MemPointerRecordEx*)malloc_itr.peek_next();
+        if (next_malloc_ptr->is_arena_memory_record()) {
+          assert(next_malloc_ptr->is_memory_record_of_arena(malloc_ptr),
+                 "Arena records do not match");
+          size = next_malloc_ptr->size();
+          _arena_data[index].inc(size);
+          used_arena_size += size;
+          malloc_itr.next();
+        }
      }
    }
    malloc_ptr = (MemPointerRecordEx*)malloc_itr.next();
@@ -193,7 +201,7 @@ bool MemBaseline::baseline_malloc_details(const MemPointerArray* malloc_records)
  // baseline memory that is totaled over 1 KB
  while (malloc_ptr != NULL) {
-    if (!MemPointerRecord::is_arena_size_record(malloc_ptr->flags())) {
+    if (!MemPointerRecord::is_arena_memory_record(malloc_ptr->flags())) {
      // skip thread stacks
      if (!IS_MEMORY_TYPE(malloc_ptr->flags(), mtThreadStack)) {
        if (malloc_callsite.addr() != malloc_ptr->pc()) {
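The revised summary walk above counts anonymous arena memory records directly, and only pairs a memory record with a preceding arena record when one exists. A compilable sketch of that pairing rule, under assumed record kinds; Record and its fields are stand-ins for the MemPointerRecord API:

#include <cstdio>
#include <vector>

struct Record {
  bool   arena;         // arena object record (a malloc'ed Arena)
  bool   arena_memory;  // arena memory (size) record
  size_t size;
};

int main() {
  std::vector<Record> recs = {
    {true,  false,   64},  // malloc'ed arena object...
    {false, true,  4096},  // ...followed by its memory record
    {false, true,   512},  // anonymous (value/stack) arena: memory only
    {false, false,  128}   // plain malloc block
  };
  size_t total_malloced = 0, used_arena = 0;
  for (size_t i = 0; i < recs.size(); i++) {
    if (recs[i].arena_memory) {
      used_arena += recs[i].size;          // anonymous arena memory
    } else {
      total_malloced += recs[i].size;      // the object itself was malloc'ed
      if (recs[i].arena && i + 1 < recs.size() && recs[i + 1].arena_memory) {
        used_arena += recs[i + 1].size;    // consume the paired memory record
        i++;
      }
    }
  }
  std::printf("malloced=%zu arena=%zu\n", total_malloced, used_arena);
  return 0;  // prints: malloced=192 arena=4608
}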
diff --git a/src/share/vm/services/memPtr.hpp b/src/share/vm/services/memPtr.hpp
index 3f888d968..a27c3eb4a 100644
--- a/src/share/vm/services/memPtr.hpp
+++ b/src/share/vm/services/memPtr.hpp
@@ -165,7 +165,7 @@ public:
    return (flags & (otArena | tag_size)) == otArena;
  }
-  inline static bool is_arena_size_record(MEMFLAGS flags) {
+  inline static bool is_arena_memory_record(MEMFLAGS flags) {
    return (flags & (otArena | tag_size)) == (otArena | tag_size);
  }
@@ -256,8 +256,8 @@ public:
  }
  // if this record records the size information of an arena
-  inline bool is_arena_size_record() const {
-    return is_arena_size_record(_flags);
+  inline bool is_arena_memory_record() const {
+    return is_arena_memory_record(_flags);
  }
  // if this pointer represents an address to an arena object
@@ -266,8 +266,8 @@ public:
  }
  // if this record represents the size information of a specific arena
-  inline bool is_size_record_of_arena(const MemPointerRecord* arena_rc) {
-    assert(is_arena_size_record(), "not size record");
+  inline bool is_memory_record_of_arena(const MemPointerRecord* arena_rc) {
+    assert(is_arena_memory_record(), "not size record");
    assert(arena_rc->is_arena_record(), "not arena record");
    return (arena_rc->addr() + sizeof(void*)) == addr();
  }
diff --git a/src/share/vm/services/memSnapshot.cpp b/src/share/vm/services/memSnapshot.cpp
index 936df1985..cd6870c91 100644
--- a/src/share/vm/services/memSnapshot.cpp
+++ b/src/share/vm/services/memSnapshot.cpp
@@ -50,7 +50,7 @@ void decode_pointer_record(MemPointerRecord* rec) {
      tty->print_cr(" (tag)");
    }
  } else {
-    if (rec->is_arena_size_record()) {
+    if (rec->is_arena_memory_record()) {
      tty->print_cr(" (arena size)");
    } else if (rec->is_allocation_record()) {
      tty->print_cr(" (malloc)");
@@ -390,21 +390,31 @@ MemSnapshot::~MemSnapshot() {
  }
}
-void MemSnapshot::copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
+
+void MemSnapshot::copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
  assert(dest != NULL && src != NULL, "Just check");
  assert(dest->addr() == src->addr(), "Just check");
+  assert(dest->seq() > 0 && src->seq() > 0, "not sequenced");
-  MEMFLAGS flags = dest->flags();
+  if (MemTracker::track_callsite()) {
+    *(SeqMemPointerRecordEx*)dest = *(SeqMemPointerRecordEx*)src;
+  } else {
+    *(SeqMemPointerRecord*)dest = *(SeqMemPointerRecord*)src;
+  }
+}
+
+void MemSnapshot::assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src) {
+  assert(src != NULL && dest != NULL, "Just check");
+  assert(dest->seq() == 0 && src->seq() > 0, "cast away sequence");
  if (MemTracker::track_callsite()) {
    *(MemPointerRecordEx*)dest = *(MemPointerRecordEx*)src;
  } else {
-    *dest = *src;
+    *(MemPointerRecord*)dest = *(MemPointerRecord*)src;
  }
}
-
-// merge a per-thread memory recorder to the staging area
+// merge a recorder to the staging area
bool MemSnapshot::merge(MemRecorder* rec) {
  assert(rec != NULL && !rec->out_of_memory(), "Just check");
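The two helpers differ only in which record type the cast selects: copying through the sequenced type preserves the sequence number, while assigning through the plain record type deliberately "casts away sequence" by running the base type's assignment. A sketch of that mechanism with illustrative stand-in types, not the Seq*/Ex record classes themselves:

#include <cassert>
#include <cstdint>

struct Rec          { uintptr_t addr = 0; };
struct RecEx : Rec  { uintptr_t pc = 0;   };  // adds callsite pc
struct SeqRecEx : RecEx { uint32_t seq = 0; };  // adds sequence number

int main() {
  SeqRecEx src; src.addr = 0x1000; src.pc = 0xabc; src.seq = 7;

  SeqRecEx dst1; dst1.addr = 0x1000;
  Rec* d = &dst1; const Rec* s = &src;
  *(SeqRecEx*)d = *(const SeqRecEx*)s;   // full copy, like copy_seq_pointer
  assert(dst1.seq == 7 && dst1.pc == 0xabc);

  SeqRecEx dst2; dst2.addr = 0x1000;
  *(Rec*)&dst2 = src;                    // base-only copy: seq and pc untouched
  assert(dst2.seq == 0 && dst2.pc == 0);
  return 0;
}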
@@ -412,71 +422,45 @@ bool MemSnapshot::merge(MemRecorder* rec) {
  MutexLockerEx lock(_lock, true);
  MemPointerIterator malloc_staging_itr(_staging_area.malloc_data());
-  MemPointerRecord *p1, *p2;
-  p1 = (MemPointerRecord*) itr.current();
-  while (p1 != NULL) {
-    if (p1->is_vm_pointer()) {
+  MemPointerRecord* incoming_rec = (MemPointerRecord*) itr.current();
+  MemPointerRecord* matched_rec;
+
+  while (incoming_rec != NULL) {
+    if (incoming_rec->is_vm_pointer()) {
      // we don't do anything with virtual memory records during merge
-      if (!_staging_area.vm_data()->append(p1)) {
+      if (!_staging_area.vm_data()->append(incoming_rec)) {
        return false;
      }
    } else {
      // locate matched record and/or also position the iterator to proper
      // location for this incoming record.
-      p2 = (MemPointerRecord*)malloc_staging_itr.locate(p1->addr());
-      // we have not seen this memory block, so just add to staging area
-      if (p2 == NULL) {
-        if (!malloc_staging_itr.insert(p1)) {
+      matched_rec = (MemPointerRecord*)malloc_staging_itr.locate(incoming_rec->addr());
+      // we have not seen this memory block in this generation,
+      // so just add to staging area
+      if (matched_rec == NULL) {
+        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
-      } else if (p1->addr() == p2->addr()) {
-        MemPointerRecord* staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
-        // a memory block can have many tagging records, find right one to replace or
-        // right position to insert
-        while (staging_next != NULL && staging_next->addr() == p1->addr()) {
-          if ((staging_next->flags() & MemPointerRecord::tag_masks) <=
-              (p1->flags() & MemPointerRecord::tag_masks)) {
-            p2 = (MemPointerRecord*)malloc_staging_itr.next();
-            staging_next = (MemPointerRecord*)malloc_staging_itr.peek_next();
-          } else {
-            break;
-          }
+      } else if (incoming_rec->addr() == matched_rec->addr()) {
+        // whoever has higher sequence number wins
+        if (incoming_rec->seq() > matched_rec->seq()) {
+          copy_seq_pointer(matched_rec, incoming_rec);
        }
-        int df = (p1->flags() & MemPointerRecord::tag_masks) -
-                 (p2->flags() & MemPointerRecord::tag_masks);
-        if (df == 0) {
-          assert(p1->seq() > 0, "not sequenced");
-          assert(p2->seq() > 0, "not sequenced");
-          if (p1->seq() > p2->seq()) {
-            copy_pointer(p2, p1);
-          }
-        } else if (df < 0) {
-          if (!malloc_staging_itr.insert(p1)) {
-            return false;
-          }
-        } else {
-          if (!malloc_staging_itr.insert_after(p1)) {
-            return false;
-          }
-        }
-      } else if (p1->addr() < p2->addr()) {
-        if (!malloc_staging_itr.insert(p1)) {
+      } else if (incoming_rec->addr() < matched_rec->addr()) {
+        if (!malloc_staging_itr.insert(incoming_rec)) {
          return false;
        }
      } else {
-        if (!malloc_staging_itr.insert_after(p1)) {
-          return false;
-        }
+        ShouldNotReachHere();
      }
    }
-    p1 = (MemPointerRecord*)itr.next();
+    incoming_rec = (MemPointerRecord*)itr.next();
  }
  NOT_PRODUCT(void check_staging_data();)
  return true;
}
-
// promote data to next generation
bool MemSnapshot::promote() {
  assert(_alloc_ptrs != NULL && _vm_ptrs != NULL, "Just check");
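The rewritten merge no longer walks per-address tagging records; records for the same address are reconciled purely by sequence number, with the higher sequence number winning. A minimal sketch of that rule, with std::map standing in for the sorted staging array:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

struct Rec { uint32_t seq; size_t size; };

// merge one incoming record into the staging area
void merge_rec(std::map<uintptr_t, Rec>& staging, uintptr_t addr, Rec rec) {
  std::map<uintptr_t, Rec>::iterator it = staging.find(addr);
  if (it == staging.end()) {
    staging[addr] = rec;                 // first sighting of this block
  } else if (rec.seq > it->second.seq) {
    it->second = rec;                    // newer record supersedes older one
  }                                      // else: keep existing, drop incoming
}

int main() {
  std::map<uintptr_t, Rec> staging;
  merge_rec(staging, 0x1000, {5, 64});
  merge_rec(staging, 0x1000, {9, 128});  // newer, wins
  merge_rec(staging, 0x1000, {7, 32});   // older, dropped
  assert(staging[0x1000].seq == 9 && staging[0x1000].size == 128);
  return 0;
}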
@@ -507,20 +491,25 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
    // found matched memory block
    if (matched_rec != NULL && new_rec->addr() == matched_rec->addr()) {
      // snapshot already contains 'live' records
-      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_size_record(),
+      assert(matched_rec->is_allocation_record() || matched_rec->is_arena_memory_record(),
             "Sanity check");
      // update block states
-      if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
-        copy_pointer(matched_rec, new_rec);
+      if (new_rec->is_allocation_record()) {
+        assign_pointer(matched_rec, new_rec);
+      } else if (new_rec->is_arena_memory_record()) {
+        if (new_rec->size() == 0) {
+          // remove size record once size drops to 0
+          malloc_snapshot_itr.remove();
+        } else {
+          assign_pointer(matched_rec, new_rec);
+        }
      } else {
        // a deallocation record
        assert(new_rec->is_deallocation_record(), "Sanity check");
        // an arena record can be followed by a size record; we need to remove both
        if (matched_rec->is_arena_record()) {
          MemPointerRecord* next = (MemPointerRecord*)malloc_snapshot_itr.peek_next();
-          if (next->is_arena_size_record()) {
-            // it has to match the arena record
-            assert(next->is_size_record_of_arena(matched_rec), "Sanity check");
+          if (next->is_arena_memory_record() && next->is_memory_record_of_arena(matched_rec)) {
            malloc_snapshot_itr.remove();
          }
        }
@@ -528,17 +517,13 @@ bool MemSnapshot::promote_malloc_records(MemPointerArrayIterator* itr) {
        malloc_snapshot_itr.remove();
      }
    } else {
-      // it is a new record, insert into snapshot
-      if (new_rec->is_arena_size_record()) {
-        MemPointerRecord* prev = (MemPointerRecord*)malloc_snapshot_itr.peek_prev();
-        if (prev == NULL || !prev->is_arena_record() || !new_rec->is_size_record_of_arena(prev)) {
-          // no matched arena record, ignore the size record
-          new_rec = NULL;
-        }
+      // don't insert a size-0 record
+      if (new_rec->is_arena_memory_record() && new_rec->size() == 0) {
+        new_rec = NULL;
      }
-      // only 'live' record can go into snapshot
+
      if (new_rec != NULL) {
-        if (new_rec->is_allocation_record() || new_rec->is_arena_size_record()) {
+        if (new_rec->is_allocation_record() || new_rec->is_arena_memory_record()) {
          if (matched_rec != NULL && new_rec->addr() > matched_rec->addr()) {
            if (!malloc_snapshot_itr.insert_after(new_rec)) {
              return false;
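The promote path treats a size that has dropped to 0 as the arena's cleanup record: the matching snapshot entry is removed rather than updated, and a fresh size-0 record is never inserted. A sketch of that rule; the map stands in for the snapshot's sorted record array:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <map>

// promote one arena memory record into the snapshot
void promote_arena_memory(std::map<uintptr_t, size_t>& snapshot,
                          uintptr_t addr, size_t new_size) {
  if (new_size == 0) {
    snapshot.erase(addr);        // size reset to 0: drop the record entirely
  } else {
    snapshot[addr] = new_size;   // otherwise update (or insert) the size
  }
}

int main() {
  std::map<uintptr_t, size_t> snapshot;
  promote_arena_memory(snapshot, 0x1008, 4096);  // arena grew
  promote_arena_memory(snapshot, 0x1008, 8192);  // arena grew again
  assert(snapshot.at(0x1008) == 8192);
  promote_arena_memory(snapshot, 0x1008, 0);     // arena destroyed
  assert(snapshot.find(0x1008) == snapshot.end());
  return 0;
}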
diff --git a/src/share/vm/services/memSnapshot.hpp b/src/share/vm/services/memSnapshot.hpp
index dd52f4cd7..1620c5454 100644
--- a/src/share/vm/services/memSnapshot.hpp
+++ b/src/share/vm/services/memSnapshot.hpp
@@ -31,7 +31,6 @@
#include "services/memBaseline.hpp"
#include "services/memPtrArray.hpp"
-
// Snapshot pointer array iterator
// The pointer array contains malloc-ed pointers
@@ -165,39 +164,58 @@ class VMMemPointerIterator : public MemPointerIterator {
};
class MallocRecordIterator : public MemPointerArrayIterator {
- protected:
+ private:
  MemPointerArrayIteratorImpl _itr;
+
+
 public:
  MallocRecordIterator(MemPointerArray* arr) : _itr(arr) {
  }
  virtual MemPointer* current() const {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    assert(cur == NULL || !cur->is_vm_pointer(), "seek error");
-    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
-    if (next == NULL || next->addr() != cur->addr()) {
-      return cur;
-    } else {
-      assert(!cur->is_vm_pointer(), "Sanity check");
-      assert(cur->is_allocation_record() && next->is_deallocation_record(),
-             "sorting order");
-      assert(cur->seq() != next->seq(), "Sanity check");
-      return cur->seq() > next->seq() ? cur : next;
+#ifdef ASSERT
+    MemPointer* cur_rec = _itr.current();
+    if (cur_rec != NULL) {
+      MemPointer* prev_rec = _itr.peek_prev();
+      MemPointer* next_rec = _itr.peek_next();
+      assert(prev_rec == NULL || prev_rec->addr() < cur_rec->addr(), "Sorting order");
+      assert(next_rec == NULL || next_rec->addr() > cur_rec->addr(), "Sorting order");
    }
+#endif
+    return _itr.current();
  }
-
  virtual MemPointer* next() {
-    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
-    assert(cur == NULL || !cur->is_vm_pointer(), "Sanity check");
-    MemPointerRecord* next = (MemPointerRecord*)_itr.next();
-    if (next == NULL) {
-      return NULL;
-    }
-    if (cur->addr() == next->addr()) {
-      next = (MemPointerRecord*)_itr.next();
+    MemPointerRecord* next_rec = (MemPointerRecord*)_itr.next();
+    // an arena memory record is a special case: its sequence number has to
+    // be compared against that of its associated arena record.
+    if (next_rec != NULL && next_rec->is_arena_memory_record()) {
+      MemPointerRecord* prev_rec = (MemPointerRecord*)_itr.peek_prev();
+      // if there is an associated arena record, it has to be the previous
+      // record because of the sorting order (by address): NMT generates a
+      // pseudo address for an arena's size record by offsetting the arena's
+      // address, which guarantees the ordering of an arena record and its
+      // size record.
+      if (prev_rec != NULL && prev_rec->is_arena_record() &&
+          next_rec->is_memory_record_of_arena(prev_rec)) {
+        if (prev_rec->seq() > next_rec->seq()) {
+          // skip this arena memory record. Two scenarios:
+          //  - if the arena record is an allocation record, this earlier
+          //    size record must be left over from a previous arena, whose
+          //    last size record should have size = 0.
+          //  - if the arena record is a deallocation record, this size
+          //    record should be its cleanup record, which should also have
+          //    size = 0. In other words, an arena always resets its size
+          //    before it goes away (see Arena's destructor).
+          assert(next_rec->size() == 0, "size not reset");
+          return _itr.next();
+        } else {
+          assert(prev_rec->is_allocation_record(),
+                 "Arena size record ahead of allocation record");
+        }
+      }
    }
-    return current();
+    return next_rec;
  }
  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
@@ -213,9 +231,12 @@ class MallocRecordIterator : public MemPointerArrayIterator {
// still chances seeing duplicated records during promotion.
// We want to use the record with higher sequence number, because it has
// more accurate callsite pc.
-class VMRecordIterator : public MallocRecordIterator {
+class VMRecordIterator : public MemPointerArrayIterator {
+ private:
+  MemPointerArrayIteratorImpl _itr;
+
 public:
-  VMRecordIterator(MemPointerArray* arr) : MallocRecordIterator(arr) {
+  VMRecordIterator(MemPointerArray* arr) : _itr(arr) {
    MemPointerRecord* cur = (MemPointerRecord*)_itr.current();
    MemPointerRecord* next = (MemPointerRecord*)_itr.peek_next();
    while (next != NULL) {
@@ -256,6 +277,12 @@ class VMRecordIterator : public MallocRecordIterator {
    return cur;
  }
+  MemPointer* peek_next() const { ShouldNotReachHere(); return NULL; }
+  MemPointer* peek_prev() const { ShouldNotReachHere(); return NULL; }
+  void remove() { ShouldNotReachHere(); }
+  bool insert(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+  bool insert_after(MemPointer* ptr) { ShouldNotReachHere(); return false; }
+
 private:
  bool is_duplicated_record(MemPointerRecord* p1, MemPointerRecord* p2) const {
    bool ret = (p1->addr() == p2->addr() && p1->size() == p2->size() && p1->flags() == p2->flags());
@@ -348,8 +375,10 @@ class MemSnapshot : public CHeapObj<mtNMT> {
  DEBUG_ONLY( void dump_all_vm_pointers();)
 private:
-  // copy pointer data from src to dest
-  void copy_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+  // copy a sequenced pointer from src to dest
+  void copy_seq_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
+  // assign a sequenced pointer to a non-sequenced pointer
+  void assign_pointer(MemPointerRecord* dest, const MemPointerRecord* src);
  bool promote_malloc_records(MemPointerArrayIterator* itr);
  bool promote_virtual_memory_records(MemPointerArrayIterator* itr);
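The new MallocRecordIterator::next() above skips an arena memory record whose sequence number is older than its owning arena record's, asserting that such a stale record has size 0. A standalone sketch of the skip test, with simplified stand-in record types:

#include <cassert>
#include <cstddef>
#include <cstdint>
#include <vector>

struct Rec {
  uintptr_t addr;
  uint32_t  seq;
  size_t    size;
  bool      arena;         // arena object record
  bool      arena_memory;  // arena memory record at addr + sizeof(void*)
};

// should record i be skipped, given its sorted-by-address predecessor?
bool skip_stale_arena_memory(const std::vector<Rec>& recs, size_t i) {
  if (i == 0 || !recs[i].arena_memory) return false;
  const Rec& prev = recs[i - 1];
  bool owned = prev.arena && prev.addr + sizeof(void*) == recs[i].addr;
  if (owned && prev.seq > recs[i].seq) {
    assert(recs[i].size == 0 && "stale arena memory record must be reset");
    return true;  // leftover from a previous arena at the same address
  }
  return false;
}

int main() {
  std::vector<Rec> recs = {
    {0x2000,                 8, 64, true,  false},  // newer arena record
    {0x2000 + sizeof(void*), 3,  0, false, true},   // stale size record
  };
  assert(skip_stale_arena_memory(recs, 1));
  return 0;
}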
diff --git a/src/share/vm/services/memTracker.hpp b/src/share/vm/services/memTracker.hpp
index c42f01bf6..13eb4caf8 100644
--- a/src/share/vm/services/memTracker.hpp
+++ b/src/share/vm/services/memTracker.hpp
@@ -284,14 +284,14 @@ class MemTracker : AllStatic {
    }
  }
-  // record arena size
+  // record arena memory size
  static inline void record_arena_size(address addr, size_t size) {
-    // we add a positive offset to arena address, so we can have arena size record
+    // we add a positive offset to arena address, so we can have arena memory record
    // sorted after arena record
    if (is_on() && !UseMallocOnly) {
      assert(addr != NULL, "Sanity check");
      create_memory_record((addr + sizeof(void*)), MemPointerRecord::arena_size_tag(), size,
-        0, NULL);
+        DEBUG_CALLER_PC, NULL);
    }
  }
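The sizeof(void*) offset gives the arena memory record a pseudo address just above the arena's own, so any address-sorted container places it immediately after the arena record, which is exactly the ordering the iterator logic above relies on. A small sketch of that guarantee, using a std::map as the address-sorted container:

#include <cassert>
#include <cstdint>
#include <iterator>
#include <map>
#include <string>

int main() {
  uintptr_t arena_addr = 0x1000;              // hypothetical arena address
  std::map<uintptr_t, std::string> by_addr;   // address-sorted, like NMT's arrays
  by_addr[arena_addr] = "arena record";
  by_addr[arena_addr + sizeof(void*)] = "arena memory record";
  // the memory record is the arena record's immediate successor
  std::map<uintptr_t, std::string>::iterator it = by_addr.find(arena_addr);
  assert(std::next(it)->second == "arena memory record");
  return 0;
}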