Diffstat (limited to 'libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp')
-rw-r--r-- | libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp | 145
1 file changed, 61 insertions, 84 deletions
diff --git a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp
index fc2ea2fc768..02855459922 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp
+++ b/libsanitizer/sanitizer_common/sanitizer_stackdepot.cpp
@@ -14,93 +14,93 @@
 
 #include "sanitizer_common.h"
 #include "sanitizer_hash.h"
+#include "sanitizer_persistent_allocator.h"
 #include "sanitizer_stackdepotbase.h"
 
 namespace __sanitizer {
 
+static PersistentAllocator<uptr> traceAllocator;
+
 struct StackDepotNode {
-  using hash_type = u32;
-  StackDepotNode *link;
-  u32 id;
+  using hash_type = u64;
   hash_type stack_hash;
-  u32 size;
-  atomic_uint32_t tag_and_use_count;  // tag : 12 high bits; use_count : 20;
-  uptr stack[1];  // [size]
+  u32 link;
 
   static const u32 kTabSizeLog = SANITIZER_ANDROID ? 16 : 20;
-  static const u32 kUseCountBits = 20;
-  static const u32 kMaxUseCount = 1 << kUseCountBits;
-  static const u32 kUseCountMask = (1 << kUseCountBits) - 1;
+  static const u32 kStackSizeBits = 16;
 
   typedef StackTrace args_type;
   bool eq(hash_type hash, const args_type &args) const {
-    u32 tag =
-        atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
-    if (stack_hash != hash || args.size != size || args.tag != tag)
-      return false;
-    uptr i = 0;
-    for (; i < size; i++) {
-      if (stack[i] != args.trace[i]) return false;
-    }
-    return true;
-  }
-  static uptr storage_size(const args_type &args) {
-    return sizeof(StackDepotNode) + (args.size - 1) * sizeof(uptr);
+    return hash == stack_hash;
   }
+  static uptr allocated();
   static hash_type hash(const args_type &args) {
-    MurMur2HashBuilder H(args.size * sizeof(uptr));
+    MurMur2Hash64Builder H(args.size * sizeof(uptr));
     for (uptr i = 0; i < args.size; i++) H.add(args.trace[i]);
+    H.add(args.tag);
     return H.get();
   }
   static bool is_valid(const args_type &args) {
     return args.size > 0 && args.trace;
   }
-  void store(const args_type &args, hash_type hash) {
-    CHECK_EQ(args.tag & (~kUseCountMask >> kUseCountBits), args.tag);
-    atomic_store(&tag_and_use_count, args.tag << kUseCountBits,
-                 memory_order_relaxed);
-    stack_hash = hash;
-    size = args.size;
-    internal_memcpy(stack, args.trace, size * sizeof(uptr));
-  }
-  args_type load() const {
-    u32 tag =
-        atomic_load(&tag_and_use_count, memory_order_relaxed) >> kUseCountBits;
-    return args_type(&stack[0], size, tag);
-  }
-  StackDepotHandle get_handle() { return StackDepotHandle(this); }
+  void store(u32 id, const args_type &args, hash_type hash);
+  args_type load(u32 id) const;
+  static StackDepotHandle get_handle(u32 id);
 
   typedef StackDepotHandle handle_type;
 };
 
-COMPILER_CHECK(StackDepotNode::kMaxUseCount >= (u32)kStackDepotMaxUseCount);
-
-u32 StackDepotHandle::id() { return node_->id; }
-int StackDepotHandle::use_count() {
-  return atomic_load(&node_->tag_and_use_count, memory_order_relaxed) &
-         StackDepotNode::kUseCountMask;
-}
-void StackDepotHandle::inc_use_count_unsafe() {
-  u32 prev =
-      atomic_fetch_add(&node_->tag_and_use_count, 1, memory_order_relaxed) &
-      StackDepotNode::kUseCountMask;
-  CHECK_LT(prev + 1, StackDepotNode::kMaxUseCount);
-}
-
 // FIXME(dvyukov): this single reserved bit is used in TSan.
 typedef StackDepotBase<StackDepotNode, 1, StackDepotNode::kTabSizeLog>
     StackDepot;
 static StackDepot theDepot;
+// Keep rarely accessed stack traces out of frequently access nodes to improve
+// caching efficiency.
+static TwoLevelMap<uptr *, StackDepot::kNodesSize1, StackDepot::kNodesSize2>
+    tracePtrs;
+// Keep mutable data out of frequently access nodes to improve caching
+// efficiency.
+static TwoLevelMap<atomic_uint32_t, StackDepot::kNodesSize1,
+                   StackDepot::kNodesSize2>
+    useCounts;
+
+int StackDepotHandle::use_count() const {
+  return atomic_load_relaxed(&useCounts[id_]);
+}
 
-StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
+void StackDepotHandle::inc_use_count_unsafe() {
+  atomic_fetch_add(&useCounts[id_], 1, memory_order_relaxed);
+}
+
+uptr StackDepotNode::allocated() {
+  return traceAllocator.allocated() + tracePtrs.MemoryUsage() +
+         useCounts.MemoryUsage();
+}
 
-u32 StackDepotPut(StackTrace stack) {
-  StackDepotHandle h = theDepot.Put(stack);
-  return h.valid() ? h.id() : 0;
+void StackDepotNode::store(u32 id, const args_type &args, hash_type hash) {
+  stack_hash = hash;
+  uptr *stack_trace = traceAllocator.alloc(args.size + 1);
+  CHECK_LT(args.size, 1 << kStackSizeBits);
+  *stack_trace = args.size + (args.tag << kStackSizeBits);
+  internal_memcpy(stack_trace + 1, args.trace, args.size * sizeof(uptr));
+  tracePtrs[id] = stack_trace;
 }
 
+StackDepotNode::args_type StackDepotNode::load(u32 id) const {
+  const uptr *stack_trace = tracePtrs[id];
+  if (!stack_trace)
+    return {};
+  uptr size = *stack_trace & ((1 << kStackSizeBits) - 1);
+  uptr tag = *stack_trace >> kStackSizeBits;
+  return args_type(stack_trace + 1, size, tag);
+}
+
+StackDepotStats StackDepotGetStats() { return theDepot.GetStats(); }
+
+u32 StackDepotPut(StackTrace stack) { return theDepot.Put(stack); }
+
 StackDepotHandle StackDepotPut_WithHandle(StackTrace stack) {
-  return theDepot.Put(stack);
+  return StackDepotNode::get_handle(theDepot.Put(stack));
 }
 
 StackTrace StackDepotGet(u32 id) {
@@ -121,37 +121,14 @@ void StackDepotPrintAll() {
 #endif
 }
 
-bool StackDepotReverseMap::IdDescPair::IdComparator(
-    const StackDepotReverseMap::IdDescPair &a,
-    const StackDepotReverseMap::IdDescPair &b) {
-  return a.id < b.id;
-}
-
-void StackDepotReverseMap::Init() const {
-  if (LIKELY(map_.capacity()))
-    return;
-  map_.reserve(StackDepotGetStats().n_uniq_ids + 100);
-  for (int idx = 0; idx < StackDepot::kTabSize; idx++) {
-    atomic_uintptr_t *p = &theDepot.tab[idx];
-    uptr v = atomic_load(p, memory_order_consume);
-    StackDepotNode *s = (StackDepotNode*)(v & ~1);
-    for (; s; s = s->link) {
-      IdDescPair pair = {s->id, s};
-      map_.push_back(pair);
-    }
-  }
-  Sort(map_.data(), map_.size(), &IdDescPair::IdComparator);
+StackDepotHandle StackDepotNode::get_handle(u32 id) {
+  return StackDepotHandle(&theDepot.nodes[id], id);
 }
 
-StackTrace StackDepotReverseMap::Get(u32 id) const {
-  Init();
-  if (!map_.size())
-    return StackTrace();
-  IdDescPair pair = {id, nullptr};
-  uptr idx = InternalLowerBound(map_, pair, IdDescPair::IdComparator);
-  if (idx > map_.size() || map_[idx].id != id)
-    return StackTrace();
-  return map_[idx].desc->load();
+void StackDepotTestOnlyUnmap() {
+  theDepot.TestOnlyUnmap();
+  tracePtrs.TestOnlyUnmap();
+  traceAllocator.TestOnlyUnmap();
}
 
 } // namespace __sanitizer
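
Not part of the patch: a minimal standalone sketch of the header-word packing that the new StackDepotNode::store()/load() pair uses, where the first word of each persisted trace buffer holds the frame count in the low kStackSizeBits bits and the tag in the bits above it. The helper names (pack_trace, unpack_trace) are hypothetical and for illustration only.

// Standalone demo (hypothetical helpers, not from the patch) of the
// size/tag packing scheme used for persisted stack traces.
#include <cassert>
#include <cstdint>
#include <cstring>
#include <vector>

using uptr = uintptr_t;
constexpr uint32_t kStackSizeBits = 16;  // matches the constant in the patch

// Lay a trace out the way the persistent buffer is laid out:
// word 0 = size | (tag << kStackSizeBits), words 1..size = the frames.
std::vector<uptr> pack_trace(const uptr *trace, uptr size, uptr tag) {
  assert(size < (uptr(1) << kStackSizeBits));
  std::vector<uptr> buf(size + 1);
  buf[0] = size + (tag << kStackSizeBits);
  std::memcpy(buf.data() + 1, trace, size * sizeof(uptr));
  return buf;
}

// Mirror of the unpacking done by load(): recover size, tag and frame pointer.
void unpack_trace(const uptr *buf, const uptr *&trace, uptr &size, uptr &tag) {
  size = buf[0] & ((uptr(1) << kStackSizeBits) - 1);
  tag = buf[0] >> kStackSizeBits;
  trace = buf + 1;
}

int main() {
  const uptr frames[] = {0x401000, 0x401234, 0x7f0012345678};
  std::vector<uptr> buf = pack_trace(frames, 3, /*tag=*/5);

  const uptr *trace;
  uptr size, tag;
  unpack_trace(buf.data(), trace, size, tag);
  assert(size == 3 && tag == 5 && trace[2] == frames[2]);
  return 0;
}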