diff options
author | Kostya Serebryany <kcc@google.com> | 2014-09-23 17:59:53 +0000 |
---|---|---|
committer | Kostya Serebryany <kcc@gcc.gnu.org> | 2014-09-23 17:59:53 +0000 |
commit | 866e32ad336f1698809cc03c48f884379d6b39e0 (patch) | |
tree | dfe8acd36f160811afc54c8eaf16e8160ba8bd70 /libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h | |
parent | e8ee40544aef309d4225852dda191bf0f986f761 (diff) |
[libsanitizer merge from upstream r218156]
From-SVN: r215527
Diffstat (limited to 'libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h')
-rw-r--r-- | libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h | 69 |
1 file changed, 69 insertions, 0 deletions
diff --git a/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h new file mode 100644 index 00000000000..e29b7bd57aa --- /dev/null +++ b/libsanitizer/sanitizer_common/sanitizer_persistent_allocator.h @@ -0,0 +1,69 @@ +//===-- sanitizer_persistent_allocator.h ------------------------*- C++ -*-===// +// +// This file is distributed under the University of Illinois Open Source +// License. See LICENSE.TXT for details. +// +//===----------------------------------------------------------------------===// +// +// A fast memory allocator that does not support free() nor realloc(). +// All allocations are forever. +//===----------------------------------------------------------------------===// +#ifndef SANITIZER_PERSISTENT_ALLOCATOR_H +#define SANITIZER_PERSISTENT_ALLOCATOR_H + +#include "sanitizer_internal_defs.h" +#include "sanitizer_mutex.h" +#include "sanitizer_atomic.h" +#include "sanitizer_common.h" + +namespace __sanitizer { + +class PersistentAllocator { + public: + void *alloc(uptr size); + + private: + void *tryAlloc(uptr size); + StaticSpinMutex mtx; // Protects alloc of new blocks for region allocator. + atomic_uintptr_t region_pos; // Region allocator for Node's. + atomic_uintptr_t region_end; +}; + +inline void *PersistentAllocator::tryAlloc(uptr size) { + // Optimisic lock-free allocation, essentially try to bump the region ptr. + for (;;) { + uptr cmp = atomic_load(®ion_pos, memory_order_acquire); + uptr end = atomic_load(®ion_end, memory_order_acquire); + if (cmp == 0 || cmp + size > end) return 0; + if (atomic_compare_exchange_weak(®ion_pos, &cmp, cmp + size, + memory_order_acquire)) + return (void *)cmp; + } +} + +inline void *PersistentAllocator::alloc(uptr size) { + // First, try to allocate optimisitically. + void *s = tryAlloc(size); + if (s) return s; + // If failed, lock, retry and alloc new superblock. 
+ SpinMutexLock l(&mtx); + for (;;) { + s = tryAlloc(size); + if (s) return s; + atomic_store(®ion_pos, 0, memory_order_relaxed); + uptr allocsz = 64 * 1024; + if (allocsz < size) allocsz = size; + uptr mem = (uptr)MmapOrDie(allocsz, "stack depot"); + atomic_store(®ion_end, mem + allocsz, memory_order_release); + atomic_store(®ion_pos, mem, memory_order_release); + } +} + +extern PersistentAllocator thePersistentAllocator; +inline void *PersistentAlloc(uptr sz) { + return thePersistentAllocator.alloc(sz); +} + +} // namespace __sanitizer + +#endif // SANITIZER_PERSISTENT_ALLOCATOR_H |