1 // Copyright (c) 2005, 2007, Google Inc.
2 // All rights reserved.
3 // Copyright (C) 2005, 2006, 2007, 2008, 2009, 2011 Apple Inc. All rights reserved.
5 // Redistribution and use in source and binary forms, with or without
6 // modification, are permitted provided that the following conditions are
9 // * Redistributions of source code must retain the above copyright
10 // notice, this list of conditions and the following disclaimer.
11 // * Redistributions in binary form must reproduce the above
12 // copyright notice, this list of conditions and the following disclaimer
13 // in the documentation and/or other materials provided with the
15 // * Neither the name of Google Inc. nor the names of its
16 // contributors may be used to endorse or promote products derived from
17 // this software without specific prior written permission.
19 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
20 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
21 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
22 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
23 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
24 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
25 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
26 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
27 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
28 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
29 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
32 // Author: Sanjay Ghemawat <opensource@google.com>
34 // A malloc that uses a per-thread cache to satisfy small malloc requests.
35 // (The time for malloc/free of a small object drops from 300 ns to 50 ns.)
37 // See doc/tcmalloc.html for a high-level
38 // description of how this malloc works.
41 // 1. The thread-specific lists are accessed without acquiring any locks.
42 // This is safe because each such list is only accessed by one thread.
43 // 2. We have a lock per central free-list, and hold it while manipulating
44 // the central free list for a particular size.
45 // 3. The central page allocator is protected by "pageheap_lock".
46 // 4. The pagemap (which maps from page-number to descriptor),
47 // can be read without holding any locks, and written while holding
48 // the "pageheap_lock".
49 // 5. To improve performance, a subset of the information one can get
50 // from the pagemap is cached in a data structure, pagemap_cache_,
51 // that atomically reads and writes its entries. This cache can be
52 // read and written without locking.
54 // This multi-threaded access to the pagemap is safe for fairly
55 // subtle reasons. We basically assume that when an object X is
56 // allocated by thread A and deallocated by thread B, there must
57 // have been appropriate synchronization in the handoff of object
58 // X from thread A to thread B. The same logic applies to pagemap_cache_.
60 // THE PAGEID-TO-SIZECLASS CACHE
61 // Hot PageID-to-sizeclass mappings are held by pagemap_cache_. If this cache
62 // returns 0 for a particular PageID then that means "no information," not that
63 // the sizeclass is 0. The cache may have stale information for pages that do
64 // not hold the beginning of any free()'able object. Staleness is eliminated
65 // in Populate() for pages with sizeclass > 0 objects, and in do_malloc() and
66 // do_memalign() for all other relevant pages.
68 // TODO: Bias reclamation to larger addresses
69 // TODO: implement mallinfo/mallopt
70 // TODO: Better testing
72 // 9/28/2003 (new page-level allocator replaces ptmalloc2):
73 // * malloc/free of small objects goes from ~300 ns to ~50 ns.
74 // * allocation of a reasonably complicated struct
75 // goes from about 1100 ns to about 300 ns.
78 #include "FastMalloc.h"
80 #include "Assertions.h"
82 #if ENABLE(JSC_MULTIPLE_THREADS)
85 #include <wtf/StdLibExtras.h>
87 #ifndef NO_TCMALLOC_SAMPLES
89 #define NO_TCMALLOC_SAMPLES
93 #if !(defined(USE_SYSTEM_MALLOC) && USE_SYSTEM_MALLOC) && defined(NDEBUG)
94 #define FORCE_SYSTEM_MALLOC 0
96 #define FORCE_SYSTEM_MALLOC 1
99 // Use a background thread to periodically scavenge memory to release back to the system
100 // https://bugs.webkit.org/show_bug.cgi?id=27900: don't turn this on for Tiger until we have figured out why it caused a crash.
101 #if defined(BUILDING_ON_TIGER)
102 #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 0
104 #define USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY 1
110 #if ENABLE(JSC_MULTIPLE_THREADS)
111 static pthread_key_t isForbiddenKey;
112 static pthread_once_t isForbiddenKeyOnce = PTHREAD_ONCE_INIT;
113 static void initializeIsForbiddenKey()
115 pthread_key_create(&isForbiddenKey, 0);
119 static bool isForbidden()
121 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
122 return !!pthread_getspecific(isForbiddenKey);
126 void fastMallocForbid()
128 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
129 pthread_setspecific(isForbiddenKey, &isForbiddenKey);
132 void fastMallocAllow()
134 pthread_once(&isForbiddenKeyOnce, initializeIsForbiddenKey);
135 pthread_setspecific(isForbiddenKey, 0);
140 static bool staticIsForbidden;
141 static bool isForbidden()
143 return staticIsForbidden;
146 void fastMallocForbid()
148 staticIsForbidden = true;
151 void fastMallocAllow()
153 staticIsForbidden = false;
155 #endif // ENABLE(JSC_MULTIPLE_THREADS)
164 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
168 void fastMallocMatchFailed(void*)
173 } // namespace Internal
177 void* fastZeroedMalloc(size_t n)
179 void* result = fastMalloc(n);
180 memset(result, 0, n);
184 char* fastStrDup(const char* src)
186 int len = strlen(src) + 1;
187 char* dup = static_cast<char*>(fastMalloc(len));
190 memcpy(dup, src, len);
195 TryMallocReturnValue tryFastZeroedMalloc(size_t n)
198 if (!tryFastMalloc(n).getValue(result))
200 memset(result, 0, n);
206 #if FORCE_SYSTEM_MALLOC
209 #include "brew/SystemMallocBrew.h"
213 #include <malloc/malloc.h>
220 TryMallocReturnValue tryFastMalloc(size_t n)
222 ASSERT(!isForbidden());
224 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
225 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
228 void* result = malloc(n + sizeof(AllocAlignmentInteger));
232 *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
233 result = static_cast<AllocAlignmentInteger*>(result) + 1;
241 void* fastMalloc(size_t n)
243 ASSERT(!isForbidden());
245 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
246 TryMallocReturnValue returnValue = tryFastMalloc(n);
248 returnValue.getValue(result);
250 void* result = malloc(n);
255 // The behavior of malloc(0) is implementation defined.
256 // To make sure that fastMalloc never returns 0, retry with fastMalloc(1).
258 return fastMalloc(1);
266 TryMallocReturnValue tryFastCalloc(size_t n_elements, size_t element_size)
268 ASSERT(!isForbidden());
270 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
271 size_t totalBytes = n_elements * element_size;
272 if (n_elements > 1 && element_size && (totalBytes / element_size) != n_elements || (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes))
275 totalBytes += sizeof(AllocAlignmentInteger);
276 void* result = malloc(totalBytes);
280 memset(result, 0, totalBytes);
281 *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
282 result = static_cast<AllocAlignmentInteger*>(result) + 1;
285 return calloc(n_elements, element_size);
289 void* fastCalloc(size_t n_elements, size_t element_size)
291 ASSERT(!isForbidden());
293 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
294 TryMallocReturnValue returnValue = tryFastCalloc(n_elements, element_size);
296 returnValue.getValue(result);
298 void* result = calloc(n_elements, element_size);
303 // If either n_elements or element_size is 0, the behavior of calloc is implementation defined.
304 // To make sure that fastCalloc never returns 0, retry with fastCalloc(1, 1).
305 if (!n_elements || !element_size)
306 return fastCalloc(1, 1);
314 void fastFree(void* p)
316 ASSERT(!isForbidden());
318 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
322 AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
323 if (*header != Internal::AllocTypeMalloc)
324 Internal::fastMallocMatchFailed(p);
331 TryMallocReturnValue tryFastRealloc(void* p, size_t n)
333 ASSERT(!isForbidden());
335 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
337 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= n) // If overflow would occur...
339 AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(p);
340 if (*header != Internal::AllocTypeMalloc)
341 Internal::fastMallocMatchFailed(p);
342 void* result = realloc(header, n + sizeof(AllocAlignmentInteger));
346 // This should not be needed because the value is already there:
347 // *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
348 result = static_cast<AllocAlignmentInteger*>(result) + 1;
351 return fastMalloc(n);
354 return realloc(p, n);
358 void* fastRealloc(void* p, size_t n)
360 ASSERT(!isForbidden());
362 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
363 TryMallocReturnValue returnValue = tryFastRealloc(p, n);
365 returnValue.getValue(result);
367 void* result = realloc(p, n);
375 void releaseFastMallocFreeMemory() { }
377 FastMallocStatistics fastMallocStatistics()
379 FastMallocStatistics statistics = { 0, 0, 0 };
383 size_t fastMallocSize(const void* p)
386 return malloc_size(p);
387 #elif OS(WINDOWS) && !PLATFORM(BREWMP)
388 // Brew MP uses its own memory allocator, so _msize does not work on the Brew MP simulator.
389 return _msize(const_cast<void*>(p));
398 // This symbol is present in the JavaScriptCore exports file even when FastMalloc is disabled.
399 // It will never be used in this case, so it's type and value are less interesting than its presence.
400 extern "C" const int jscore_fastmalloc_introspection = 0;
403 #else // FORCE_SYSTEM_MALLOC
407 #elif HAVE(INTTYPES_H)
408 #include <inttypes.h>
410 #include <sys/types.h>
413 #include "AlwaysInline.h"
414 #include "Assertions.h"
415 #include "TCPackedCache.h"
416 #include "TCPageMap.h"
417 #include "TCSpinLock.h"
418 #include "TCSystemAlloc.h"
432 #ifndef WIN32_LEAN_AND_MEAN
433 #define WIN32_LEAN_AND_MEAN
441 #include "MallocZoneSupport.h"
442 #include <wtf/HashSet.h>
443 #include <wtf/Vector.h>
446 #if HAVE(HEADER_DETECTION_H)
447 #include "HeaderDetection.h"
451 #include <dispatch/dispatch.h>
454 #if HAVE(PTHREAD_MACHDEP_H)
455 #include <System/pthread_machdep.h>
462 // Calling pthread_getspecific through a global function pointer is faster than a normal
463 // call to the function on Mac OS X, and it's used in performance-critical code. So we
464 // use a function pointer. But that's not necessarily faster on other platforms, and we had
465 // problems with this technique on Windows, so we'll do this only on Mac OS X.
466 #if OS(DARWIN) && !defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0)
467 static void* (*pthread_getspecific_function_pointer)(pthread_key_t) = pthread_getspecific;
468 #define pthread_getspecific(key) pthread_getspecific_function_pointer(key)
471 #define DEFINE_VARIABLE(type, name, value, meaning) \
472 namespace FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead { \
473 type FLAGS_##name(value); \
474 char FLAGS_no##name; \
476 using FLAG__namespace_do_not_use_directly_use_DECLARE_##type##_instead::FLAGS_##name
478 #define DEFINE_int64(name, value, meaning) \
479 DEFINE_VARIABLE(int64_t, name, value, meaning)
481 #define DEFINE_double(name, value, meaning) \
482 DEFINE_VARIABLE(double, name, value, meaning)
486 #define malloc fastMalloc
487 #define calloc fastCalloc
488 #define free fastFree
489 #define realloc fastRealloc
491 #define MESSAGE LOG_ERROR
492 #define CHECK_CONDITION ASSERT
496 class TCMalloc_Central_FreeListPadded;
497 class TCMalloc_PageHeap;
498 class TCMalloc_ThreadCache;
499 template <typename T> class PageHeapAllocator;
501 class FastMallocZone {
505 static kern_return_t enumerate(task_t, void*, unsigned typeMmask, vm_address_t zoneAddress, memory_reader_t, vm_range_recorder_t);
506 static size_t goodSize(malloc_zone_t*, size_t size) { return size; }
507 static boolean_t check(malloc_zone_t*) { return true; }
508 static void print(malloc_zone_t*, boolean_t) { }
509 static void log(malloc_zone_t*, void*) { }
510 static void forceLock(malloc_zone_t*) { }
511 static void forceUnlock(malloc_zone_t*) { }
512 static void statistics(malloc_zone_t*, malloc_statistics_t* stats) { memset(stats, 0, sizeof(malloc_statistics_t)); }
515 FastMallocZone(TCMalloc_PageHeap*, TCMalloc_ThreadCache**, TCMalloc_Central_FreeListPadded*, PageHeapAllocator<Span>*, PageHeapAllocator<TCMalloc_ThreadCache>*);
516 static size_t size(malloc_zone_t*, const void*);
517 static void* zoneMalloc(malloc_zone_t*, size_t);
518 static void* zoneCalloc(malloc_zone_t*, size_t numItems, size_t size);
519 static void zoneFree(malloc_zone_t*, void*);
520 static void* zoneRealloc(malloc_zone_t*, void*, size_t);
521 static void* zoneValloc(malloc_zone_t*, size_t) { LOG_ERROR("valloc is not supported"); return 0; }
522 static void zoneDestroy(malloc_zone_t*) { }
524 malloc_zone_t m_zone;
525 TCMalloc_PageHeap* m_pageHeap;
526 TCMalloc_ThreadCache** m_threadHeaps;
527 TCMalloc_Central_FreeListPadded* m_centralCaches;
528 PageHeapAllocator<Span>* m_spanAllocator;
529 PageHeapAllocator<TCMalloc_ThreadCache>* m_pageHeapAllocator;
537 // This #ifdef should almost never be set. Set NO_TCMALLOC_SAMPLES if
538 // you're porting to a system where you really can't get a stacktrace.
539 #ifdef NO_TCMALLOC_SAMPLES
540 // We use #define so code compiles even if you #include stacktrace.h somehow.
541 # define GetStackTrace(stack, depth, skip) (0)
543 # include <google/stacktrace.h>
547 // Even if we have support for thread-local storage in the compiler
548 // and linker, the OS may not support it. We need to check that at
549 // runtime. Right now, we have to keep a manual set of "bad" OSes.
550 #if defined(HAVE_TLS)
551 static bool kernel_supports_tls = false; // be conservative
552 static inline bool KernelSupportsTLS() {
553 return kernel_supports_tls;
555 # if !HAVE_DECL_UNAME // if too old for uname, probably too old for TLS
556 static void CheckIfKernelSupportsTLS() {
557 kernel_supports_tls = false;
560 # include <sys/utsname.h> // DECL_UNAME checked for <sys/utsname.h> too
561 static void CheckIfKernelSupportsTLS() {
563 if (uname(&buf) != 0) { // should be impossible
564 MESSAGE("uname failed assuming no TLS support (errno=%d)\n", errno);
565 kernel_supports_tls = false;
566 } else if (strcasecmp(buf.sysname, "linux") == 0) {
567 // The linux case: the first kernel to support TLS was 2.6.0
568 if (buf.release[0] < '2' && buf.release[1] == '.') // 0.x or 1.x
569 kernel_supports_tls = false;
570 else if (buf.release[0] == '2' && buf.release[1] == '.' &&
571 buf.release[2] >= '0' && buf.release[2] < '6' &&
572 buf.release[3] == '.') // 2.0 - 2.5
573 kernel_supports_tls = false;
575 kernel_supports_tls = true;
576 } else { // some other kernel, we'll be optimisitic
577 kernel_supports_tls = true;
579 // TODO(csilvers): VLOG(1) the tls status once we support RAW_VLOG
581 # endif // HAVE_DECL_UNAME
584 // __THROW is defined in glibc systems. It means, counter-intuitively,
585 // "This function will never throw an exception." It's an optional
586 // optimization tool, but we may need to use it to match glibc prototypes.
587 #ifndef __THROW // I guess we're not on a glibc system
588 # define __THROW // __THROW is just an optimization, so ok to make it ""
591 //-------------------------------------------------------------------
593 //-------------------------------------------------------------------
595 // Not all possible combinations of the following parameters make
596 // sense. In particular, if kMaxSize increases, you may have to
597 // increase kNumClasses as well.
598 static const size_t kPageShift = 12;
599 static const size_t kPageSize = 1 << kPageShift;
600 static const size_t kMaxSize = 8u * kPageSize;
601 static const size_t kAlignShift = 3;
602 static const size_t kAlignment = 1 << kAlignShift;
603 static const size_t kNumClasses = 68;
605 // Allocates a big block of memory for the pagemap once we reach more than
607 static const size_t kPageMapBigAllocationThreshold = 128 << 20;
609 // Minimum number of pages to fetch from system at a time. Must be
610 // significantly bigger than kPageSize to amortize system-call
611 // overhead, and also to reduce external fragementation. Also, we
612 // should keep this value big because various incarnations of Linux
613 // have small limits on the number of mmap() regions per
615 static const size_t kMinSystemAlloc = 1 << (20 - kPageShift);
617 // Number of objects to move between a per-thread list and a central
618 // list in one shot. We want this to be not too small so we can
619 // amortize the lock overhead for accessing the central list. Making
620 // it too big may temporarily cause unnecessary memory wastage in the
621 // per-thread free list until the scavenger cleans up the list.
622 static int num_objects_to_move[kNumClasses];
624 // Maximum length we allow a per-thread free-list to have before we
625 // move objects from it into the corresponding central free-list. We
626 // want this big to avoid locking the central free-list too often. It
627 // should not hurt to make this list somewhat big because the
628 // scavenging code will shrink it down when its contents are not in use.
629 static const int kMaxFreeListLength = 256;
631 // Lower and upper bounds on the per-thread cache sizes
632 static const size_t kMinThreadCacheSize = kMaxSize * 2;
633 static const size_t kMaxThreadCacheSize = 2 << 20;
635 // Default bound on the total amount of thread caches
636 static const size_t kDefaultOverallThreadCacheSize = 16 << 20;
638 // For all span-lengths < kMaxPages we keep an exact-size list.
639 // REQUIRED: kMaxPages >= kMinSystemAlloc;
640 static const size_t kMaxPages = kMinSystemAlloc;
642 /* The smallest prime > 2^n */
643 static int primes_list[] = {
644 // Small values might cause high rates of sampling
645 // and hence commented out.
646 // 2, 5, 11, 17, 37, 67, 131, 257,
647 // 521, 1031, 2053, 4099, 8209, 16411,
648 32771, 65537, 131101, 262147, 524309, 1048583,
649 2097169, 4194319, 8388617, 16777259, 33554467 };
651 // Twice the approximate gap between sampling actions.
652 // I.e., we take one sample approximately once every
653 // tcmalloc_sample_parameter/2
654 // bytes of allocation, i.e., ~ once every 128KB.
655 // Must be a prime number.
656 #ifdef NO_TCMALLOC_SAMPLES
657 DEFINE_int64(tcmalloc_sample_parameter, 0,
658 "Unused: code is compiled with NO_TCMALLOC_SAMPLES");
659 static size_t sample_period = 0;
661 DEFINE_int64(tcmalloc_sample_parameter, 262147,
662 "Twice the approximate gap between sampling actions."
663 " Must be a prime number. Otherwise will be rounded up to a "
664 " larger prime number");
665 static size_t sample_period = 262147;
668 // Protects sample_period above
669 static SpinLock sample_period_lock = SPINLOCK_INITIALIZER;
671 // Parameters for controlling how fast memory is returned to the OS.
673 DEFINE_double(tcmalloc_release_rate, 1,
674 "Rate at which we release unused memory to the system. "
675 "Zero means we never release memory back to the system. "
676 "Increase this flag to return memory faster; decrease it "
677 "to return memory slower. Reasonable rates are in the "
680 //-------------------------------------------------------------------
681 // Mapping from size to size_class and vice versa
682 //-------------------------------------------------------------------
684 // Sizes <= 1024 have an alignment >= 8. So for such sizes we have an
685 // array indexed by ceil(size/8). Sizes > 1024 have an alignment >= 128.
686 // So for these larger sizes we have an array indexed by ceil(size/128).
688 // We flatten both logical arrays into one physical array and use
689 // arithmetic to compute an appropriate index. The constants used by
690 // ClassIndex() were selected to make the flattening work.
693 // Size Expression Index
694 // -------------------------------------------------------
698 // 1024 (1024 + 7) / 8 128
699 // 1025 (1025 + 127 + (120<<7)) / 128 129
701 // 32768 (32768 + 127 + (120<<7)) / 128 376
702 static const size_t kMaxSmallSize = 1024;
703 static const int shift_amount[2] = { 3, 7 }; // For divides by 8 or 128
704 static const int add_amount[2] = { 7, 127 + (120 << 7) };
705 static unsigned char class_array[377];
707 // Compute index of the class_array[] entry for a given size
708 static inline int ClassIndex(size_t s) {
709 const int i = (s > kMaxSmallSize);
710 return static_cast<int>((s + add_amount[i]) >> shift_amount[i]);
713 // Mapping from size class to max size storable in that class
714 static size_t class_to_size[kNumClasses];
716 // Mapping from size class to number of pages to allocate at a time
717 static size_t class_to_pages[kNumClasses];
719 // TransferCache is used to cache transfers of num_objects_to_move[size_class]
720 // back and forth between thread caches and the central cache for a given size
723 void *head; // Head of chain of objects.
724 void *tail; // Tail of chain of objects.
726 // A central cache freelist can have anywhere from 0 to kNumTransferEntries
727 // slots to put link list chains into. To keep memory usage bounded the total
728 // number of TCEntries across size classes is fixed. Currently each size
729 // class is initially given one TCEntry which also means that the maximum any
730 // one class can have is kNumClasses.
731 static const int kNumTransferEntries = kNumClasses;
733 // Note: the following only works for "n"s that fit in 32-bits, but
734 // that is fine since we only use it for small sizes.
735 static inline int LgFloor(size_t n) {
737 for (int i = 4; i >= 0; --i) {
738 int shift = (1 << i);
739 size_t x = n >> shift;
749 // Some very basic linked list functions for dealing with using void * as
752 static inline void *SLL_Next(void *t) {
753 return *(reinterpret_cast<void**>(t));
756 static inline void SLL_SetNext(void *t, void *n) {
757 *(reinterpret_cast<void**>(t)) = n;
760 static inline void SLL_Push(void **list, void *element) {
761 SLL_SetNext(element, *list);
765 static inline void *SLL_Pop(void **list) {
766 void *result = *list;
767 *list = SLL_Next(*list);
772 // Remove N elements from a linked list to which head points. head will be
773 // modified to point to the new head. start and end will point to the first
774 // and last nodes of the range. Note that end will point to NULL after this
775 // function is called.
776 static inline void SLL_PopRange(void **head, int N, void **start, void **end) {
784 for (int i = 1; i < N; ++i) {
790 *head = SLL_Next(tmp);
791 // Unlink range from list.
792 SLL_SetNext(tmp, NULL);
795 static inline void SLL_PushRange(void **head, void *start, void *end) {
797 SLL_SetNext(end, *head);
801 static inline size_t SLL_Size(void *head) {
805 head = SLL_Next(head);
810 // Setup helper functions.
812 static ALWAYS_INLINE size_t SizeClass(size_t size) {
813 return class_array[ClassIndex(size)];
816 // Get the byte-size for a specified class
817 static ALWAYS_INLINE size_t ByteSizeForClass(size_t cl) {
818 return class_to_size[cl];
820 static int NumMoveSize(size_t size) {
821 if (size == 0) return 0;
822 // Use approx 64k transfers between thread and central caches.
823 int num = static_cast<int>(64.0 * 1024.0 / size);
824 if (num < 2) num = 2;
825 // Clamp well below kMaxFreeListLength to avoid ping pong between central
826 // and thread caches.
827 if (num > static_cast<int>(0.8 * kMaxFreeListLength))
828 num = static_cast<int>(0.8 * kMaxFreeListLength);
830 // Also, avoid bringing in too many objects into small object free
831 // lists. There are lots of such lists, and if we allow each one to
832 // fetch too many at a time, we end up having to scavenge too often
833 // (especially when there are lots of threads and each thread gets a
834 // small allowance for its thread cache).
836 // TODO: Make thread cache free list sizes dynamic so that we do not
837 // have to equally divide a fixed resource amongst lots of threads.
838 if (num > 32) num = 32;
843 // Initialize the mapping arrays
844 static void InitSizeClasses() {
845 // Do some sanity checking on add_amount[]/shift_amount[]/class_array[]
846 if (ClassIndex(0) < 0) {
847 MESSAGE("Invalid class index %d for size 0\n", ClassIndex(0));
850 if (static_cast<size_t>(ClassIndex(kMaxSize)) >= sizeof(class_array)) {
851 MESSAGE("Invalid class index %d for kMaxSize\n", ClassIndex(kMaxSize));
855 // Compute the size classes we want to use
856 size_t sc = 1; // Next size class to assign
857 unsigned char alignshift = kAlignShift;
859 for (size_t size = kAlignment; size <= kMaxSize; size += (1 << alignshift)) {
860 int lg = LgFloor(size);
862 // Increase alignment every so often.
864 // Since we double the alignment every time size doubles and
865 // size >= 128, this means that space wasted due to alignment is
866 // at most 16/128 i.e., 12.5%. Plus we cap the alignment at 256
867 // bytes, so the space wasted as a percentage starts falling for
869 if ((lg >= 7) && (alignshift < 8)) {
875 // Allocate enough pages so leftover is less than 1/8 of total.
876 // This bounds wasted space to at most 12.5%.
877 size_t psize = kPageSize;
878 while ((psize % size) > (psize >> 3)) {
881 const size_t my_pages = psize >> kPageShift;
883 if (sc > 1 && my_pages == class_to_pages[sc-1]) {
884 // See if we can merge this into the previous class without
885 // increasing the fragmentation of the previous class.
886 const size_t my_objects = (my_pages << kPageShift) / size;
887 const size_t prev_objects = (class_to_pages[sc-1] << kPageShift)
888 / class_to_size[sc-1];
889 if (my_objects == prev_objects) {
890 // Adjust last class to include this size
891 class_to_size[sc-1] = size;
897 class_to_pages[sc] = my_pages;
898 class_to_size[sc] = size;
901 if (sc != kNumClasses) {
902 MESSAGE("wrong number of size classes: found %" PRIuS " instead of %d\n",
903 sc, int(kNumClasses));
907 // Initialize the mapping arrays
909 for (unsigned char c = 1; c < kNumClasses; c++) {
910 const size_t max_size_in_class = class_to_size[c];
911 for (size_t s = next_size; s <= max_size_in_class; s += kAlignment) {
912 class_array[ClassIndex(s)] = c;
914 next_size = static_cast<int>(max_size_in_class + kAlignment);
917 // Double-check sizes just to be safe
918 for (size_t size = 0; size <= kMaxSize; size++) {
919 const size_t sc = SizeClass(size);
921 MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
924 if (sc > 1 && size <= class_to_size[sc-1]) {
925 MESSAGE("Allocating unnecessarily large class %" PRIuS " for %" PRIuS
929 if (sc >= kNumClasses) {
930 MESSAGE("Bad size class %" PRIuS " for %" PRIuS "\n", sc, size);
933 const size_t s = class_to_size[sc];
935 MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
939 MESSAGE("Bad size %" PRIuS " for %" PRIuS " (sc = %" PRIuS ")\n", s, size, sc);
944 // Initialize the num_objects_to_move array.
945 for (size_t cl = 1; cl < kNumClasses; ++cl) {
946 num_objects_to_move[cl] = NumMoveSize(ByteSizeForClass(cl));
951 // Dump class sizes and maximum external wastage per size class
952 for (size_t cl = 1; cl < kNumClasses; ++cl) {
953 const int alloc_size = class_to_pages[cl] << kPageShift;
954 const int alloc_objs = alloc_size / class_to_size[cl];
955 const int min_used = (class_to_size[cl-1] + 1) * alloc_objs;
956 const int max_waste = alloc_size - min_used;
957 MESSAGE("SC %3d [ %8d .. %8d ] from %8d ; %2.0f%% maxwaste\n",
959 int(class_to_size[cl-1] + 1),
960 int(class_to_size[cl]),
961 int(class_to_pages[cl] << kPageShift),
962 max_waste * 100.0 / alloc_size
969 // -------------------------------------------------------------------------
970 // Simple allocator for objects of a specified type. External locking
971 // is required before accessing one of these objects.
972 // -------------------------------------------------------------------------
974 // Metadata allocator -- keeps stats about how many bytes allocated
975 static uint64_t metadata_system_bytes = 0;
976 static void* MetaDataAlloc(size_t bytes) {
977 void* result = TCMalloc_SystemAlloc(bytes, 0);
978 if (result != NULL) {
979 metadata_system_bytes += bytes;
985 class PageHeapAllocator {
987 // How much to allocate from system at a time
988 static const size_t kAllocIncrement = 32 << 10;
991 static const size_t kAlignedSize
992 = (((sizeof(T) + kAlignment - 1) / kAlignment) * kAlignment);
994 // Free area from which to carve new objects
998 // Linked list of all regions allocated by this allocator
999 void* allocated_regions_;
1001 // Free list of already carved objects
1004 // Number of allocated but unfreed objects
1009 ASSERT(kAlignedSize <= kAllocIncrement);
1011 allocated_regions_ = 0;
1018 // Consult free list
1020 if (free_list_ != NULL) {
1021 result = free_list_;
1022 free_list_ = *(reinterpret_cast<void**>(result));
1024 if (free_avail_ < kAlignedSize) {
1026 char* new_allocation = reinterpret_cast<char*>(MetaDataAlloc(kAllocIncrement));
1027 if (!new_allocation)
1030 *reinterpret_cast_ptr<void**>(new_allocation) = allocated_regions_;
1031 allocated_regions_ = new_allocation;
1032 free_area_ = new_allocation + kAlignedSize;
1033 free_avail_ = kAllocIncrement - kAlignedSize;
1035 result = free_area_;
1036 free_area_ += kAlignedSize;
1037 free_avail_ -= kAlignedSize;
1040 return reinterpret_cast<T*>(result);
1044 *(reinterpret_cast<void**>(p)) = free_list_;
1049 int inuse() const { return inuse_; }
1051 #if defined(WTF_CHANGES) && OS(DARWIN)
1052 template <class Recorder>
1053 void recordAdministrativeRegions(Recorder& recorder, const RemoteMemoryReader& reader)
1055 vm_address_t adminAllocation = reinterpret_cast<vm_address_t>(allocated_regions_);
1056 while (adminAllocation) {
1057 recorder.recordRegion(adminAllocation, kAllocIncrement);
1058 adminAllocation = *reader(reinterpret_cast<vm_address_t*>(adminAllocation));
1064 // -------------------------------------------------------------------------
1065 // Span - a contiguous run of pages
1066 // -------------------------------------------------------------------------
1068 // Type that can hold a page number
1069 typedef uintptr_t PageID;
1071 // Type that can hold the length of a run of pages
1072 typedef uintptr_t Length;
1074 static const Length kMaxValidPages = (~static_cast<Length>(0)) >> kPageShift;
1076 // Convert byte size into pages. This won't overflow, but may return
1077 // an unreasonably large value if bytes is huge enough.
1078 static inline Length pages(size_t bytes) {
1079 return (bytes >> kPageShift) +
1080 ((bytes & (kPageSize - 1)) > 0 ? 1 : 0);
1083 // Convert a user size into the number of bytes that will actually be
1085 static size_t AllocationSize(size_t bytes) {
1086 if (bytes > kMaxSize) {
1087 // Large object: we allocate an integral number of pages
1088 ASSERT(bytes <= (kMaxValidPages << kPageShift));
1089 return pages(bytes) << kPageShift;
1091 // Small object: find the size class to which it belongs
1092 return ByteSizeForClass(SizeClass(bytes));
1096 // Information kept for a span (a contiguous run of pages).
1098 PageID start; // Starting page number
1099 Length length; // Number of pages in span
1100 Span* next; // Used when in link list
1101 Span* prev; // Used when in link list
1102 void* objects; // Linked list of free objects
1103 unsigned int free : 1; // Is the span free
1104 #ifndef NO_TCMALLOC_SAMPLES
1105 unsigned int sample : 1; // Sampled object?
1107 unsigned int sizeclass : 8; // Size-class for small objects (or 0)
1108 unsigned int refcount : 11; // Number of non-free objects
1109 bool decommitted : 1;
1113 // For debugging, we can keep a log events per span
1120 #define ASSERT_SPAN_COMMITTED(span) ASSERT(!span->decommitted)
1123 void Event(Span* span, char op, int v = 0) {
1124 span->history[span->nexthistory] = op;
1125 span->value[span->nexthistory] = v;
1126 span->nexthistory++;
1127 if (span->nexthistory == sizeof(span->history)) span->nexthistory = 0;
1130 #define Event(s,o,v) ((void) 0)
1133 // Allocator/deallocator for spans
1134 static PageHeapAllocator<Span> span_allocator;
1135 static Span* NewSpan(PageID p, Length len) {
1136 Span* result = span_allocator.New();
1137 memset(result, 0, sizeof(*result));
1139 result->length = len;
1141 result->nexthistory = 0;
1146 static inline void DeleteSpan(Span* span) {
1148 // In debug mode, trash the contents of deleted Spans
1149 memset(span, 0x3f, sizeof(*span));
1151 span_allocator.Delete(span);
1154 // -------------------------------------------------------------------------
1155 // Doubly linked list of spans.
1156 // -------------------------------------------------------------------------
1158 static inline void DLL_Init(Span* list) {
1163 static inline void DLL_Remove(Span* span) {
1164 span->prev->next = span->next;
1165 span->next->prev = span->prev;
1170 static ALWAYS_INLINE bool DLL_IsEmpty(const Span* list) {
1171 return list->next == list;
1174 static int DLL_Length(const Span* list) {
1176 for (Span* s = list->next; s != list; s = s->next) {
1182 #if 0 /* Not needed at the moment -- causes compiler warnings if not used */
1183 static void DLL_Print(const char* label, const Span* list) {
1184 MESSAGE("%-10s %p:", label, list);
1185 for (const Span* s = list->next; s != list; s = s->next) {
1186 MESSAGE(" <%p,%u,%u>", s, s->start, s->length);
1192 static inline void DLL_Prepend(Span* list, Span* span) {
1193 ASSERT(span->next == NULL);
1194 ASSERT(span->prev == NULL);
1195 span->next = list->next;
1197 list->next->prev = span;
1201 // -------------------------------------------------------------------------
1202 // Stack traces kept for sampled allocations
1203 // The following state is protected by pageheap_lock_.
1204 // -------------------------------------------------------------------------
1206 // size/depth are made the same size as a pointer so that some generic
1207 // code below can conveniently cast them back and forth to void*.
1208 static const int kMaxStackDepth = 31;
1210 uintptr_t size; // Size of object
1211 uintptr_t depth; // Number of PC values stored in array below
1212 void* stack[kMaxStackDepth];
1214 static PageHeapAllocator<StackTrace> stacktrace_allocator;
1215 static Span sampled_objects;
1217 // -------------------------------------------------------------------------
1218 // Map from page-id to per-page data
1219 // -------------------------------------------------------------------------
1221 // We use PageMap2<> for 32-bit and PageMap3<> for 64-bit machines.
1222 // We also use a simple one-level cache for hot PageID-to-sizeclass mappings,
1223 // because sometimes the sizeclass is all the information we need.
1225 // Selector class -- general selector uses 3-level map
1226 template <int BITS> class MapSelector {
1228 typedef TCMalloc_PageMap3<BITS-kPageShift> Type;
1229 typedef PackedCache<BITS, uint64_t> CacheType;
1232 #if defined(WTF_CHANGES)
1234 // On all known X86-64 platforms, the upper 16 bits are always unused and therefore
1235 // can be excluded from the PageMap key.
1236 // See http://en.wikipedia.org/wiki/X86-64#Virtual_address_space_details
1238 static const size_t kBitsUnusedOn64Bit = 16;
1240 static const size_t kBitsUnusedOn64Bit = 0;
1243 // A three-level map for 64-bit machines
1244 template <> class MapSelector<64> {
1246 typedef TCMalloc_PageMap3<64 - kPageShift - kBitsUnusedOn64Bit> Type;
1247 typedef PackedCache<64, uint64_t> CacheType;
1251 // A two-level map for 32-bit machines
1252 template <> class MapSelector<32> {
1254 typedef TCMalloc_PageMap2<32 - kPageShift> Type;
1255 typedef PackedCache<32 - kPageShift, uint16_t> CacheType;
1258 // -------------------------------------------------------------------------
1259 // Page-level allocator
1260 // * Eager coalescing
1262 // Heap for page-level allocation. We allow allocating and freeing a
1263 // contiguous runs of pages (called a "span").
1264 // -------------------------------------------------------------------------
1266 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1267 // The page heap maintains a free list for spans that are no longer in use by
1268 // the central cache or any thread caches. We use a background thread to
1269 // periodically scan the free list and release a percentage of it back to the OS.
1271 // If free_committed_pages_ exceeds kMinimumFreeCommittedPageCount, the
1272 // background thread:
1274 // - pauses for kScavengeDelayInSeconds
1275 // - returns to the OS a percentage of the memory that remained unused during
1276 // that pause (kScavengePercentage * min_free_committed_pages_since_last_scavenge_)
1277 // The goal of this strategy is to reduce memory pressure in a timely fashion
1278 // while avoiding thrashing the OS allocator.
1280 // Time delay before the page heap scavenger will consider returning pages to
1282 static const int kScavengeDelayInSeconds = 2;
1284 // Approximate percentage of free committed pages to return to the OS in one
1286 static const float kScavengePercentage = .5f;
1288 // number of span lists to keep spans in when memory is returned.
1289 static const int kMinSpanListsWithSpans = 32;
1291 // Number of free committed pages that we want to keep around. The minimum number of pages used when there
1292 // is 1 span in each of the first kMinSpanListsWithSpans spanlists. Currently 528 pages.
1293 static const size_t kMinimumFreeCommittedPageCount = kMinSpanListsWithSpans * ((1.0f+kMinSpanListsWithSpans) / 2.0f);
1297 class TCMalloc_PageHeap {
1301 // Allocate a run of "n" pages. Returns zero if out of memory.
1302 Span* New(Length n);
1304 // Delete the span "[p, p+n-1]".
1305 // REQUIRES: span was returned by earlier call to New() and
1306 // has not yet been deleted.
1307 void Delete(Span* span);
1309 // Mark an allocated span as being used for small objects of the
1310 // specified size-class.
1311 // REQUIRES: span was returned by an earlier call to New()
1312 // and has not yet been deleted.
1313 void RegisterSizeClass(Span* span, size_t sc);
1315 // Split an allocated span into two spans: one of length "n" pages
1316 // followed by another span of length "span->length - n" pages.
1317 // Modifies "*span" to point to the first span of length "n" pages.
1318 // Returns a pointer to the second span.
1320 // REQUIRES: "0 < n < span->length"
1321 // REQUIRES: !span->free
1322 // REQUIRES: span->sizeclass == 0
1323 Span* Split(Span* span, Length n);
1325 // Return the descriptor for the specified page.
1326 inline Span* GetDescriptor(PageID p) const {
1327 return reinterpret_cast<Span*>(pagemap_.get(p));
1331 inline Span* GetDescriptorEnsureSafe(PageID p)
1333 pagemap_.Ensure(p, 1);
1334 return GetDescriptor(p);
1337 size_t ReturnedBytes() const;
1340 // Dump state to stderr
1342 void Dump(TCMalloc_Printer* out);
1345 // Return number of bytes allocated from system
1346 inline uint64_t SystemBytes() const { return system_bytes_; }
1348 // Return number of free bytes in heap
1349 uint64_t FreeBytes() const {
1350 return (static_cast<uint64_t>(free_pages_) << kPageShift);
1354 bool CheckList(Span* list, Length min_pages, Length max_pages);
1356 // Release all pages on the free list for reuse by the OS:
1357 void ReleaseFreePages();
1359 // Return 0 if we have no information, or else the correct sizeclass for p.
1360 // Reads and writes to pagemap_cache_ do not require locking.
1361 // The entries are 64 bits on 64-bit hardware and 16 bits on
1362 // 32-bit hardware, and we don't mind raciness as long as each read of
1363 // an entry yields a valid entry, not a partially updated entry.
1364 size_t GetSizeClassIfCached(PageID p) const {
1365 return pagemap_cache_.GetOrDefault(p, 0);
1367 void CacheSizeClass(PageID p, size_t cl) const { pagemap_cache_.Put(p, cl); }
1370 // Pick the appropriate map and cache types based on pointer size
1371 typedef MapSelector<8*sizeof(uintptr_t)>::Type PageMap;
1372 typedef MapSelector<8*sizeof(uintptr_t)>::CacheType PageMapCache;
1374 mutable PageMapCache pagemap_cache_;
1376 // We segregate spans of a given size into two circular linked
1377 // lists: one for normal spans, and one for spans whose memory
1378 // has been returned to the system.
1384 // List of free spans of length >= kMaxPages
1387 // Array mapping from span length to a doubly linked list of free spans
1388 SpanList free_[kMaxPages];
1390 // Number of pages kept in free lists
1391 uintptr_t free_pages_;
1393 // Bytes allocated from system
1394 uint64_t system_bytes_;
1396 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1397 // Number of pages kept in free lists that are still committed.
1398 Length free_committed_pages_;
1400 // Minimum number of free committed pages since last scavenge. (Can be 0 if
1401 // we've committed new pages since the last scavenge.)
1402 Length min_free_committed_pages_since_last_scavenge_;
1405 bool GrowHeap(Length n);
1407 // REQUIRES span->length >= n
1408 // Remove span from its free list, and move any leftover part of
1409 // span into appropriate free lists. Also update "span" to have
1410 // length exactly "n" and mark it as non-free so it can be returned
1413 // "released" is true iff "span" was found on a "returned" list.
1414 void Carve(Span* span, Length n, bool released);
1416 void RecordSpan(Span* span) {
1417 pagemap_.set(span->start, span);
1418 if (span->length > 1) {
1419 pagemap_.set(span->start + span->length - 1, span);
1423 // Allocate a large span of length == n. If successful, returns a
1424 // span of exactly the specified length. Else, returns NULL.
1425 Span* AllocLarge(Length n);
1427 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1428 // Incrementally release some memory to the system.
1429 // IncrementalScavenge(n) is called whenever n pages are freed.
1430 void IncrementalScavenge(Length n);
1433 // Number of pages to deallocate before doing more scavenging
1434 int64_t scavenge_counter_;
1436 // Index of last free list we scavenged
1437 size_t scavenge_index_;
1439 #if defined(WTF_CHANGES) && OS(DARWIN)
1440 friend class FastMallocZone;
1443 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1444 void initializeScavenger();
1445 ALWAYS_INLINE void signalScavenger();
1447 ALWAYS_INLINE bool shouldScavenge() const;
1449 #if HAVE(DISPATCH_H) || OS(WINDOWS)
1450 void periodicScavenge();
1451 ALWAYS_INLINE bool isScavengerSuspended();
1452 ALWAYS_INLINE void scheduleScavenger();
1453 ALWAYS_INLINE void rescheduleScavenger();
1454 ALWAYS_INLINE void suspendScavenger();
1457 #if HAVE(DISPATCH_H)
1458 dispatch_queue_t m_scavengeQueue;
1459 dispatch_source_t m_scavengeTimer;
1460 bool m_scavengingSuspended;
1462 static void CALLBACK scavengerTimerFired(void*, BOOLEAN);
1463 HANDLE m_scavengeQueueTimer;
1465 static NO_RETURN_WITH_VALUE void* runScavengerThread(void*);
1466 NO_RETURN void scavengerThread();
1468 // Keeps track of whether the background thread is actively scavenging memory every kScavengeDelayInSeconds, or
1469 // it's blocked waiting for more pages to be deleted.
1470 bool m_scavengeThreadActive;
1472 pthread_mutex_t m_scavengeMutex;
1473 pthread_cond_t m_scavengeCondition;
1476 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1479 void TCMalloc_PageHeap::init()
1481 pagemap_.init(MetaDataAlloc);
1482 pagemap_cache_ = PageMapCache(0);
1486 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1487 free_committed_pages_ = 0;
1488 min_free_committed_pages_since_last_scavenge_ = 0;
1489 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1491 scavenge_counter_ = 0;
1492 // Start scavenging at kMaxPages list
1493 scavenge_index_ = kMaxPages-1;
1494 COMPILE_ASSERT(kNumClasses <= (1 << PageMapCache::kValuebits), valuebits);
1495 DLL_Init(&large_.normal);
1496 DLL_Init(&large_.returned);
1497 for (size_t i = 0; i < kMaxPages; i++) {
1498 DLL_Init(&free_[i].normal);
1499 DLL_Init(&free_[i].returned);
1502 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1503 initializeScavenger();
1504 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1507 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1509 #if HAVE(DISPATCH_H)
1511 void TCMalloc_PageHeap::initializeScavenger()
1513 m_scavengeQueue = dispatch_queue_create("com.apple.JavaScriptCore.FastMallocSavenger", NULL);
1514 m_scavengeTimer = dispatch_source_create(DISPATCH_SOURCE_TYPE_TIMER, 0, 0, m_scavengeQueue);
1515 dispatch_time_t startTime = dispatch_time(DISPATCH_TIME_NOW, kScavengeDelayInSeconds * NSEC_PER_SEC);
1516 dispatch_source_set_timer(m_scavengeTimer, startTime, kScavengeDelayInSeconds * NSEC_PER_SEC, 1000 * NSEC_PER_USEC);
1517 dispatch_source_set_event_handler(m_scavengeTimer, ^{ periodicScavenge(); });
1518 m_scavengingSuspended = true;
1521 ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
1523 ASSERT(IsHeld(pageheap_lock));
1524 return m_scavengingSuspended;
1527 ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
1529 ASSERT(IsHeld(pageheap_lock));
1530 m_scavengingSuspended = false;
1531 dispatch_resume(m_scavengeTimer);
1534 ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
1536 // Nothing to do here for libdispatch.
1539 ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
1541 ASSERT(IsHeld(pageheap_lock));
1542 m_scavengingSuspended = true;
1543 dispatch_suspend(m_scavengeTimer);
1548 void TCMalloc_PageHeap::scavengerTimerFired(void* context, BOOLEAN)
1550 static_cast<TCMalloc_PageHeap*>(context)->periodicScavenge();
1553 void TCMalloc_PageHeap::initializeScavenger()
1555 m_scavengeQueueTimer = 0;
1558 ALWAYS_INLINE bool TCMalloc_PageHeap::isScavengerSuspended()
1560 ASSERT(IsHeld(pageheap_lock));
1561 return !m_scavengeQueueTimer;
1564 ALWAYS_INLINE void TCMalloc_PageHeap::scheduleScavenger()
1566 // We need to use WT_EXECUTEONLYONCE here and reschedule the timer, because
1567 // Windows will fire the timer event even when the function is already running.
1568 ASSERT(IsHeld(pageheap_lock));
1569 CreateTimerQueueTimer(&m_scavengeQueueTimer, 0, scavengerTimerFired, this, kScavengeDelayInSeconds * 1000, 0, WT_EXECUTEONLYONCE);
1572 ALWAYS_INLINE void TCMalloc_PageHeap::rescheduleScavenger()
1574 // We must delete the timer and create it again, because it is not possible to retrigger a timer on Windows.
1576 scheduleScavenger();
1579 ALWAYS_INLINE void TCMalloc_PageHeap::suspendScavenger()
1581 ASSERT(IsHeld(pageheap_lock));
1582 HANDLE scavengeQueueTimer = m_scavengeQueueTimer;
1583 m_scavengeQueueTimer = 0;
1584 DeleteTimerQueueTimer(0, scavengeQueueTimer, 0);
1589 void TCMalloc_PageHeap::initializeScavenger()
1591 // Create a non-recursive mutex.
1592 #if !defined(PTHREAD_MUTEX_NORMAL) || PTHREAD_MUTEX_NORMAL == PTHREAD_MUTEX_DEFAULT
1593 pthread_mutex_init(&m_scavengeMutex, 0);
1595 pthread_mutexattr_t attr;
1596 pthread_mutexattr_init(&attr);
1597 pthread_mutexattr_settype(&attr, PTHREAD_MUTEX_NORMAL);
1599 pthread_mutex_init(&m_scavengeMutex, &attr);
1601 pthread_mutexattr_destroy(&attr);
1604 pthread_cond_init(&m_scavengeCondition, 0);
1605 m_scavengeThreadActive = true;
1607 pthread_create(&thread, 0, runScavengerThread, this);
1610 void* TCMalloc_PageHeap::runScavengerThread(void* context)
1612 static_cast<TCMalloc_PageHeap*>(context)->scavengerThread();
1614 // Without this, Visual Studio will complain that this method does not return a value.
1619 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
1621 // m_scavengeMutex should be held before accessing m_scavengeThreadActive.
1622 ASSERT(pthread_mutex_trylock(m_scavengeMutex));
1623 if (!m_scavengeThreadActive && shouldScavenge())
1624 pthread_cond_signal(&m_scavengeCondition);
1629 void TCMalloc_PageHeap::scavenge()
1631 size_t pagesToRelease = min_free_committed_pages_since_last_scavenge_ * kScavengePercentage;
1632 size_t targetPageCount = std::max<size_t>(kMinimumFreeCommittedPageCount, free_committed_pages_ - pagesToRelease);
1634 while (free_committed_pages_ > targetPageCount) {
1635 for (int i = kMaxPages; i > 0 && free_committed_pages_ >= targetPageCount; i--) {
1636 SpanList* slist = (static_cast<size_t>(i) == kMaxPages) ? &large_ : &free_[i];
1637 // If the span size is bigger than kMinSpanListsWithSpans pages return all the spans in the list, else return all but 1 span.
1638 // Return only 50% of a spanlist at a time so spans of size 1 are not the only ones left.
1639 size_t length = DLL_Length(&slist->normal);
1640 size_t numSpansToReturn = (i > kMinSpanListsWithSpans) ? length : length / 2;
1641 for (int j = 0; static_cast<size_t>(j) < numSpansToReturn && !DLL_IsEmpty(&slist->normal) && free_committed_pages_ > targetPageCount; j++) {
1642 Span* s = slist->normal.prev;
1644 ASSERT(!s->decommitted);
1645 if (!s->decommitted) {
1646 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1647 static_cast<size_t>(s->length << kPageShift));
1648 ASSERT(free_committed_pages_ >= s->length);
1649 free_committed_pages_ -= s->length;
1650 s->decommitted = true;
1652 DLL_Prepend(&slist->returned, s);
1657 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1660 ALWAYS_INLINE bool TCMalloc_PageHeap::shouldScavenge() const
1662 return free_committed_pages_ > kMinimumFreeCommittedPageCount;
1665 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1667 inline Span* TCMalloc_PageHeap::New(Length n) {
1671 // Find first size >= n that has a non-empty list
1672 for (Length s = n; s < kMaxPages; s++) {
1674 bool released = false;
1675 if (!DLL_IsEmpty(&free_[s].normal)) {
1676 // Found normal span
1677 ll = &free_[s].normal;
1678 } else if (!DLL_IsEmpty(&free_[s].returned)) {
1679 // Found returned span; reallocate it
1680 ll = &free_[s].returned;
1683 // Keep looking in larger classes
1687 Span* result = ll->next;
1688 Carve(result, n, released);
1689 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1690 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1691 // free committed pages count.
1692 ASSERT(free_committed_pages_ >= n);
1693 free_committed_pages_ -= n;
1694 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1695 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1696 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1702 Span* result = AllocLarge(n);
1703 if (result != NULL) {
1704 ASSERT_SPAN_COMMITTED(result);
1708 // Grow the heap and try again
1714 return AllocLarge(n);
1717 Span* TCMalloc_PageHeap::AllocLarge(Length n) {
1718 // find the best span (closest to n in size).
1719 // The following loops implements address-ordered best-fit.
1720 bool from_released = false;
1723 // Search through normal list
1724 for (Span* span = large_.normal.next;
1725 span != &large_.normal;
1726 span = span->next) {
1727 if (span->length >= n) {
1729 || (span->length < best->length)
1730 || ((span->length == best->length) && (span->start < best->start))) {
1732 from_released = false;
1737 // Search through released list in case it has a better fit
1738 for (Span* span = large_.returned.next;
1739 span != &large_.returned;
1740 span = span->next) {
1741 if (span->length >= n) {
1743 || (span->length < best->length)
1744 || ((span->length == best->length) && (span->start < best->start))) {
1746 from_released = true;
1752 Carve(best, n, from_released);
1753 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1754 // The newly allocated memory is from a span that's in the normal span list (already committed). Update the
1755 // free committed pages count.
1756 ASSERT(free_committed_pages_ >= n);
1757 free_committed_pages_ -= n;
1758 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1759 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1760 #endif // USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1768 Span* TCMalloc_PageHeap::Split(Span* span, Length n) {
1770 ASSERT(n < span->length);
1771 ASSERT(!span->free);
1772 ASSERT(span->sizeclass == 0);
1773 Event(span, 'T', n);
1775 const Length extra = span->length - n;
1776 Span* leftover = NewSpan(span->start + n, extra);
1777 Event(leftover, 'U', extra);
1778 RecordSpan(leftover);
1779 pagemap_.set(span->start + n - 1, span); // Update map from pageid to span
1785 inline void TCMalloc_PageHeap::Carve(Span* span, Length n, bool released) {
1789 Event(span, 'A', n);
1792 // If the span chosen to carve from is decommited, commit the entire span at once to avoid committing spans 1 page at a time.
1793 ASSERT(span->decommitted);
1794 TCMalloc_SystemCommit(reinterpret_cast<void*>(span->start << kPageShift), static_cast<size_t>(span->length << kPageShift));
1795 span->decommitted = false;
1796 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1797 free_committed_pages_ += span->length;
1801 const int extra = static_cast<int>(span->length - n);
1804 Span* leftover = NewSpan(span->start + n, extra);
1806 leftover->decommitted = false;
1807 Event(leftover, 'S', extra);
1808 RecordSpan(leftover);
1810 // Place leftover span on appropriate free list
1811 SpanList* listpair = (static_cast<size_t>(extra) < kMaxPages) ? &free_[extra] : &large_;
1812 Span* dst = &listpair->normal;
1813 DLL_Prepend(dst, leftover);
1816 pagemap_.set(span->start + n - 1, span);
1820 static ALWAYS_INLINE void mergeDecommittedStates(Span* destination, Span* other)
1822 if (destination->decommitted && !other->decommitted) {
1823 TCMalloc_SystemRelease(reinterpret_cast<void*>(other->start << kPageShift),
1824 static_cast<size_t>(other->length << kPageShift));
1825 } else if (other->decommitted && !destination->decommitted) {
1826 TCMalloc_SystemRelease(reinterpret_cast<void*>(destination->start << kPageShift),
1827 static_cast<size_t>(destination->length << kPageShift));
1828 destination->decommitted = true;
1832 inline void TCMalloc_PageHeap::Delete(Span* span) {
1834 ASSERT(!span->free);
1835 ASSERT(span->length > 0);
1836 ASSERT(GetDescriptor(span->start) == span);
1837 ASSERT(GetDescriptor(span->start + span->length - 1) == span);
1838 span->sizeclass = 0;
1839 #ifndef NO_TCMALLOC_SAMPLES
1843 // Coalesce -- we guarantee that "p" != 0, so no bounds checking
1844 // necessary. We do not bother resetting the stale pagemap
1845 // entries for the pieces we are merging together because we only
1846 // care about the pagemap entries for the boundaries.
1847 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1848 // Track the total size of the neighboring free spans that are committed.
1849 Length neighboringCommittedSpansLength = 0;
1851 const PageID p = span->start;
1852 const Length n = span->length;
1853 Span* prev = GetDescriptor(p-1);
1854 if (prev != NULL && prev->free) {
1855 // Merge preceding span into this span
1856 ASSERT(prev->start + prev->length == p);
1857 const Length len = prev->length;
1858 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1859 if (!prev->decommitted)
1860 neighboringCommittedSpansLength += len;
1862 mergeDecommittedStates(span, prev);
1866 span->length += len;
1867 pagemap_.set(span->start, span);
1868 Event(span, 'L', len);
1870 Span* next = GetDescriptor(p+n);
1871 if (next != NULL && next->free) {
1872 // Merge next span into this span
1873 ASSERT(next->start == p+n);
1874 const Length len = next->length;
1875 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1876 if (!next->decommitted)
1877 neighboringCommittedSpansLength += len;
1879 mergeDecommittedStates(span, next);
1882 span->length += len;
1883 pagemap_.set(span->start + span->length - 1, span);
1884 Event(span, 'R', len);
1887 Event(span, 'D', span->length);
1889 if (span->decommitted) {
1890 if (span->length < kMaxPages)
1891 DLL_Prepend(&free_[span->length].returned, span);
1893 DLL_Prepend(&large_.returned, span);
1895 if (span->length < kMaxPages)
1896 DLL_Prepend(&free_[span->length].normal, span);
1898 DLL_Prepend(&large_.normal, span);
1902 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1903 if (span->decommitted) {
1904 // If the merged span is decommitted, that means we decommitted any neighboring spans that were
1905 // committed. Update the free committed pages count.
1906 free_committed_pages_ -= neighboringCommittedSpansLength;
1907 if (free_committed_pages_ < min_free_committed_pages_since_last_scavenge_)
1908 min_free_committed_pages_since_last_scavenge_ = free_committed_pages_;
1910 // If the merged span remains committed, add the deleted span's size to the free committed pages count.
1911 free_committed_pages_ += n;
1914 // Make sure the scavenge thread becomes active if we have enough freed pages to release some back to the system.
1917 IncrementalScavenge(n);
1923 #if !USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
1924 void TCMalloc_PageHeap::IncrementalScavenge(Length n) {
1925 // Fast path; not yet time to release memory
1926 scavenge_counter_ -= n;
1927 if (scavenge_counter_ >= 0) return; // Not yet time to scavenge
1929 // If there is nothing to release, wait for so many pages before
1930 // scavenging again. With 4K pages, this comes to 16MB of memory.
1931 static const size_t kDefaultReleaseDelay = 1 << 8;
1933 // Find index of free list to scavenge
1934 size_t index = scavenge_index_ + 1;
1935 for (size_t i = 0; i < kMaxPages+1; i++) {
1936 if (index > kMaxPages) index = 0;
1937 SpanList* slist = (index == kMaxPages) ? &large_ : &free_[index];
1938 if (!DLL_IsEmpty(&slist->normal)) {
1939 // Release the last span on the normal portion of this list
1940 Span* s = slist->normal.prev;
1942 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
1943 static_cast<size_t>(s->length << kPageShift));
1944 s->decommitted = true;
1945 DLL_Prepend(&slist->returned, s);
1947 scavenge_counter_ = std::max<size_t>(64UL, std::min<size_t>(kDefaultReleaseDelay, kDefaultReleaseDelay - (free_pages_ / kDefaultReleaseDelay)));
1949 if (index == kMaxPages && !DLL_IsEmpty(&slist->normal))
1950 scavenge_index_ = index - 1;
1952 scavenge_index_ = index;
1958 // Nothing to scavenge, delay for a while
1959 scavenge_counter_ = kDefaultReleaseDelay;
1963 void TCMalloc_PageHeap::RegisterSizeClass(Span* span, size_t sc) {
1964 // Associate span object with all interior pages as well
1965 ASSERT(!span->free);
1966 ASSERT(GetDescriptor(span->start) == span);
1967 ASSERT(GetDescriptor(span->start+span->length-1) == span);
1968 Event(span, 'C', sc);
1969 span->sizeclass = static_cast<unsigned int>(sc);
1970 for (Length i = 1; i < span->length-1; i++) {
1971 pagemap_.set(span->start+i, span);
1976 size_t TCMalloc_PageHeap::ReturnedBytes() const {
1978 for (unsigned s = 0; s < kMaxPages; s++) {
1979 const int r_length = DLL_Length(&free_[s].returned);
1980 unsigned r_pages = s * r_length;
1981 result += r_pages << kPageShift;
1984 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next)
1985 result += s->length << kPageShift;
1991 static double PagesToMB(uint64_t pages) {
1992 return (pages << kPageShift) / 1048576.0;
1995 void TCMalloc_PageHeap::Dump(TCMalloc_Printer* out) {
1996 int nonempty_sizes = 0;
1997 for (int s = 0; s < kMaxPages; s++) {
1998 if (!DLL_IsEmpty(&free_[s].normal) || !DLL_IsEmpty(&free_[s].returned)) {
2002 out->printf("------------------------------------------------\n");
2003 out->printf("PageHeap: %d sizes; %6.1f MB free\n",
2004 nonempty_sizes, PagesToMB(free_pages_));
2005 out->printf("------------------------------------------------\n");
2006 uint64_t total_normal = 0;
2007 uint64_t total_returned = 0;
2008 for (int s = 0; s < kMaxPages; s++) {
2009 const int n_length = DLL_Length(&free_[s].normal);
2010 const int r_length = DLL_Length(&free_[s].returned);
2011 if (n_length + r_length > 0) {
2012 uint64_t n_pages = s * n_length;
2013 uint64_t r_pages = s * r_length;
2014 total_normal += n_pages;
2015 total_returned += r_pages;
2016 out->printf("%6u pages * %6u spans ~ %6.1f MB; %6.1f MB cum"
2017 "; unmapped: %6.1f MB; %6.1f MB cum\n",
2019 (n_length + r_length),
2020 PagesToMB(n_pages + r_pages),
2021 PagesToMB(total_normal + total_returned),
2023 PagesToMB(total_returned));
2027 uint64_t n_pages = 0;
2028 uint64_t r_pages = 0;
2031 out->printf("Normal large spans:\n");
2032 for (Span* s = large_.normal.next; s != &large_.normal; s = s->next) {
2033 out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
2034 s->length, PagesToMB(s->length));
2035 n_pages += s->length;
2038 out->printf("Unmapped large spans:\n");
2039 for (Span* s = large_.returned.next; s != &large_.returned; s = s->next) {
2040 out->printf(" [ %6" PRIuS " pages ] %6.1f MB\n",
2041 s->length, PagesToMB(s->length));
2042 r_pages += s->length;
2045 total_normal += n_pages;
2046 total_returned += r_pages;
2047 out->printf(">255 large * %6u spans ~ %6.1f MB; %6.1f MB cum"
2048 "; unmapped: %6.1f MB; %6.1f MB cum\n",
2049 (n_spans + r_spans),
2050 PagesToMB(n_pages + r_pages),
2051 PagesToMB(total_normal + total_returned),
2053 PagesToMB(total_returned));
2057 bool TCMalloc_PageHeap::GrowHeap(Length n) {
2058 ASSERT(kMaxPages >= kMinSystemAlloc);
2059 if (n > kMaxValidPages) return false;
2060 Length ask = (n>kMinSystemAlloc) ? n : static_cast<Length>(kMinSystemAlloc);
2062 void* ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
2065 // Try growing just "n" pages
2067 ptr = TCMalloc_SystemAlloc(ask << kPageShift, &actual_size, kPageSize);
2069 if (ptr == NULL) return false;
2071 ask = actual_size >> kPageShift;
2073 uint64_t old_system_bytes = system_bytes_;
2074 system_bytes_ += (ask << kPageShift);
2075 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
2078 // If we have already a lot of pages allocated, just pre allocate a bunch of
2079 // memory for the page map. This prevents fragmentation by pagemap metadata
2080 // when a program keeps allocating and freeing large blocks.
2082 if (old_system_bytes < kPageMapBigAllocationThreshold
2083 && system_bytes_ >= kPageMapBigAllocationThreshold) {
2084 pagemap_.PreallocateMoreMemory();
2087 // Make sure pagemap_ has entries for all of the new pages.
2088 // Plus ensure one before and one after so coalescing code
2089 // does not need bounds-checking.
2090 if (pagemap_.Ensure(p-1, ask+2)) {
2091 // Pretend the new area is allocated and then Delete() it to
2092 // cause any necessary coalescing to occur.
2094 // We do not adjust free_pages_ here since Delete() will do it for us.
2095 Span* span = NewSpan(p, ask);
2101 // We could not allocate memory within "pagemap_"
2102 // TODO: Once we can return memory to the system, return the new span
2107 bool TCMalloc_PageHeap::Check() {
2108 ASSERT(free_[0].normal.next == &free_[0].normal);
2109 ASSERT(free_[0].returned.next == &free_[0].returned);
2110 CheckList(&large_.normal, kMaxPages, 1000000000);
2111 CheckList(&large_.returned, kMaxPages, 1000000000);
2112 for (Length s = 1; s < kMaxPages; s++) {
2113 CheckList(&free_[s].normal, s, s);
2114 CheckList(&free_[s].returned, s, s);
2120 bool TCMalloc_PageHeap::CheckList(Span*, Length, Length) {
2124 bool TCMalloc_PageHeap::CheckList(Span* list, Length min_pages, Length max_pages) {
2125 for (Span* s = list->next; s != list; s = s->next) {
2126 CHECK_CONDITION(s->free);
2127 CHECK_CONDITION(s->length >= min_pages);
2128 CHECK_CONDITION(s->length <= max_pages);
2129 CHECK_CONDITION(GetDescriptor(s->start) == s);
2130 CHECK_CONDITION(GetDescriptor(s->start+s->length-1) == s);
2136 static void ReleaseFreeList(Span* list, Span* returned) {
2137 // Walk backwards through list so that when we push these
2138 // spans on the "returned" list, we preserve the order.
2139 while (!DLL_IsEmpty(list)) {
2140 Span* s = list->prev;
2142 DLL_Prepend(returned, s);
2143 TCMalloc_SystemRelease(reinterpret_cast<void*>(s->start << kPageShift),
2144 static_cast<size_t>(s->length << kPageShift));
2148 void TCMalloc_PageHeap::ReleaseFreePages() {
2149 for (Length s = 0; s < kMaxPages; s++) {
2150 ReleaseFreeList(&free_[s].normal, &free_[s].returned);
2152 ReleaseFreeList(&large_.normal, &large_.returned);
2156 //-------------------------------------------------------------------
2158 //-------------------------------------------------------------------
2160 class TCMalloc_ThreadCache_FreeList {
2162 void* list_; // Linked list of nodes
2163 uint16_t length_; // Current length
2164 uint16_t lowater_; // Low water mark for list length
2173 // Return current length of list
2174 int length() const {
2179 bool empty() const {
2180 return list_ == NULL;
2183 // Low-water mark management
2184 int lowwatermark() const { return lowater_; }
2185 void clear_lowwatermark() { lowater_ = length_; }
2187 ALWAYS_INLINE void Push(void* ptr) {
2188 SLL_Push(&list_, ptr);
2192 void PushRange(int N, void *start, void *end) {
2193 SLL_PushRange(&list_, start, end);
2194 length_ = length_ + static_cast<uint16_t>(N);
2197 void PopRange(int N, void **start, void **end) {
2198 SLL_PopRange(&list_, N, start, end);
2199 ASSERT(length_ >= N);
2200 length_ = length_ - static_cast<uint16_t>(N);
2201 if (length_ < lowater_) lowater_ = length_;
2204 ALWAYS_INLINE void* Pop() {
2205 ASSERT(list_ != NULL);
2207 if (length_ < lowater_) lowater_ = length_;
2208 return SLL_Pop(&list_);
2212 template <class Finder, class Reader>
2213 void enumerateFreeObjects(Finder& finder, const Reader& reader)
2215 for (void* nextObject = list_; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
2216 finder.visit(nextObject);
2221 //-------------------------------------------------------------------
2222 // Data kept per thread
2223 //-------------------------------------------------------------------
2225 class TCMalloc_ThreadCache {
2227 typedef TCMalloc_ThreadCache_FreeList FreeList;
2229 typedef DWORD ThreadIdentifier;
2231 typedef pthread_t ThreadIdentifier;
2234 size_t size_; // Combined size of data
2235 ThreadIdentifier tid_; // Which thread owns it
2236 bool in_setspecific_; // Called pthread_setspecific?
2237 FreeList list_[kNumClasses]; // Array indexed by size-class
2239 // We sample allocations, biased by the size of the allocation
2240 uint32_t rnd_; // Cheap random number generator
2241 size_t bytes_until_sample_; // Bytes until we sample next
2243 // Allocate a new heap. REQUIRES: pageheap_lock is held.
2244 static inline TCMalloc_ThreadCache* NewHeap(ThreadIdentifier tid);
2246 // Use only as pthread thread-specific destructor function.
2247 static void DestroyThreadCache(void* ptr);
2249 // All ThreadCache objects are kept in a linked list (for stats collection)
2250 TCMalloc_ThreadCache* next_;
2251 TCMalloc_ThreadCache* prev_;
2253 void Init(ThreadIdentifier tid);
2256 // Accessors (mostly just for printing stats)
2257 int freelist_length(size_t cl) const { return list_[cl].length(); }
2259 // Total byte size in cache
2260 size_t Size() const { return size_; }
2262 ALWAYS_INLINE void* Allocate(size_t size);
2263 void Deallocate(void* ptr, size_t size_class);
2265 ALWAYS_INLINE void FetchFromCentralCache(size_t cl, size_t allocationSize);
2266 void ReleaseToCentralCache(size_t cl, int N);
2270 // Record allocation of "k" bytes. Return true iff allocation
2271 // should be sampled
2272 bool SampleAllocation(size_t k);
2274 // Pick next sampling point
2275 void PickNextSample(size_t k);
2277 static void InitModule();
2278 static void InitTSD();
2279 static TCMalloc_ThreadCache* GetThreadHeap();
2280 static TCMalloc_ThreadCache* GetCache();
2281 static TCMalloc_ThreadCache* GetCacheIfPresent();
2282 static TCMalloc_ThreadCache* CreateCacheIfNecessary();
2283 static void DeleteCache(TCMalloc_ThreadCache* heap);
2284 static void BecomeIdle();
2285 static void RecomputeThreadCacheSize();
2288 template <class Finder, class Reader>
2289 void enumerateFreeObjects(Finder& finder, const Reader& reader)
2291 for (unsigned sizeClass = 0; sizeClass < kNumClasses; sizeClass++)
2292 list_[sizeClass].enumerateFreeObjects(finder, reader);
2297 //-------------------------------------------------------------------
2298 // Data kept per size-class in central cache
2299 //-------------------------------------------------------------------
2301 class TCMalloc_Central_FreeList {
2303 void Init(size_t cl);
2305 // These methods all do internal locking.
2307 // Insert the specified range into the central freelist. N is the number of
2308 // elements in the range.
2309 void InsertRange(void *start, void *end, int N);
2311 // Returns the actual number of fetched elements into N.
2312 void RemoveRange(void **start, void **end, int *N);
2314 // Returns the number of free objects in cache.
2316 SpinLockHolder h(&lock_);
2320 // Returns the number of free objects in the transfer cache.
2322 SpinLockHolder h(&lock_);
2323 return used_slots_ * num_objects_to_move[size_class_];
2327 template <class Finder, class Reader>
2328 void enumerateFreeObjects(Finder& finder, const Reader& reader, TCMalloc_Central_FreeList* remoteCentralFreeList)
2330 for (Span* span = &empty_; span && span != &empty_; span = (span->next ? reader(span->next) : 0))
2331 ASSERT(!span->objects);
2333 ASSERT(!nonempty_.objects);
2334 static const ptrdiff_t nonemptyOffset = reinterpret_cast<const char*>(&nonempty_) - reinterpret_cast<const char*>(this);
2336 Span* remoteNonempty = reinterpret_cast<Span*>(reinterpret_cast<char*>(remoteCentralFreeList) + nonemptyOffset);
2337 Span* remoteSpan = nonempty_.next;
2339 for (Span* span = reader(remoteSpan); span && remoteSpan != remoteNonempty; remoteSpan = span->next, span = (span->next ? reader(span->next) : 0)) {
2340 for (void* nextObject = span->objects; nextObject; nextObject = *reader(reinterpret_cast<void**>(nextObject)))
2341 finder.visit(nextObject);
2347 // REQUIRES: lock_ is held
2348 // Remove object from cache and return.
2349 // Return NULL if no free entries in cache.
2350 void* FetchFromSpans();
2352 // REQUIRES: lock_ is held
2353 // Remove object from cache and return. Fetches
2354 // from pageheap if cache is empty. Only returns
2355 // NULL on allocation failure.
2356 void* FetchFromSpansSafe();
2358 // REQUIRES: lock_ is held
2359 // Release a linked list of objects to spans.
2360 // May temporarily release lock_.
2361 void ReleaseListToSpans(void *start);
2363 // REQUIRES: lock_ is held
2364 // Release an object to spans.
2365 // May temporarily release lock_.
2366 ALWAYS_INLINE void ReleaseToSpans(void* object);
2368 // REQUIRES: lock_ is held
2369 // Populate cache by fetching from the page heap.
2370 // May temporarily release lock_.
2371 ALWAYS_INLINE void Populate();
2373 // REQUIRES: lock is held.
2374 // Tries to make room for a TCEntry. If the cache is full it will try to
2375 // expand it at the cost of some other cache size. Return false if there is
2377 bool MakeCacheSpace();
2379 // REQUIRES: lock_ for locked_size_class is held.
2380 // Picks a "random" size class to steal TCEntry slot from. In reality it
2381 // just iterates over the sizeclasses but does so without taking a lock.
2382 // Returns true on success.
2383 // May temporarily lock a "random" size class.
2384 static ALWAYS_INLINE bool EvictRandomSizeClass(size_t locked_size_class, bool force);
2386 // REQUIRES: lock_ is *not* held.
2387 // Tries to shrink the Cache. If force is true it will relase objects to
2388 // spans if it allows it to shrink the cache. Return false if it failed to
2389 // shrink the cache. Decrements cache_size_ on succeess.
2390 // May temporarily take lock_. If it takes lock_, the locked_size_class
2391 // lock is released to the thread from holding two size class locks
2392 // concurrently which could lead to a deadlock.
2393 bool ShrinkCache(int locked_size_class, bool force);
2395 // This lock protects all the data members. cached_entries and cache_size_
2396 // may be looked at without holding the lock.
2399 // We keep linked lists of empty and non-empty spans.
2400 size_t size_class_; // My size class
2401 Span empty_; // Dummy header for list of empty spans
2402 Span nonempty_; // Dummy header for list of non-empty spans
2403 size_t counter_; // Number of free objects in cache entry
2405 // Here we reserve space for TCEntry cache slots. Since one size class can
2406 // end up getting all the TCEntries quota in the system we just preallocate
2407 // sufficient number of entries here.
2408 TCEntry tc_slots_[kNumTransferEntries];
2410 // Number of currently used cached entries in tc_slots_. This variable is
2411 // updated under a lock but can be read without one.
2412 int32_t used_slots_;
2413 // The current number of slots for this size class. This is an
2414 // adaptive value that is increased if there is lots of traffic
2415 // on a given size class.
2416 int32_t cache_size_;
2419 // Pad each CentralCache object to multiple of 64 bytes
2420 class TCMalloc_Central_FreeListPadded : public TCMalloc_Central_FreeList {
2422 char pad_[(64 - (sizeof(TCMalloc_Central_FreeList) % 64)) % 64];
2425 //-------------------------------------------------------------------
2427 //-------------------------------------------------------------------
2429 // Central cache -- a collection of free-lists, one per size-class.
2430 // We have a separate lock per free-list to reduce contention.
2431 static TCMalloc_Central_FreeListPadded central_cache[kNumClasses];
2433 // Page-level allocator
2434 static SpinLock pageheap_lock = SPINLOCK_INITIALIZER;
2435 static AllocAlignmentInteger pageheap_memory[(sizeof(TCMalloc_PageHeap) + sizeof(AllocAlignmentInteger) - 1) / sizeof(AllocAlignmentInteger)];
2436 static bool phinited = false;
2438 // Avoid extra level of indirection by making "pageheap" be just an alias
2439 // of pageheap_memory.
2442 TCMalloc_PageHeap* m_pageHeap;
2445 static inline TCMalloc_PageHeap* getPageHeap()
2447 PageHeapUnion u = { &pageheap_memory[0] };
2448 return u.m_pageHeap;
2451 #define pageheap getPageHeap()
2453 #if USE_BACKGROUND_THREAD_TO_SCAVENGE_MEMORY
2455 #if HAVE(DISPATCH_H) || OS(WINDOWS)
2457 void TCMalloc_PageHeap::periodicScavenge()
2459 SpinLockHolder h(&pageheap_lock);
2460 pageheap->scavenge();
2462 if (shouldScavenge()) {
2463 rescheduleScavenger();
2470 ALWAYS_INLINE void TCMalloc_PageHeap::signalScavenger()
2472 ASSERT(IsHeld(pageheap_lock));
2473 if (isScavengerSuspended() && shouldScavenge())
2474 scheduleScavenger();
2479 void TCMalloc_PageHeap::scavengerThread()
2481 #if HAVE(PTHREAD_SETNAME_NP)
2482 pthread_setname_np("JavaScriptCore: FastMalloc scavenger");
2486 if (!shouldScavenge()) {
2487 pthread_mutex_lock(&m_scavengeMutex);
2488 m_scavengeThreadActive = false;
2489 // Block until there are enough free committed pages to release back to the system.
2490 pthread_cond_wait(&m_scavengeCondition, &m_scavengeMutex);
2491 m_scavengeThreadActive = true;
2492 pthread_mutex_unlock(&m_scavengeMutex);
2494 sleep(kScavengeDelayInSeconds);
2496 SpinLockHolder h(&pageheap_lock);
2497 pageheap->scavenge();
2506 // If TLS is available, we also store a copy
2507 // of the per-thread object in a __thread variable
2508 // since __thread variables are faster to read
2509 // than pthread_getspecific(). We still need
2510 // pthread_setspecific() because __thread
2511 // variables provide no way to run cleanup
2512 // code when a thread is destroyed.
2514 static __thread TCMalloc_ThreadCache *threadlocal_heap;
2516 // Thread-specific key. Initialization here is somewhat tricky
2517 // because some Linux startup code invokes malloc() before it
2518 // is in a good enough state to handle pthread_keycreate().
2519 // Therefore, we use TSD keys only after tsd_inited is set to true.
2520 // Until then, we use a slow path to get the heap object.
2521 static bool tsd_inited = false;
2522 static pthread_key_t heap_key;
2524 DWORD tlsIndex = TLS_OUT_OF_INDEXES;
2527 static ALWAYS_INLINE void setThreadHeap(TCMalloc_ThreadCache* heap)
2529 // Still do pthread_setspecific even if there's an alternate form
2530 // of thread-local storage in use, to benefit from the delete callback.
2531 pthread_setspecific(heap_key, heap);
2534 TlsSetValue(tlsIndex, heap);
2535 #elif defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0)
2536 // Can't have two libraries both doing this in the same process,
2537 // so check and make this crash right away.
2538 if (_pthread_getspecific_direct(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0))
2540 _pthread_setspecific_direct(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0, heap);
2544 // Allocator for thread heaps
2545 static PageHeapAllocator<TCMalloc_ThreadCache> threadheap_allocator;
2547 // Linked list of heap objects. Protected by pageheap_lock.
2548 static TCMalloc_ThreadCache* thread_heaps = NULL;
2549 static int thread_heap_count = 0;
2551 // Overall thread cache size. Protected by pageheap_lock.
2552 static size_t overall_thread_cache_size = kDefaultOverallThreadCacheSize;
2554 // Global per-thread cache size. Writes are protected by
2555 // pageheap_lock. Reads are done without any locking, which should be
2556 // fine as long as size_t can be written atomically and we don't place
2557 // invariants between this variable and other pieces of state.
2558 static volatile size_t per_thread_cache_size = kMaxThreadCacheSize;
2560 //-------------------------------------------------------------------
2561 // Central cache implementation
2562 //-------------------------------------------------------------------
2564 void TCMalloc_Central_FreeList::Init(size_t cl) {
2568 DLL_Init(&nonempty_);
2573 ASSERT(cache_size_ <= kNumTransferEntries);
2576 void TCMalloc_Central_FreeList::ReleaseListToSpans(void* start) {
2578 void *next = SLL_Next(start);
2579 ReleaseToSpans(start);
2584 ALWAYS_INLINE void TCMalloc_Central_FreeList::ReleaseToSpans(void* object) {
2585 const PageID p = reinterpret_cast<uintptr_t>(object) >> kPageShift;
2586 Span* span = pageheap->GetDescriptor(p);
2587 ASSERT(span != NULL);
2588 ASSERT(span->refcount > 0);
2590 // If span is empty, move it to non-empty list
2591 if (span->objects == NULL) {
2593 DLL_Prepend(&nonempty_, span);
2594 Event(span, 'N', 0);
2597 // The following check is expensive, so it is disabled by default
2599 // Check that object does not occur in list
2601 for (void* p = span->objects; p != NULL; p = *((void**) p)) {
2602 ASSERT(p != object);
2605 ASSERT(got + span->refcount ==
2606 (span->length<<kPageShift)/ByteSizeForClass(span->sizeclass));
2611 if (span->refcount == 0) {
2612 Event(span, '#', 0);
2613 counter_ -= (span->length<<kPageShift) / ByteSizeForClass(span->sizeclass);
2616 // Release central list lock while operating on pageheap
2619 SpinLockHolder h(&pageheap_lock);
2620 pageheap->Delete(span);
2624 *(reinterpret_cast<void**>(object)) = span->objects;
2625 span->objects = object;
2629 ALWAYS_INLINE bool TCMalloc_Central_FreeList::EvictRandomSizeClass(
2630 size_t locked_size_class, bool force) {
2631 static int race_counter = 0;
2632 int t = race_counter++; // Updated without a lock, but who cares.
2633 if (t >= static_cast<int>(kNumClasses)) {
2634 while (t >= static_cast<int>(kNumClasses)) {
2640 ASSERT(t < static_cast<int>(kNumClasses));
2641 if (t == static_cast<int>(locked_size_class)) return false;
2642 return central_cache[t].ShrinkCache(static_cast<int>(locked_size_class), force);
2645 bool TCMalloc_Central_FreeList::MakeCacheSpace() {
2646 // Is there room in the cache?
2647 if (used_slots_ < cache_size_) return true;
2648 // Check if we can expand this cache?
2649 if (cache_size_ == kNumTransferEntries) return false;
2650 // Ok, we'll try to grab an entry from some other size class.
2651 if (EvictRandomSizeClass(size_class_, false) ||
2652 EvictRandomSizeClass(size_class_, true)) {
2653 // Succeeded in evicting, we're going to make our cache larger.
2662 class LockInverter {
2664 SpinLock *held_, *temp_;
2666 inline explicit LockInverter(SpinLock* held, SpinLock *temp)
2667 : held_(held), temp_(temp) { held_->Unlock(); temp_->Lock(); }
2668 inline ~LockInverter() { temp_->Unlock(); held_->Lock(); }
2672 bool TCMalloc_Central_FreeList::ShrinkCache(int locked_size_class, bool force) {
2673 // Start with a quick check without taking a lock.
2674 if (cache_size_ == 0) return false;
2675 // We don't evict from a full cache unless we are 'forcing'.
2676 if (force == false && used_slots_ == cache_size_) return false;
2678 // Grab lock, but first release the other lock held by this thread. We use
2679 // the lock inverter to ensure that we never hold two size class locks
2680 // concurrently. That can create a deadlock because there is no well
2681 // defined nesting order.
2682 LockInverter li(¢ral_cache[locked_size_class].lock_, &lock_);
2683 ASSERT(used_slots_ <= cache_size_);
2684 ASSERT(0 <= cache_size_);
2685 if (cache_size_ == 0) return false;
2686 if (used_slots_ == cache_size_) {
2687 if (force == false) return false;
2688 // ReleaseListToSpans releases the lock, so we have to make all the
2689 // updates to the central list before calling it.
2692 ReleaseListToSpans(tc_slots_[used_slots_].head);
2699 void TCMalloc_Central_FreeList::InsertRange(void *start, void *end, int N) {
2700 SpinLockHolder h(&lock_);
2701 if (N == num_objects_to_move[size_class_] &&
2703 int slot = used_slots_++;
2705 ASSERT(slot < kNumTransferEntries);
2706 TCEntry *entry = &tc_slots_[slot];
2707 entry->head = start;
2711 ReleaseListToSpans(start);
2714 void TCMalloc_Central_FreeList::RemoveRange(void **start, void **end, int *N) {
2718 SpinLockHolder h(&lock_);
2719 if (num == num_objects_to_move[size_class_] && used_slots_ > 0) {
2720 int slot = --used_slots_;
2722 TCEntry *entry = &tc_slots_[slot];
2723 *start = entry->head;
2728 // TODO: Prefetch multiple TCEntries?
2729 void *tail = FetchFromSpansSafe();
2731 // We are completely out of memory.
2732 *start = *end = NULL;
2737 SLL_SetNext(tail, NULL);
2740 while (count < num) {
2741 void *t = FetchFromSpans();
2752 void* TCMalloc_Central_FreeList::FetchFromSpansSafe() {
2753 void *t = FetchFromSpans();
2756 t = FetchFromSpans();
2761 void* TCMalloc_Central_FreeList::FetchFromSpans() {
2762 if (DLL_IsEmpty(&nonempty_)) return NULL;
2763 Span* span = nonempty_.next;
2765 ASSERT(span->objects != NULL);
2766 ASSERT_SPAN_COMMITTED(span);
2768 void* result = span->objects;
2769 span->objects = *(reinterpret_cast<void**>(result));
2770 if (span->objects == NULL) {
2771 // Move to empty list
2773 DLL_Prepend(&empty_, span);
2774 Event(span, 'E', 0);
2780 // Fetch memory from the system and add to the central cache freelist.
2781 ALWAYS_INLINE void TCMalloc_Central_FreeList::Populate() {
2782 // Release central list lock while operating on pageheap
2784 const size_t npages = class_to_pages[size_class_];
2788 SpinLockHolder h(&pageheap_lock);
2789 span = pageheap->New(npages);
2790 if (span) pageheap->RegisterSizeClass(span, size_class_);
2794 MESSAGE("allocation failed: %d\n", errno);
2796 MESSAGE("allocation failed: %d\n", ::GetLastError());
2798 MESSAGE("allocation failed\n");
2803 ASSERT_SPAN_COMMITTED(span);
2804 ASSERT(span->length == npages);
2805 // Cache sizeclass info eagerly. Locking is not necessary.
2806 // (Instead of being eager, we could just replace any stale info
2807 // about this span, but that seems to be no better in practice.)
2808 for (size_t i = 0; i < npages; i++) {
2809 pageheap->CacheSizeClass(span->start + i, size_class_);
2812 // Split the block into pieces and add to the free-list
2813 // TODO: coloring of objects to avoid cache conflicts?
2814 void** tail = &span->objects;
2815 char* ptr = reinterpret_cast<char*>(span->start << kPageShift);
2816 char* limit = ptr + (npages << kPageShift);
2817 const size_t size = ByteSizeForClass(size_class_);
2820 while ((nptr = ptr + size) <= limit) {
2822 tail = reinterpret_cast_ptr<void**>(ptr);
2826 ASSERT(ptr <= limit);
2828 span->refcount = 0; // No sub-object in use yet
2830 // Add span to list of non-empty spans
2832 DLL_Prepend(&nonempty_, span);
2836 //-------------------------------------------------------------------
2837 // TCMalloc_ThreadCache implementation
2838 //-------------------------------------------------------------------
2840 inline bool TCMalloc_ThreadCache::SampleAllocation(size_t k) {
2841 if (bytes_until_sample_ < k) {
2845 bytes_until_sample_ -= k;
2850 void TCMalloc_ThreadCache::Init(ThreadIdentifier tid) {
2855 in_setspecific_ = false;
2856 for (size_t cl = 0; cl < kNumClasses; ++cl) {
2860 // Initialize RNG -- run it for a bit to get to good values
2861 bytes_until_sample_ = 0;
2862 rnd_ = static_cast<uint32_t>(reinterpret_cast<uintptr_t>(this));
2863 for (int i = 0; i < 100; i++) {
2864 PickNextSample(static_cast<size_t>(FLAGS_tcmalloc_sample_parameter * 2));
2868 void TCMalloc_ThreadCache::Cleanup() {
2869 // Put unused memory back into central cache
2870 for (size_t cl = 0; cl < kNumClasses; ++cl) {
2871 if (list_[cl].length() > 0) {
2872 ReleaseToCentralCache(cl, list_[cl].length());
2877 ALWAYS_INLINE void* TCMalloc_ThreadCache::Allocate(size_t size) {
2878 ASSERT(size <= kMaxSize);
2879 const size_t cl = SizeClass(size);
2880 FreeList* list = &list_[cl];
2881 size_t allocationSize = ByteSizeForClass(cl);
2882 if (list->empty()) {
2883 FetchFromCentralCache(cl, allocationSize);
2884 if (list->empty()) return NULL;
2886 size_ -= allocationSize;
2890 inline void TCMalloc_ThreadCache::Deallocate(void* ptr, size_t cl) {
2891 size_ += ByteSizeForClass(cl);
2892 FreeList* list = &list_[cl];
2894 // If enough data is free, put back into central cache
2895 if (list->length() > kMaxFreeListLength) {
2896 ReleaseToCentralCache(cl, num_objects_to_move[cl]);
2898 if (size_ >= per_thread_cache_size) Scavenge();
2901 // Remove some objects of class "cl" from central cache and add to thread heap
2902 ALWAYS_INLINE void TCMalloc_ThreadCache::FetchFromCentralCache(size_t cl, size_t allocationSize) {
2903 int fetch_count = num_objects_to_move[cl];
2905 central_cache[cl].RemoveRange(&start, &end, &fetch_count);
2906 list_[cl].PushRange(fetch_count, start, end);
2907 size_ += allocationSize * fetch_count;
2910 // Remove some objects of class "cl" from thread heap and add to central cache
2911 inline void TCMalloc_ThreadCache::ReleaseToCentralCache(size_t cl, int N) {
2913 FreeList* src = &list_[cl];
2914 if (N > src->length()) N = src->length();
2915 size_ -= N*ByteSizeForClass(cl);
2917 // We return prepackaged chains of the correct size to the central cache.
2918 // TODO: Use the same format internally in the thread caches?
2919 int batch_size = num_objects_to_move[cl];
2920 while (N > batch_size) {
2922 src->PopRange(batch_size, &head, &tail);
2923 central_cache[cl].InsertRange(head, tail, batch_size);
2927 src->PopRange(N, &head, &tail);
2928 central_cache[cl].InsertRange(head, tail, N);
2931 // Release idle memory to the central cache
2932 inline void TCMalloc_ThreadCache::Scavenge() {
2933 // If the low-water mark for the free list is L, it means we would
2934 // not have had to allocate anything from the central cache even if
2935 // we had reduced the free list size by L. We aim to get closer to
2936 // that situation by dropping L/2 nodes from the free list. This
2937 // may not release much memory, but if so we will call scavenge again
2938 // pretty soon and the low-water marks will be high on that call.
2939 //int64 start = CycleClock::Now();
2941 for (size_t cl = 0; cl < kNumClasses; cl++) {
2942 FreeList* list = &list_[cl];
2943 const int lowmark = list->lowwatermark();
2945 const int drop = (lowmark > 1) ? lowmark/2 : 1;
2946 ReleaseToCentralCache(cl, drop);
2948 list->clear_lowwatermark();
2951 //int64 finish = CycleClock::Now();
2953 //MESSAGE("GC: %.0f ns\n", ct.CyclesToUsec(finish-start)*1000.0);
2956 void TCMalloc_ThreadCache::PickNextSample(size_t k) {
2957 // Make next "random" number
2958 // x^32+x^22+x^2+x^1+1 is a primitive polynomial for random numbers
2959 static const uint32_t kPoly = (1 << 22) | (1 << 2) | (1 << 1) | (1 << 0);
2961 rnd_ = (r << 1) ^ ((static_cast<int32_t>(r) >> 31) & kPoly);
2963 // Next point is "rnd_ % (sample_period)". I.e., average
2964 // increment is "sample_period/2".
2965 const int flag_value = static_cast<int>(FLAGS_tcmalloc_sample_parameter);
2966 static int last_flag_value = -1;
2968 if (flag_value != last_flag_value) {
2969 SpinLockHolder h(&sample_period_lock);
2971 for (i = 0; i < (static_cast<int>(sizeof(primes_list)/sizeof(primes_list[0])) - 1); i++) {
2972 if (primes_list[i] >= flag_value) {
2976 sample_period = primes_list[i];
2977 last_flag_value = flag_value;
2980 bytes_until_sample_ += rnd_ % sample_period;
2982 if (k > (static_cast<size_t>(-1) >> 2)) {
2983 // If the user has asked for a huge allocation then it is possible
2984 // for the code below to loop infinitely. Just return (note that
2985 // this throws off the sampling accuracy somewhat, but a user who
2986 // is allocating more than 1G of memory at a time can live with a
2987 // minor inaccuracy in profiling of small allocations, and also
2988 // would rather not wait for the loop below to terminate).
2992 while (bytes_until_sample_ < k) {
2993 // Increase bytes_until_sample_ by enough average sampling periods
2994 // (sample_period >> 1) to allow us to sample past the current
2996 bytes_until_sample_ += (sample_period >> 1);
2999 bytes_until_sample_ -= k;
3002 void TCMalloc_ThreadCache::InitModule() {
3003 // There is a slight potential race here because of double-checked
3004 // locking idiom. However, as long as the program does a small
3005 // allocation before switching to multi-threaded mode, we will be
3006 // fine. We increase the chances of doing such a small allocation
3007 // by doing one in the constructor of the module_enter_exit_hook
3008 // object declared below.
3009 SpinLockHolder h(&pageheap_lock);
3015 threadheap_allocator.Init();
3016 span_allocator.Init();
3017 span_allocator.New(); // Reduce cache conflicts
3018 span_allocator.New(); // Reduce cache conflicts
3019 stacktrace_allocator.Init();
3020 DLL_Init(&sampled_objects);
3021 for (size_t i = 0; i < kNumClasses; ++i) {
3022 central_cache[i].Init(i);
3026 #if defined(WTF_CHANGES) && OS(DARWIN)
3027 FastMallocZone::init();
3032 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::NewHeap(ThreadIdentifier tid) {
3033 // Create the heap and add it to the linked list
3034 TCMalloc_ThreadCache *heap = threadheap_allocator.New();
3036 heap->next_ = thread_heaps;
3038 if (thread_heaps != NULL) thread_heaps->prev_ = heap;
3039 thread_heaps = heap;
3040 thread_heap_count++;
3041 RecomputeThreadCacheSize();
3045 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetThreadHeap() {
3047 // __thread is faster, but only when the kernel supports it
3048 if (KernelSupportsTLS())
3049 return threadlocal_heap;
3051 return static_cast<TCMalloc_ThreadCache*>(TlsGetValue(tlsIndex));
3052 #elif defined(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0)
3053 return static_cast<TCMalloc_ThreadCache*>(_pthread_getspecific_direct(__PTK_FRAMEWORK_JAVASCRIPTCORE_KEY0));
3055 return static_cast<TCMalloc_ThreadCache*>(pthread_getspecific(heap_key));
3059 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCache() {
3060 TCMalloc_ThreadCache* ptr = NULL;
3064 ptr = GetThreadHeap();
3066 if (ptr == NULL) ptr = CreateCacheIfNecessary();
3070 // In deletion paths, we do not try to create a thread-cache. This is
3071 // because we may be in the thread destruction code and may have
3072 // already cleaned up the cache for this thread.
3073 inline TCMalloc_ThreadCache* TCMalloc_ThreadCache::GetCacheIfPresent() {
3074 if (!tsd_inited) return NULL;
3075 void* const p = GetThreadHeap();
3076 return reinterpret_cast<TCMalloc_ThreadCache*>(p);
3079 void TCMalloc_ThreadCache::InitTSD() {
3080 ASSERT(!tsd_inited);
3081 pthread_key_create(&heap_key, DestroyThreadCache);
3083 tlsIndex = TlsAlloc();
3088 // We may have used a fake pthread_t for the main thread. Fix it.
3090 memset(&zero, 0, sizeof(zero));
3093 SpinLockHolder h(&pageheap_lock);
3095 ASSERT(pageheap_lock.IsHeld());
3097 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3100 h->tid_ = GetCurrentThreadId();
3103 if (pthread_equal(h->tid_, zero)) {
3104 h->tid_ = pthread_self();
3110 TCMalloc_ThreadCache* TCMalloc_ThreadCache::CreateCacheIfNecessary() {
3111 // Initialize per-thread data if necessary
3112 TCMalloc_ThreadCache* heap = NULL;
3114 SpinLockHolder h(&pageheap_lock);
3121 me = GetCurrentThreadId();
3124 // Early on in glibc's life, we cannot even call pthread_self()
3127 memset(&me, 0, sizeof(me));
3129 me = pthread_self();
3133 // This may be a recursive malloc call from pthread_setspecific()
3134 // In that case, the heap for this thread has already been created
3135 // and added to the linked list. So we search for that first.
3136 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3138 if (h->tid_ == me) {
3140 if (pthread_equal(h->tid_, me)) {
3147 if (heap == NULL) heap = NewHeap(me);
3150 // We call pthread_setspecific() outside the lock because it may
3151 // call malloc() recursively. The recursive call will never get
3152 // here again because it will find the already allocated heap in the
3153 // linked list of heaps.
3154 if (!heap->in_setspecific_ && tsd_inited) {
3155 heap->in_setspecific_ = true;
3156 setThreadHeap(heap);
3161 void TCMalloc_ThreadCache::BecomeIdle() {
3162 if (!tsd_inited) return; // No caches yet
3163 TCMalloc_ThreadCache* heap = GetThreadHeap();
3164 if (heap == NULL) return; // No thread cache to remove
3165 if (heap->in_setspecific_) return; // Do not disturb the active caller
3167 heap->in_setspecific_ = true;
3168 setThreadHeap(NULL);
3170 // Also update the copy in __thread
3171 threadlocal_heap = NULL;
3173 heap->in_setspecific_ = false;
3174 if (GetThreadHeap() == heap) {
3175 // Somehow heap got reinstated by a recursive call to malloc
3176 // from pthread_setspecific. We give up in this case.
3180 // We can now get rid of the heap
3184 void TCMalloc_ThreadCache::DestroyThreadCache(void* ptr) {
3185 // Note that "ptr" cannot be NULL since pthread promises not
3186 // to invoke the destructor on NULL values, but for safety,
3188 if (ptr == NULL) return;
3190 // Prevent fast path of GetThreadHeap() from returning heap.
3191 threadlocal_heap = NULL;
3193 DeleteCache(reinterpret_cast<TCMalloc_ThreadCache*>(ptr));
3196 void TCMalloc_ThreadCache::DeleteCache(TCMalloc_ThreadCache* heap) {
3197 // Remove all memory from heap
3200 // Remove from linked list
3201 SpinLockHolder h(&pageheap_lock);
3202 if (heap->next_ != NULL) heap->next_->prev_ = heap->prev_;
3203 if (heap->prev_ != NULL) heap->prev_->next_ = heap->next_;
3204 if (thread_heaps == heap) thread_heaps = heap->next_;
3205 thread_heap_count--;
3206 RecomputeThreadCacheSize();
3208 threadheap_allocator.Delete(heap);
3211 void TCMalloc_ThreadCache::RecomputeThreadCacheSize() {
3212 // Divide available space across threads
3213 int n = thread_heap_count > 0 ? thread_heap_count : 1;
3214 size_t space = overall_thread_cache_size / n;
3216 // Limit to allowed range
3217 if (space < kMinThreadCacheSize) space = kMinThreadCacheSize;
3218 if (space > kMaxThreadCacheSize) space = kMaxThreadCacheSize;
3220 per_thread_cache_size = space;
3223 void TCMalloc_ThreadCache::Print() const {
3224 for (size_t cl = 0; cl < kNumClasses; ++cl) {
3225 MESSAGE(" %5" PRIuS " : %4d len; %4d lo\n",
3226 ByteSizeForClass(cl),
3228 list_[cl].lowwatermark());
3232 // Extract interesting stats
3233 struct TCMallocStats {
3234 uint64_t system_bytes; // Bytes alloced from system
3235 uint64_t thread_bytes; // Bytes in thread caches
3236 uint64_t central_bytes; // Bytes in central cache
3237 uint64_t transfer_bytes; // Bytes in central transfer cache
3238 uint64_t pageheap_bytes; // Bytes in page heap
3239 uint64_t metadata_bytes; // Bytes alloced for metadata
3243 // Get stats into "r". Also get per-size-class counts if class_count != NULL
3244 static void ExtractStats(TCMallocStats* r, uint64_t* class_count) {
3245 r->central_bytes = 0;
3246 r->transfer_bytes = 0;
3247 for (int cl = 0; cl < kNumClasses; ++cl) {
3248 const int length = central_cache[cl].length();
3249 const int tc_length = central_cache[cl].tc_length();
3250 r->central_bytes += static_cast<uint64_t>(ByteSizeForClass(cl)) * length;
3251 r->transfer_bytes +=
3252 static_cast<uint64_t>(ByteSizeForClass(cl)) * tc_length;
3253 if (class_count) class_count[cl] = length + tc_length;
3256 // Add stats from per-thread heaps
3257 r->thread_bytes = 0;
3259 SpinLockHolder h(&pageheap_lock);
3260 for (TCMalloc_ThreadCache* h = thread_heaps; h != NULL; h = h->next_) {
3261 r->thread_bytes += h->Size();
3263 for (size_t cl = 0; cl < kNumClasses; ++cl) {
3264 class_count[cl] += h->freelist_length(cl);
3271 SpinLockHolder h(&pageheap_lock);
3272 r->system_bytes = pageheap->SystemBytes();
3273 r->metadata_bytes = metadata_system_bytes;
3274 r->pageheap_bytes = pageheap->FreeBytes();
3280 // WRITE stats to "out"
3281 static void DumpStats(TCMalloc_Printer* out, int level) {
3282 TCMallocStats stats;
3283 uint64_t class_count[kNumClasses];
3284 ExtractStats(&stats, (level >= 2 ? class_count : NULL));
3287 out->printf("------------------------------------------------\n");
3288 uint64_t cumulative = 0;
3289 for (int cl = 0; cl < kNumClasses; ++cl) {
3290 if (class_count[cl] > 0) {
3291 uint64_t class_bytes = class_count[cl] * ByteSizeForClass(cl);
3292 cumulative += class_bytes;
3293 out->printf("class %3d [ %8" PRIuS " bytes ] : "
3294 "%8" PRIu64 " objs; %5.1f MB; %5.1f cum MB\n",
3295 cl, ByteSizeForClass(cl),
3297 class_bytes / 1048576.0,
3298 cumulative / 1048576.0);
3302 SpinLockHolder h(&pageheap_lock);
3303 pageheap->Dump(out);
3306 const uint64_t bytes_in_use = stats.system_bytes
3307 - stats.pageheap_bytes
3308 - stats.central_bytes
3309 - stats.transfer_bytes
3310 - stats.thread_bytes;
3312 out->printf("------------------------------------------------\n"
3313 "MALLOC: %12" PRIu64 " Heap size\n"
3314 "MALLOC: %12" PRIu64 " Bytes in use by application\n"
3315 "MALLOC: %12" PRIu64 " Bytes free in page heap\n"
3316 "MALLOC: %12" PRIu64 " Bytes free in central cache\n"
3317 "MALLOC: %12" PRIu64 " Bytes free in transfer cache\n"
3318 "MALLOC: %12" PRIu64 " Bytes free in thread caches\n"
3319 "MALLOC: %12" PRIu64 " Spans in use\n"
3320 "MALLOC: %12" PRIu64 " Thread heaps in use\n"
3321 "MALLOC: %12" PRIu64 " Metadata allocated\n"
3322 "------------------------------------------------\n",
3325 stats.pageheap_bytes,
3326 stats.central_bytes,
3327 stats.transfer_bytes,
3329 uint64_t(span_allocator.inuse()),
3330 uint64_t(threadheap_allocator.inuse()),
3331 stats.metadata_bytes);
3334 static void PrintStats(int level) {
3335 const int kBufferSize = 16 << 10;
3336 char* buffer = new char[kBufferSize];
3337 TCMalloc_Printer printer(buffer, kBufferSize);
3338 DumpStats(&printer, level);
3339 write(STDERR_FILENO, buffer, strlen(buffer));
3343 static void** DumpStackTraces() {
3344 // Count how much space we need
3345 int needed_slots = 0;
3347 SpinLockHolder h(&pageheap_lock);
3348 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
3349 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
3350 needed_slots += 3 + stack->depth;
3352 needed_slots += 100; // Slop in case sample grows
3353 needed_slots += needed_slots/8; // An extra 12.5% slop
3356 void** result = new void*[needed_slots];
3357 if (result == NULL) {
3358 MESSAGE("tcmalloc: could not allocate %d slots for stack traces\n",
3363 SpinLockHolder h(&pageheap_lock);
3365 for (Span* s = sampled_objects.next; s != &sampled_objects; s = s->next) {
3366 ASSERT(used_slots < needed_slots); // Need to leave room for terminator
3367 StackTrace* stack = reinterpret_cast<StackTrace*>(s->objects);
3368 if (used_slots + 3 + stack->depth >= needed_slots) {
3373 result[used_slots+0] = reinterpret_cast<void*>(static_cast<uintptr_t>(1));
3374 result[used_slots+1] = reinterpret_cast<void*>(stack->size);
3375 result[used_slots+2] = reinterpret_cast<void*>(stack->depth);
3376 for (int d = 0; d < stack->depth; d++) {
3377 result[used_slots+3+d] = stack->stack[d];
3379 used_slots += 3 + stack->depth;
3381 result[used_slots] = reinterpret_cast<void*>(static_cast<uintptr_t>(0));
3388 // TCMalloc's support for extra malloc interfaces
3389 class TCMallocImplementation : public MallocExtension {
3391 virtual void GetStats(char* buffer, int buffer_length) {
3392 ASSERT(buffer_length > 0);
3393 TCMalloc_Printer printer(buffer, buffer_length);
3395 // Print level one stats unless lots of space is available
3396 if (buffer_length < 10000) {
3397 DumpStats(&printer, 1);
3399 DumpStats(&printer, 2);
3403 virtual void** ReadStackTraces() {
3404 return DumpStackTraces();
3407 virtual bool GetNumericProperty(const char* name, size_t* value) {
3408 ASSERT(name != NULL);
3410 if (strcmp(name, "generic.current_allocated_bytes") == 0) {
3411 TCMallocStats stats;
3412 ExtractStats(&stats, NULL);
3413 *value = stats.system_bytes
3414 - stats.thread_bytes
3415 - stats.central_bytes
3416 - stats.pageheap_bytes;
3420 if (strcmp(name, "generic.heap_size") == 0) {
3421 TCMallocStats stats;
3422 ExtractStats(&stats, NULL);
3423 *value = stats.system_bytes;
3427 if (strcmp(name, "tcmalloc.slack_bytes") == 0) {
3428 // We assume that bytes in the page heap are not fragmented too
3429 // badly, and are therefore available for allocation.
3430 SpinLockHolder l(&pageheap_lock);
3431 *value = pageheap->FreeBytes();
3435 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
3436 SpinLockHolder l(&pageheap_lock);
3437 *value = overall_thread_cache_size;
3441 if (strcmp(name, "tcmalloc.current_total_thread_cache_bytes") == 0) {
3442 TCMallocStats stats;
3443 ExtractStats(&stats, NULL);
3444 *value = stats.thread_bytes;
3451 virtual bool SetNumericProperty(const char* name, size_t value) {
3452 ASSERT(name != NULL);
3454 if (strcmp(name, "tcmalloc.max_total_thread_cache_bytes") == 0) {
3455 // Clip the value to a reasonable range
3456 if (value < kMinThreadCacheSize) value = kMinThreadCacheSize;
3457 if (value > (1<<30)) value = (1<<30); // Limit to 1GB
3459 SpinLockHolder l(&pageheap_lock);
3460 overall_thread_cache_size = static_cast<size_t>(value);
3461 TCMalloc_ThreadCache::RecomputeThreadCacheSize();
3468 virtual void MarkThreadIdle() {
3469 TCMalloc_ThreadCache::BecomeIdle();
3472 virtual void ReleaseFreeMemory() {
3473 SpinLockHolder h(&pageheap_lock);
3474 pageheap->ReleaseFreePages();
3479 // The constructor allocates an object to ensure that initialization
3480 // runs before main(), and therefore we do not have a chance to become
3481 // multi-threaded before initialization. We also create the TSD key
3482 // here. Presumably by the time this constructor runs, glibc is in
3483 // good enough shape to handle pthread_key_create().
3485 // The constructor also takes the opportunity to tell STL to use
3486 // tcmalloc. We want to do this early, before construct time, so
3487 // all user STL allocations go through tcmalloc (which works really
3490 // The destructor prints stats when the program exits.
3491 class TCMallocGuard {
3495 #ifdef HAVE_TLS // this is true if the cc/ld/libc combo support TLS
3496 // Check whether the kernel also supports TLS (needs to happen at runtime)
3497 CheckIfKernelSupportsTLS();
3500 #ifdef WIN32 // patch the windows VirtualAlloc, etc.
3501 PatchWindowsFunctions(); // defined in windows/patch_functions.cc
3505 TCMalloc_ThreadCache::InitTSD();
3508 MallocExtension::Register(new TCMallocImplementation);
3514 const char* env = getenv("MALLOCSTATS");
3516 int level = atoi(env);
3517 if (level < 1) level = 1;
3521 UnpatchWindowsFunctions();
3528 static TCMallocGuard module_enter_exit_hook;
3532 //-------------------------------------------------------------------
3533 // Helpers for the exported routines below
3534 //-------------------------------------------------------------------
3538 static Span* DoSampledAllocation(size_t size) {
3540 // Grab the stack trace outside the heap lock
3542 tmp.depth = GetStackTrace(tmp.stack, kMaxStackDepth, 1);
3545 SpinLockHolder h(&pageheap_lock);
3547 Span *span = pageheap->New(pages(size == 0 ? 1 : size));
3552 // Allocate stack trace
3553 StackTrace *stack = stacktrace_allocator.New();
3554 if (stack == NULL) {
3555 // Sampling failed because of lack of memory
3561 span->objects = stack;
3562 DLL_Prepend(&sampled_objects, span);
3568 static inline bool CheckCachedSizeClass(void *ptr) {
3569 PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
3570 size_t cached_value = pageheap->GetSizeClassIfCached(p);
3571 return cached_value == 0 ||
3572 cached_value == pageheap->GetDescriptor(p)->sizeclass;
3575 static inline void* CheckedMallocResult(void *result)
3577 ASSERT(result == 0 || CheckCachedSizeClass(result));
3581 static inline void* SpanToMallocResult(Span *span) {
3582 ASSERT_SPAN_COMMITTED(span);
3583 pageheap->CacheSizeClass(span->start, 0);
3585 CheckedMallocResult(reinterpret_cast<void*>(span->start << kPageShift));
3589 template <bool crashOnFailure>
3591 static ALWAYS_INLINE void* do_malloc(size_t size) {
3595 ASSERT(!isForbidden());
3598 // The following call forces module initialization
3599 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
3601 if ((FLAGS_tcmalloc_sample_parameter > 0) && heap->SampleAllocation(size)) {
3602 Span* span = DoSampledAllocation(size);
3604 ret = SpanToMallocResult(span);
3608 if (size > kMaxSize) {
3609 // Use page-level allocator
3610 SpinLockHolder h(&pageheap_lock);
3611 Span* span = pageheap->New(pages(size));
3613 ret = SpanToMallocResult(span);
3616 // The common case, and also the simplest. This just pops the
3617 // size-appropriate freelist, afer replenishing it if it's empty.
3618 ret = CheckedMallocResult(heap->Allocate(size));
3622 if (crashOnFailure) // This branch should be optimized out by the compiler.
3631 static ALWAYS_INLINE void do_free(void* ptr) {
3632 if (ptr == NULL) return;
3633 ASSERT(pageheap != NULL); // Should not call free() before malloc()
3634 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
3636 size_t cl = pageheap->GetSizeClassIfCached(p);
3639 span = pageheap->GetDescriptor(p);
3640 cl = span->sizeclass;
3641 pageheap->CacheSizeClass(p, cl);
3644 #ifndef NO_TCMALLOC_SAMPLES
3645 ASSERT(!pageheap->GetDescriptor(p)->sample);
3647 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCacheIfPresent();
3649 heap->Deallocate(ptr, cl);
3651 // Delete directly into central cache
3652 SLL_SetNext(ptr, NULL);
3653 central_cache[cl].InsertRange(ptr, ptr, 1);
3656 SpinLockHolder h(&pageheap_lock);
3657 ASSERT(reinterpret_cast<uintptr_t>(ptr) % kPageSize == 0);
3658 ASSERT(span != NULL && span->start == p);
3659 #ifndef NO_TCMALLOC_SAMPLES
3662 stacktrace_allocator.Delete(reinterpret_cast<StackTrace*>(span->objects));
3663 span->objects = NULL;
3666 pageheap->Delete(span);
3671 // For use by exported routines below that want specific alignments
3673 // Note: this code can be slow, and can significantly fragment memory.
3674 // The expectation is that memalign/posix_memalign/valloc/pvalloc will
3675 // not be invoked very often. This requirement simplifies our
3676 // implementation and allows us to tune for expected allocation
3678 static void* do_memalign(size_t align, size_t size) {
3679 ASSERT((align & (align - 1)) == 0);
3681 if (pageheap == NULL) TCMalloc_ThreadCache::InitModule();
3683 // Allocate at least one byte to avoid boundary conditions below
3684 if (size == 0) size = 1;
3686 if (size <= kMaxSize && align < kPageSize) {
3687 // Search through acceptable size classes looking for one with
3688 // enough alignment. This depends on the fact that
3689 // InitSizeClasses() currently produces several size classes that
3690 // are aligned at powers of two. We will waste time and space if
3691 // we miss in the size class array, but that is deemed acceptable
3692 // since memalign() should be used rarely.
3693 size_t cl = SizeClass(size);
3694 while (cl < kNumClasses && ((class_to_size[cl] & (align - 1)) != 0)) {
3697 if (cl < kNumClasses) {
3698 TCMalloc_ThreadCache* heap = TCMalloc_ThreadCache::GetCache();
3699 return CheckedMallocResult(heap->Allocate(class_to_size[cl]));
3703 // We will allocate directly from the page heap
3704 SpinLockHolder h(&pageheap_lock);
3706 if (align <= kPageSize) {
3707 // Any page-level allocation will be fine
3708 // TODO: We could put the rest of this page in the appropriate
3709 // TODO: cache but it does not seem worth it.
3710 Span* span = pageheap->New(pages(size));
3711 return span == NULL ? NULL : SpanToMallocResult(span);
3714 // Allocate extra pages and carve off an aligned portion
3715 const Length alloc = pages(size + align);
3716 Span* span = pageheap->New(alloc);
3717 if (span == NULL) return NULL;
3719 // Skip starting portion so that we end up aligned
3721 while ((((span->start+skip) << kPageShift) & (align - 1)) != 0) {
3724 ASSERT(skip < alloc);
3726 Span* rest = pageheap->Split(span, skip);
3727 pageheap->Delete(span);
3731 // Skip trailing portion that we do not need to return
3732 const Length needed = pages(size);
3733 ASSERT(span->length >= needed);
3734 if (span->length > needed) {
3735 Span* trailer = pageheap->Split(span, needed);
3736 pageheap->Delete(trailer);
3738 return SpanToMallocResult(span);
3742 // Helpers for use by exported routines below:
3745 static inline void do_malloc_stats() {
3750 static inline int do_mallopt(int, int) {
3751 return 1; // Indicates error
3754 #ifdef HAVE_STRUCT_MALLINFO // mallinfo isn't defined on freebsd, for instance
3755 static inline struct mallinfo do_mallinfo() {
3756 TCMallocStats stats;
3757 ExtractStats(&stats, NULL);
3759 // Just some of the fields are filled in.
3760 struct mallinfo info;
3761 memset(&info, 0, sizeof(info));
3763 // Unfortunately, the struct contains "int" field, so some of the
3764 // size values will be truncated.
3765 info.arena = static_cast<int>(stats.system_bytes);
3766 info.fsmblks = static_cast<int>(stats.thread_bytes
3767 + stats.central_bytes
3768 + stats.transfer_bytes);
3769 info.fordblks = static_cast<int>(stats.pageheap_bytes);
3770 info.uordblks = static_cast<int>(stats.system_bytes
3771 - stats.thread_bytes
3772 - stats.central_bytes
3773 - stats.transfer_bytes
3774 - stats.pageheap_bytes);
3780 //-------------------------------------------------------------------
3781 // Exported routines
3782 //-------------------------------------------------------------------
3784 // CAVEAT: The code structure below ensures that MallocHook methods are always
3785 // called from the stack frame of the invoked allocation function.
3786 // heap-checker.cc depends on this to start a stack trace from
3787 // the call to the (de)allocation function.
3792 #define do_malloc do_malloc<crashOnFailure>
3794 template <bool crashOnFailure>
3795 ALWAYS_INLINE void* malloc(size_t);
3797 void* fastMalloc(size_t size)
3799 return malloc<true>(size);
3802 TryMallocReturnValue tryFastMalloc(size_t size)
3804 return malloc<false>(size);
3807 template <bool crashOnFailure>
3810 void* malloc(size_t size) {
3811 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3812 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= size) // If overflow would occur...
3814 size += sizeof(AllocAlignmentInteger);
3815 void* result = do_malloc(size);
3819 *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
3820 result = static_cast<AllocAlignmentInteger*>(result) + 1;
3822 void* result = do_malloc(size);
3826 MallocHook::InvokeNewHook(result, size);
3834 void free(void* ptr) {
3836 MallocHook::InvokeDeleteHook(ptr);
3839 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3843 AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(ptr);
3844 if (*header != Internal::AllocTypeMalloc)
3845 Internal::fastMallocMatchFailed(ptr);
3855 template <bool crashOnFailure>
3856 ALWAYS_INLINE void* calloc(size_t, size_t);
3858 void* fastCalloc(size_t n, size_t elem_size)
3860 return calloc<true>(n, elem_size);
3863 TryMallocReturnValue tryFastCalloc(size_t n, size_t elem_size)
3865 return calloc<false>(n, elem_size);
3868 template <bool crashOnFailure>
3871 void* calloc(size_t n, size_t elem_size) {
3872 size_t totalBytes = n * elem_size;
3874 // Protect against overflow
3875 if (n > 1 && elem_size && (totalBytes / elem_size) != n)
3878 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3879 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= totalBytes) // If overflow would occur...
3882 totalBytes += sizeof(AllocAlignmentInteger);
3883 void* result = do_malloc(totalBytes);
3887 memset(result, 0, totalBytes);
3888 *static_cast<AllocAlignmentInteger*>(result) = Internal::AllocTypeMalloc;
3889 result = static_cast<AllocAlignmentInteger*>(result) + 1;
3891 void* result = do_malloc(totalBytes);
3892 if (result != NULL) {
3893 memset(result, 0, totalBytes);
3898 MallocHook::InvokeNewHook(result, totalBytes);
3903 // Since cfree isn't used anywhere, we don't compile it in.
3908 void cfree(void* ptr) {
3910 MallocHook::InvokeDeleteHook(ptr);
3919 template <bool crashOnFailure>
3920 ALWAYS_INLINE void* realloc(void*, size_t);
3922 void* fastRealloc(void* old_ptr, size_t new_size)
3924 return realloc<true>(old_ptr, new_size);
3927 TryMallocReturnValue tryFastRealloc(void* old_ptr, size_t new_size)
3929 return realloc<false>(old_ptr, new_size);
3932 template <bool crashOnFailure>
3935 void* realloc(void* old_ptr, size_t new_size) {
3936 if (old_ptr == NULL) {
3937 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3938 void* result = malloc(new_size);
3940 void* result = do_malloc(new_size);
3942 MallocHook::InvokeNewHook(result, new_size);
3947 if (new_size == 0) {
3949 MallocHook::InvokeDeleteHook(old_ptr);
3955 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
3956 if (std::numeric_limits<size_t>::max() - sizeof(AllocAlignmentInteger) <= new_size) // If overflow would occur...
3958 new_size += sizeof(AllocAlignmentInteger);
3959 AllocAlignmentInteger* header = Internal::fastMallocMatchValidationValue(old_ptr);
3960 if (*header != Internal::AllocTypeMalloc)
3961 Internal::fastMallocMatchFailed(old_ptr);
3965 // Get the size of the old entry
3966 const PageID p = reinterpret_cast<uintptr_t>(old_ptr) >> kPageShift;
3967 size_t cl = pageheap->GetSizeClassIfCached(p);
3971 span = pageheap->GetDescriptor(p);
3972 cl = span->sizeclass;
3973 pageheap->CacheSizeClass(p, cl);
3976 old_size = ByteSizeForClass(cl);
3978 ASSERT(span != NULL);
3979 old_size = span->length << kPageShift;
3982 // Reallocate if the new size is larger than the old size,
3983 // or if the new size is significantly smaller than the old size.
3984 if ((new_size > old_size) || (AllocationSize(new_size) < old_size)) {
3985 // Need to reallocate
3986 void* new_ptr = do_malloc(new_size);
3987 if (new_ptr == NULL) {
3991 MallocHook::InvokeNewHook(new_ptr, new_size);
3993 memcpy(new_ptr, old_ptr, ((old_size < new_size) ? old_size : new_size));
3995 MallocHook::InvokeDeleteHook(old_ptr);
3997 // We could use a variant of do_free() that leverages the fact
3998 // that we already know the sizeclass of old_ptr. The benefit
3999 // would be small, so don't bother.
4001 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
4002 new_ptr = static_cast<AllocAlignmentInteger*>(new_ptr) + 1;
4006 #if ENABLE(FAST_MALLOC_MATCH_VALIDATION)
4007 old_ptr = static_cast<AllocAlignmentInteger*>(old_ptr) + 1; // Set old_ptr back to the user pointer.
4017 static SpinLock set_new_handler_lock = SPINLOCK_INITIALIZER;
4019 static inline void* cpp_alloc(size_t size, bool nothrow) {
4021 void* p = do_malloc(size);
4025 if (p == NULL) { // allocation failed
4026 // Get the current new handler. NB: this function is not
4027 // thread-safe. We make a feeble stab at making it so here, but
4028 // this lock only protects against tcmalloc interfering with
4029 // itself, not with other libraries calling set_new_handler.
4030 std::new_handler nh;
4032 SpinLockHolder h(&set_new_handler_lock);
4033 nh = std::set_new_handler(0);
4034 (void) std::set_new_handler(nh);
4036 // If no new_handler is established, the allocation failed.
4038 if (nothrow) return 0;
4039 throw std::bad_alloc();
4041 // Otherwise, try the new_handler. If it returns, retry the
4042 // allocation. If it throws std::bad_alloc, fail the allocation.
4043 // if it throws something else, don't interfere.
4046 } catch (const std::bad_alloc&) {
4047 if (!nothrow) throw;
4050 } else { // allocation success
4057 #if ENABLE(GLOBAL_FASTMALLOC_NEW)
4059 void* operator new(size_t size) {
4060 void* p = cpp_alloc(size, false);
4061 // We keep this next instruction out of cpp_alloc for a reason: when
4062 // it's in, and new just calls cpp_alloc, the optimizer may fold the
4063 // new call into cpp_alloc, which messes up our whole section-based
4064 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
4065 // isn't the last thing this fn calls, and prevents the folding.
4066 MallocHook::InvokeNewHook(p, size);
4070 void* operator new(size_t size, const std::nothrow_t&) __THROW {
4071 void* p = cpp_alloc(size, true);
4072 MallocHook::InvokeNewHook(p, size);
4076 void operator delete(void* p) __THROW {
4077 MallocHook::InvokeDeleteHook(p);
4081 void operator delete(void* p, const std::nothrow_t&) __THROW {
4082 MallocHook::InvokeDeleteHook(p);
4086 void* operator new[](size_t size) {
4087 void* p = cpp_alloc(size, false);
4088 // We keep this next instruction out of cpp_alloc for a reason: when
4089 // it's in, and new just calls cpp_alloc, the optimizer may fold the
4090 // new call into cpp_alloc, which messes up our whole section-based
4091 // stacktracing (see ATTRIBUTE_SECTION, above). This ensures cpp_alloc
4092 // isn't the last thing this fn calls, and prevents the folding.
4093 MallocHook::InvokeNewHook(p, size);
4097 void* operator new[](size_t size, const std::nothrow_t&) __THROW {
4098 void* p = cpp_alloc(size, true);
4099 MallocHook::InvokeNewHook(p, size);
4103 void operator delete[](void* p) __THROW {
4104 MallocHook::InvokeDeleteHook(p);
4108 void operator delete[](void* p, const std::nothrow_t&) __THROW {
4109 MallocHook::InvokeDeleteHook(p);
4115 extern "C" void* memalign(size_t align, size_t size) __THROW {
4116 void* result = do_memalign(align, size);
4117 MallocHook::InvokeNewHook(result, size);
4121 extern "C" int posix_memalign(void** result_ptr, size_t align, size_t size)
4123 if (((align % sizeof(void*)) != 0) ||
4124 ((align & (align - 1)) != 0) ||
4129 void* result = do_memalign(align, size);
4130 MallocHook::InvokeNewHook(result, size);
4131 if (result == NULL) {
4134 *result_ptr = result;
4139 static size_t pagesize = 0;
4141 extern "C" void* valloc(size_t size) __THROW {
4142 // Allocate page-aligned object of length >= size bytes
4143 if (pagesize == 0) pagesize = getpagesize();
4144 void* result = do_memalign(pagesize, size);
4145 MallocHook::InvokeNewHook(result, size);
4149 extern "C" void* pvalloc(size_t size) __THROW {
4150 // Round up size to a multiple of pagesize
4151 if (pagesize == 0) pagesize = getpagesize();
4152 size = (size + pagesize - 1) & ~(pagesize - 1);
4153 void* result = do_memalign(pagesize, size);
4154 MallocHook::InvokeNewHook(result, size);
4158 extern "C" void malloc_stats(void) {
4162 extern "C" int mallopt(int cmd, int value) {
4163 return do_mallopt(cmd, value);
4166 #ifdef HAVE_STRUCT_MALLINFO
4167 extern "C" struct mallinfo mallinfo(void) {
4168 return do_mallinfo();
4172 //-------------------------------------------------------------------
4173 // Some library routines on RedHat 9 allocate memory using malloc()
4174 // and free it using __libc_free() (or vice-versa). Since we provide
4175 // our own implementations of malloc/free, we need to make sure that
4176 // the __libc_XXX variants (defined as part of glibc) also point to
4177 // the same implementations.
4178 //-------------------------------------------------------------------
4180 #if defined(__GLIBC__)
4182 #if COMPILER(GCC) && !defined(__MACH__) && defined(HAVE___ATTRIBUTE__)
4183 // Potentially faster variants that use the gcc alias extension.
4184 // Mach-O (Darwin) does not support weak aliases, hence the __MACH__ check.
4185 # define ALIAS(x) __attribute__ ((weak, alias (x)))
4186 void* __libc_malloc(size_t size) ALIAS("malloc");
4187 void __libc_free(void* ptr) ALIAS("free");
4188 void* __libc_realloc(void* ptr, size_t size) ALIAS("realloc");
4189 void* __libc_calloc(size_t n, size_t size) ALIAS("calloc");
4190 void __libc_cfree(void* ptr) ALIAS("cfree");
4191 void* __libc_memalign(size_t align, size_t s) ALIAS("memalign");
4192 void* __libc_valloc(size_t size) ALIAS("valloc");
4193 void* __libc_pvalloc(size_t size) ALIAS("pvalloc");
4194 int __posix_memalign(void** r, size_t a, size_t s) ALIAS("posix_memalign");
4196 # else /* not __GNUC__ */
4197 // Portable wrappers
4198 void* __libc_malloc(size_t size) { return malloc(size); }
4199 void __libc_free(void* ptr) { free(ptr); }
4200 void* __libc_realloc(void* ptr, size_t size) { return realloc(ptr, size); }
4201 void* __libc_calloc(size_t n, size_t size) { return calloc(n, size); }
4202 void __libc_cfree(void* ptr) { cfree(ptr); }
4203 void* __libc_memalign(size_t align, size_t s) { return memalign(align, s); }
4204 void* __libc_valloc(size_t size) { return valloc(size); }
4205 void* __libc_pvalloc(size_t size) { return pvalloc(size); }
4206 int __posix_memalign(void** r, size_t a, size_t s) {
4207 return posix_memalign(r, a, s);
4209 # endif /* __GNUC__ */
4211 #endif /* __GLIBC__ */
4213 // Override __libc_memalign in libc on linux boxes specially.
4214 // They have a bug in libc that causes them to (very rarely) allocate
4215 // with __libc_memalign() yet deallocate with free() and the
4216 // definitions above don't catch it.
4217 // This function is an exception to the rule of calling MallocHook method
4218 // from the stack frame of the allocation function;
4219 // heap-checker handles this special case explicitly.
4220 static void *MemalignOverride(size_t align, size_t size, const void *caller)
4222 void* result = do_memalign(align, size);
4223 MallocHook::InvokeNewHook(result, size);
4226 void *(*__memalign_hook)(size_t, size_t, const void *) = MemalignOverride;
4231 void releaseFastMallocFreeMemory()
4233 // Flush free pages in the current thread cache back to the page heap.
4234 // Low watermark mechanism in Scavenge() prevents full return on the first pass.
4235 // The second pass flushes everything.
4236 if (TCMalloc_ThreadCache* threadCache = TCMalloc_ThreadCache::GetCacheIfPresent()) {
4237 threadCache->Scavenge();
4238 threadCache->Scavenge();
4241 SpinLockHolder h(&pageheap_lock);
4242 pageheap->ReleaseFreePages();
4245 FastMallocStatistics fastMallocStatistics()
4247 FastMallocStatistics statistics;
4249 SpinLockHolder lockHolder(&pageheap_lock);
4250 statistics.reservedVMBytes = static_cast<size_t>(pageheap->SystemBytes());
4251 statistics.committedVMBytes = statistics.reservedVMBytes - pageheap->ReturnedBytes();
4253 statistics.freeListBytes = 0;
4254 for (unsigned cl = 0; cl < kNumClasses; ++cl) {
4255 const int length = central_cache[cl].length();
4256 const int tc_length = central_cache[cl].tc_length();
4258 statistics.freeListBytes += ByteSizeForClass(cl) * (length + tc_length);
4260 for (TCMalloc_ThreadCache* threadCache = thread_heaps; threadCache ; threadCache = threadCache->next_)
4261 statistics.freeListBytes += threadCache->Size();
4266 size_t fastMallocSize(const void* ptr)
4268 const PageID p = reinterpret_cast<uintptr_t>(ptr) >> kPageShift;
4269 Span* span = pageheap->GetDescriptorEnsureSafe(p);
4271 if (!span || span->free)
4274 for (void* free = span->objects; free != NULL; free = *((void**) free)) {
4279 if (size_t cl = span->sizeclass)
4280 return ByteSizeForClass(cl);
4282 return span->length << kPageShift;
4287 class FreeObjectFinder {
4288 const RemoteMemoryReader& m_reader;
4289 HashSet<void*> m_freeObjects;
4292 FreeObjectFinder(const RemoteMemoryReader& reader) : m_reader(reader) { }
4294 void visit(void* ptr) { m_freeObjects.add(ptr); }
4295 bool isFreeObject(void* ptr) const { return m_freeObjects.contains(ptr); }
4296 bool isFreeObject(vm_address_t ptr) const { return isFreeObject(reinterpret_cast<void*>(ptr)); }
4297 size_t freeObjectCount() const { return m_freeObjects.size(); }
4299 void findFreeObjects(TCMalloc_ThreadCache* threadCache)
4301 for (; threadCache; threadCache = (threadCache->next_ ? m_reader(threadCache->next_) : 0))
4302 threadCache->enumerateFreeObjects(*this, m_reader);
4305 void findFreeObjects(TCMalloc_Central_FreeListPadded* centralFreeList, size_t numSizes, TCMalloc_Central_FreeListPadded* remoteCentralFreeList)
4307 for (unsigned i = 0; i < numSizes; i++)
4308 centralFreeList[i].enumerateFreeObjects(*this, m_reader, remoteCentralFreeList + i);
4312 class PageMapFreeObjectFinder {
4313 const RemoteMemoryReader& m_reader;
4314 FreeObjectFinder& m_freeObjectFinder;
4317 PageMapFreeObjectFinder(const RemoteMemoryReader& reader, FreeObjectFinder& freeObjectFinder)
4319 , m_freeObjectFinder(freeObjectFinder)
4322 int visit(void* ptr) const
4327 Span* span = m_reader(reinterpret_cast<Span*>(ptr));
4329 void* ptr = reinterpret_cast<void*>(span->start << kPageShift);
4330 m_freeObjectFinder.visit(ptr);
4331 } else if (span->sizeclass) {
4332 // Walk the free list of the small-object span, keeping track of each object seen
4333 for (void* nextObject = span->objects; nextObject; nextObject = *m_reader(reinterpret_cast<void**>(nextObject)))
4334 m_freeObjectFinder.visit(nextObject);
4336 return span->length;
4340 class PageMapMemoryUsageRecorder {
4343 unsigned m_typeMask;
4344 vm_range_recorder_t* m_recorder;
4345 const RemoteMemoryReader& m_reader;
4346 const FreeObjectFinder& m_freeObjectFinder;
4348 HashSet<void*> m_seenPointers;
4349 Vector<Span*> m_coalescedSpans;
4352 PageMapMemoryUsageRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader, const FreeObjectFinder& freeObjectFinder)
4354 , m_context(context)
4355 , m_typeMask(typeMask)
4356 , m_recorder(recorder)
4358 , m_freeObjectFinder(freeObjectFinder)
4361 ~PageMapMemoryUsageRecorder()
4363 ASSERT(!m_coalescedSpans.size());
4366 void recordPendingRegions()
4368 Span* lastSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
4369 vm_range_t ptrRange = { m_coalescedSpans[0]->start << kPageShift, 0 };
4370 ptrRange.size = (lastSpan->start << kPageShift) - ptrRange.address + (lastSpan->length * kPageSize);
4372 // Mark the memory region the spans represent as a candidate for containing pointers
4373 if (m_typeMask & MALLOC_PTR_REGION_RANGE_TYPE)
4374 (*m_recorder)(m_task, m_context, MALLOC_PTR_REGION_RANGE_TYPE, &ptrRange, 1);
4376 if (!(m_typeMask & MALLOC_PTR_IN_USE_RANGE_TYPE)) {
4377 m_coalescedSpans.clear();
4381 Vector<vm_range_t, 1024> allocatedPointers;
4382 for (size_t i = 0; i < m_coalescedSpans.size(); ++i) {
4383 Span *theSpan = m_coalescedSpans[i];
4387 vm_address_t spanStartAddress = theSpan->start << kPageShift;
4388 vm_size_t spanSizeInBytes = theSpan->length * kPageSize;
4390 if (!theSpan->sizeclass) {
4391 // If it's an allocated large object span, mark it as in use
4392 if (!m_freeObjectFinder.isFreeObject(spanStartAddress))
4393 allocatedPointers.append((vm_range_t){spanStartAddress, spanSizeInBytes});
4395 const size_t objectSize = ByteSizeForClass(theSpan->sizeclass);
4397 // Mark each allocated small object within the span as in use
4398 const vm_address_t endOfSpan = spanStartAddress + spanSizeInBytes;
4399 for (vm_address_t object = spanStartAddress; object + objectSize <= endOfSpan; object += objectSize) {
4400 if (!m_freeObjectFinder.isFreeObject(object))
4401 allocatedPointers.append((vm_range_t){object, objectSize});
4406 (*m_recorder)(m_task, m_context, MALLOC_PTR_IN_USE_RANGE_TYPE, allocatedPointers.data(), allocatedPointers.size());
4408 m_coalescedSpans.clear();
4411 int visit(void* ptr)
4416 Span* span = m_reader(reinterpret_cast<Span*>(ptr));
4420 if (m_seenPointers.contains(ptr))
4421 return span->length;
4422 m_seenPointers.add(ptr);
4424 if (!m_coalescedSpans.size()) {
4425 m_coalescedSpans.append(span);
4426 return span->length;
4429 Span* previousSpan = m_coalescedSpans[m_coalescedSpans.size() - 1];
4430 vm_address_t previousSpanStartAddress = previousSpan->start << kPageShift;
4431 vm_size_t previousSpanSizeInBytes = previousSpan->length * kPageSize;
4433 // If the new span is adjacent to the previous span, do nothing for now.
4434 vm_address_t spanStartAddress = span->start << kPageShift;
4435 if (spanStartAddress == previousSpanStartAddress + previousSpanSizeInBytes) {
4436 m_coalescedSpans.append(span);
4437 return span->length;
4440 // New span is not adjacent to previous span, so record the spans coalesced so far.
4441 recordPendingRegions();
4442 m_coalescedSpans.append(span);
4444 return span->length;
4448 class AdminRegionRecorder {
4451 unsigned m_typeMask;
4452 vm_range_recorder_t* m_recorder;
4453 const RemoteMemoryReader& m_reader;
4455 Vector<vm_range_t, 1024> m_pendingRegions;
4458 AdminRegionRecorder(task_t task, void* context, unsigned typeMask, vm_range_recorder_t* recorder, const RemoteMemoryReader& reader)
4460 , m_context(context)
4461 , m_typeMask(typeMask)
4462 , m_recorder(recorder)
4466 void recordRegion(vm_address_t ptr, size_t size)
4468 if (m_typeMask & MALLOC_ADMIN_REGION_RANGE_TYPE)
4469 m_pendingRegions.append((vm_range_t){ ptr, size });
4472 void visit(void *ptr, size_t size)
4474 recordRegion(reinterpret_cast<vm_address_t>(ptr), size);
4477 void recordPendingRegions()
4479 if (m_pendingRegions.size()) {
4480 (*m_recorder)(m_task, m_context, MALLOC_ADMIN_REGION_RANGE_TYPE, m_pendingRegions.data(), m_pendingRegions.size());
4481 m_pendingRegions.clear();
4485 ~AdminRegionRecorder()
4487 ASSERT(!m_pendingRegions.size());
4491 kern_return_t FastMallocZone::enumerate(task_t task, void* context, unsigned typeMask, vm_address_t zoneAddress, memory_reader_t reader, vm_range_recorder_t recorder)
4493 RemoteMemoryReader memoryReader(task, reader);
4497 FastMallocZone* mzone = memoryReader(reinterpret_cast<FastMallocZone*>(zoneAddress));
4498 TCMalloc_PageHeap* pageHeap = memoryReader(mzone->m_pageHeap);
4499 TCMalloc_ThreadCache** threadHeapsPointer = memoryReader(mzone->m_threadHeaps);
4500 TCMalloc_ThreadCache* threadHeaps = memoryReader(*threadHeapsPointer);
4502 TCMalloc_Central_FreeListPadded* centralCaches = memoryReader(mzone->m_centralCaches, sizeof(TCMalloc_Central_FreeListPadded) * kNumClasses);
4504 FreeObjectFinder finder(memoryReader);
4505 finder.findFreeObjects(threadHeaps);
4506 finder.findFreeObjects(centralCaches, kNumClasses, mzone->m_centralCaches);
4508 TCMalloc_PageHeap::PageMap* pageMap = &pageHeap->pagemap_;
4509 PageMapFreeObjectFinder pageMapFinder(memoryReader, finder);
4510 pageMap->visitValues(pageMapFinder, memoryReader);
4512 PageMapMemoryUsageRecorder usageRecorder(task, context, typeMask, recorder, memoryReader, finder);
4513 pageMap->visitValues(usageRecorder, memoryReader);
4514 usageRecorder.recordPendingRegions();
4516 AdminRegionRecorder adminRegionRecorder(task, context, typeMask, recorder, memoryReader);
4517 pageMap->visitAllocations(adminRegionRecorder, memoryReader);
4519 PageHeapAllocator<Span>* spanAllocator = memoryReader(mzone->m_spanAllocator);
4520 PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator = memoryReader(mzone->m_pageHeapAllocator);
4522 spanAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
4523 pageHeapAllocator->recordAdministrativeRegions(adminRegionRecorder, memoryReader);
4525 adminRegionRecorder.recordPendingRegions();
4530 size_t FastMallocZone::size(malloc_zone_t*, const void*)
4535 void* FastMallocZone::zoneMalloc(malloc_zone_t*, size_t)
4540 void* FastMallocZone::zoneCalloc(malloc_zone_t*, size_t, size_t)
4545 void FastMallocZone::zoneFree(malloc_zone_t*, void* ptr)
4547 // Due to <rdar://problem/5671357> zoneFree may be called by the system free even if the pointer
4548 // is not in this zone. When this happens, the pointer being freed was not allocated by any
4549 // zone so we need to print a useful error for the application developer.
4550 malloc_printf("*** error for object %p: pointer being freed was not allocated\n", ptr);
4553 void* FastMallocZone::zoneRealloc(malloc_zone_t*, void*, size_t)
4565 malloc_introspection_t jscore_fastmalloc_introspection = { &FastMallocZone::enumerate, &FastMallocZone::goodSize, &FastMallocZone::check, &FastMallocZone::print,
4566 &FastMallocZone::log, &FastMallocZone::forceLock, &FastMallocZone::forceUnlock, &FastMallocZone::statistics
4568 #if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD)
4569 , 0 // zone_locked will not be called on the zone unless it advertises itself as version five or higher.
4571 #if !defined(BUILDING_ON_TIGER) && !defined(BUILDING_ON_LEOPARD) && !defined(BUILDING_ON_SNOW_LEOPARD)
4572 , 0, 0, 0, 0 // These members will not be used unless the zone advertises itself as version seven or higher.
4578 FastMallocZone::FastMallocZone(TCMalloc_PageHeap* pageHeap, TCMalloc_ThreadCache** threadHeaps, TCMalloc_Central_FreeListPadded* centralCaches, PageHeapAllocator<Span>* spanAllocator, PageHeapAllocator<TCMalloc_ThreadCache>* pageHeapAllocator)
4579 : m_pageHeap(pageHeap)
4580 , m_threadHeaps(threadHeaps)
4581 , m_centralCaches(centralCaches)
4582 , m_spanAllocator(spanAllocator)
4583 , m_pageHeapAllocator(pageHeapAllocator)
4585 memset(&m_zone, 0, sizeof(m_zone));
4587 m_zone.zone_name = "JavaScriptCore FastMalloc";
4588 m_zone.size = &FastMallocZone::size;
4589 m_zone.malloc = &FastMallocZone::zoneMalloc;
4590 m_zone.calloc = &FastMallocZone::zoneCalloc;
4591 m_zone.realloc = &FastMallocZone::zoneRealloc;
4592 m_zone.free = &FastMallocZone::zoneFree;
4593 m_zone.valloc = &FastMallocZone::zoneValloc;
4594 m_zone.destroy = &FastMallocZone::zoneDestroy;
4595 m_zone.introspect = &jscore_fastmalloc_introspection;
4596 malloc_zone_register(&m_zone);
4600 void FastMallocZone::init()
4602 static FastMallocZone zone(pageheap, &thread_heaps, static_cast<TCMalloc_Central_FreeListPadded*>(central_cache), &span_allocator, &threadheap_allocator);
4605 #endif // OS(DARWIN)
4608 #endif // WTF_CHANGES
4610 #endif // FORCE_SYSTEM_MALLOC