Chromium Code Reviews

Unified Diff: base/allocator/malloc_zone_aggregator_mac.cc

Issue 2703803004: macOS: Shim all malloc zones. (Closed)
Patch Set: Add a memory barrier. Created 3 years, 10 months ago
Index: base/allocator/malloc_zone_aggregator_mac.cc
diff --git a/base/allocator/malloc_zone_aggregator_mac.cc b/base/allocator/malloc_zone_aggregator_mac.cc
new file mode 100644
index 0000000000000000000000000000000000000000..7b40b7487331713d8c3c35b3adf01ae299938e7d
--- /dev/null
+++ b/base/allocator/malloc_zone_aggregator_mac.cc
@@ -0,0 +1,203 @@
+// Copyright 2017 The Chromium Authors. All rights reserved.
+// Use of this source code is governed by a BSD-style license that can be
+// found in the LICENSE file.
+
+#include "base/allocator/malloc_zone_aggregator_mac.h"
+
+#include "base/atomicops.h"
+
+namespace base {
+namespace allocator {
+
+MallocZoneFunctions::MallocZoneFunctions() {}
+
+void StoreZoneFunctions(ChromeMallocZone* zone,
+                        MallocZoneFunctions* functions) {
+  functions->malloc = zone->malloc;
+  functions->calloc = zone->calloc;
+  functions->valloc = zone->valloc;
+  functions->free = zone->free;
+  functions->realloc = zone->realloc;
+  functions->size = zone->size;
+  CHECK(functions->malloc && functions->calloc && functions->valloc &&
+        functions->free && functions->realloc && functions->size);
+
+  // These functions might be nullptr.
+  functions->batch_malloc = zone->batch_malloc;
+  functions->batch_free = zone->batch_free;
+
+  if (zone->version >= 5) {
+    // Not all custom malloc zones have a memalign.
Mark Mentovai 2017/02/21 22:52:51 Is this relevant to us anymore, or is this some old thing?
erikchen 2017/02/21 23:44:33 Happens on 10.11.
+    functions->memalign = zone->memalign;
+  }
+  if (zone->version >= 6) {
+    // This may be nullptr.
+    functions->free_definite_size = zone->free_definite_size;
+  }
+
+  functions->context = zone;
+}
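For orientation, a caller would typically snapshot a zone's unshimmed functions before installing a shim on top of it. A minimal sketch, assuming ChromeMallocZone shares malloc_zone_t's layout (the reinterpret_casts elsewhere in this file already rely on that); CaptureDefaultZone is a hypothetical name, not part of this patch:

#include <malloc/malloc.h>

// Hypothetical helper: snapshot the default zone's function pointers so a
// shim can forward to the original allocator later.
void CaptureDefaultZone(MallocZoneFunctions* functions) {
  ChromeMallocZone* zone =
      reinterpret_cast<ChromeMallocZone*>(malloc_default_zone());
  StoreZoneFunctions(zone, functions);
  // functions->malloc etc. now call straight into the system allocator.
}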
+
+MallocZoneAggregator::MallocZoneAggregator() {
+  memset(zones_, 0, sizeof(MallocZoneFunctions) * kMaxZoneCount);
Mark Mentovai 2017/02/21 22:52:51 This one can be sizeof(zones_) or sizeof(MallocZoneFunctions) * arraysize(zones_).
erikchen 2017/02/21 23:44:33 Done.
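The form Mark suggests is self-maintaining; a sketch of the likely fix in the later patch set (the exact wording there may differ):

// sizeof(zones_) tracks both the element type and kMaxZoneCount, so the
// memset stays correct if either changes.
memset(zones_, 0, sizeof(zones_));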
+}
+MallocZoneAggregator::~MallocZoneAggregator() {}
+
+void* MallocZoneAggregator::DispatchMallocToZone(void* zone, size_t size) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
Mark Mentovai 2017/02/21 22:52:51 All of the rest can be arraysize(zones_).
erikchen 2017/02/21 23:44:33 Done.
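Likewise for the loop bounds; a sketch using arraysize() from base/macros.h, which keeps the bound tied to the array declaration:

for (size_t i = 0; i < arraysize(zones_); ++i) {
  // ... body as before ...
}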
+    if (zones_[i].context == zone) {
+      return zones_[i].malloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
+                              size);
+    }
+  }
+  CHECK(false);
+  return nullptr;
+}
+
+void* MallocZoneAggregator::DispatchCallocToZone(void* zone,
+                                                 size_t num_items,
+                                                 size_t size) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
Mark Mentovai 2017/02/21 22:52:51 You’re concerned with performance and avoiding one extra branch, but this does a linear search on every dispatch.
erikchen 2017/02/21 23:44:33 premature optimization is the *mumble mumble mumble*
+    if (zones_[i].context == zone) {
+      return zones_[i].calloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
+                              num_items, size);
+    }
+  }
+  CHECK(false);
+  return nullptr;
+}
+void* MallocZoneAggregator::DispatchVallocToZone(void* zone, size_t size) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
+    if (zones_[i].context == zone) {
+      return zones_[i].valloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
+                              size);
+    }
+  }
+  CHECK(false);
+  return nullptr;
+}
+
+void MallocZoneAggregator::DispatchFreeToZone(void* zone, void* ptr) {
Mark Mentovai 2017/02/21 22:52:50 By the time I got here, I thought that you should factor the repeated lookup out into a helper.
erikchen 2017/02/21 23:44:33 After factoring out GetFunctionsForZone(), I no longer duplicate the loop in each dispatch function.
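erikchen's reply names the helper; a sketch of what the factored-out lookup plausibly looks like (the real signature in the later patch set may differ):

MallocZoneFunctions* MallocZoneAggregator::GetFunctionsForZone(void* zone) {
  for (size_t i = 0; i < arraysize(zones_); ++i) {
    if (zones_[i].context == zone)
      return &zones_[i];
  }
  CHECK(false);
  return nullptr;
}

// Each dispatch function then collapses to a one-liner, e.g.:
void MallocZoneAggregator::DispatchFreeToZone(void* zone, void* ptr) {
  GetFunctionsForZone(zone)->free(
      reinterpret_cast<struct _malloc_zone_t*>(zone), ptr);
}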
+  for (int i = 0; i < kMaxZoneCount; ++i) {
+    if (zones_[i].context == zone) {
+      zones_[i].free(reinterpret_cast<struct _malloc_zone_t*>(zone), ptr);
+      return;
+    }
+  }
+  CHECK(false);
+}
+
+void* MallocZoneAggregator::DispatchReallocToZone(void* zone,
+                                                  void* ptr,
+                                                  size_t size) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
+    if (zones_[i].context == zone) {
+      return zones_[i].realloc(reinterpret_cast<struct _malloc_zone_t*>(zone),
+                               ptr, size);
+    }
+  }
+  CHECK(false);
+  return nullptr;
+}
+
+void* MallocZoneAggregator::DispatchMemalignToZone(void* zone,
+                                                   size_t alignment,
+                                                   size_t size) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
+    if (zones_[i].context == zone) {
+      return zones_[i].memalign(reinterpret_cast<struct _malloc_zone_t*>(zone),
+                                alignment, size);
+    }
+  }
+  CHECK(false);
+  return nullptr;
+}
+
+unsigned MallocZoneAggregator::DispatchBatchMallocToZone(
+    void* zone,
+    size_t size,
+    void** results,
+    unsigned num_requested) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
+    if (zones_[i].context == zone) {
+      return zones_[i].batch_malloc(
+          reinterpret_cast<struct _malloc_zone_t*>(zone), size, results,
+          num_requested);
+    }
+  }
+  CHECK(false);
+  return 0;
+}
+void MallocZoneAggregator::DispatchBatchFreeToZone(void* zone,
Mark Mentovai 2017/02/21 22:52:51 Blank line between functions.
erikchen 2017/02/21 23:44:33 Done.
+                                                    void** to_be_freed,
+                                                    unsigned num_to_be_freed) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
+    if (zones_[i].context == zone) {
+      zones_[i].batch_free(reinterpret_cast<struct _malloc_zone_t*>(zone),
+                           to_be_freed, num_to_be_freed);
+      return;
+    }
+  }
+  CHECK(false);
+}
+
+void MallocZoneAggregator::DispatchFreeDefiniteSizeToZone(void* zone,
+                                                          void* ptr,
+                                                          size_t size) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
+    if (zones_[i].context == zone) {
+      zones_[i].free_definite_size(
+          reinterpret_cast<struct _malloc_zone_t*>(zone), ptr, size);
+      return;
+    }
+  }
+  CHECK(false);
+}
+
+size_t MallocZoneAggregator::DispatchGetSizeEstimateToZone(void* zone,
+                                                           void* ptr) {
+  for (int i = 0; i < kMaxZoneCount; ++i) {
+    if (zones_[i].context == zone) {
+      return zones_[i].size(reinterpret_cast<struct _malloc_zone_t*>(zone),
+                            ptr);
+    }
+  }
+  CHECK(false);
+  return 0;
+}
+
+void MallocZoneAggregator::StoreZone(ChromeMallocZone* zone) {
+  base::AutoLock l(lock_);
+  if (IsZoneAlreadyStoredLockAcquired(zone))
+    return;
+
+  if (zone_count_ == kMaxZoneCount)
+    return;
+
+  StoreZoneFunctions(zone, &zones_[zone_count_]);
+  ++zone_count_;
+  base::subtle::MemoryBarrier();
+}
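The barrier is what this patch set adds: the dispatch functions read zones_ and its contents without taking lock_, so the writes into zones_[zone_count_] must become visible before any reader can observe the incremented count. A self-contained sketch of the same publication pattern using the release/acquire primitives from base/atomicops.h (illustrative only, with hypothetical names; the patch itself uses a plain increment plus a full MemoryBarrier()):

#include "base/atomicops.h"

struct Entry {
  void* context;
};

Entry g_entries[10];
base::subtle::Atomic32 g_count = 0;

// Publisher (serialized externally, e.g. by a lock): fill the slot first,
// then release-store the new count so the slot's contents are published
// before the count.
void Publish(void* context) {
  base::subtle::Atomic32 count = base::subtle::NoBarrier_Load(&g_count);
  g_entries[count].context = context;
  base::subtle::Release_Store(&g_count, count + 1);
}

// Lock-free reader: after the acquire load, entries [0, count) are fully
// initialized and safe to scan.
Entry* Find(void* context) {
  base::subtle::Atomic32 count = base::subtle::Acquire_Load(&g_count);
  for (base::subtle::Atomic32 i = 0; i < count; ++i) {
    if (g_entries[i].context == context)
      return &g_entries[i];
  }
  return nullptr;
}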
+
+bool MallocZoneAggregator::IsZoneAlreadyStored(ChromeMallocZone* zone) {
+  base::AutoLock l(lock_);
+  return IsZoneAlreadyStoredLockAcquired(zone);
+}
+
+int MallocZoneAggregator::GetZoneCount() {
+  base::AutoLock l(lock_);
+  return zone_count_;
+}
+
+bool MallocZoneAggregator::IsZoneAlreadyStoredLockAcquired(
+    ChromeMallocZone* zone) {
+  lock_.AssertAcquired();
+  for (int i = 0; i < zone_count_; ++i) {
+    if (zones_[i].context == reinterpret_cast<void*>(zone))
+      return true;
+  }
+  return false;
+}
+
+} // namespace allocator
+} // namespace base
