Chromium Code Reviews
Unified Diff: third_party/tcmalloc/chromium/src/heap-profiler.cc

Issue 9320005: [NOT TO COMMIT!] Replace third_party/tcmalloc/chromium with tcmalloc r136 (the latest). (Closed) Base URL: http://git.chromium.org/git/chromium.git@trunk
Patch Set: Created 8 years, 10 months ago
 // Copyright (c) 2005, Google Inc.
 // All rights reserved.
 //
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 // notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
(...skipping 89 matching lines...)
100 "If non-zero, dump heap profiling information once every " 100 "If non-zero, dump heap profiling information once every "
101 "specified number of bytes deallocated by the program " 101 "specified number of bytes deallocated by the program "
102 "since the last dump."); 102 "since the last dump.");
103 // We could also add flags that report whenever inuse_bytes changes by 103 // We could also add flags that report whenever inuse_bytes changes by
104 // X or -X, but there hasn't been a need for that yet, so we haven't. 104 // X or -X, but there hasn't been a need for that yet, so we haven't.
105 DEFINE_int64(heap_profile_inuse_interval, 105 DEFINE_int64(heap_profile_inuse_interval,
106 EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/), 106 EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/),
107 "If non-zero, dump heap profiling information whenever " 107 "If non-zero, dump heap profiling information whenever "
108 "the high-water memory usage mark increases by the specified " 108 "the high-water memory usage mark increases by the specified "
109 "number of bytes."); 109 "number of bytes.");
110 DEFINE_int64(heap_profile_time_interval,
111 EnvToInt64("HEAP_PROFILE_TIME_INTERVAL", 0),
112 "If non-zero, dump heap profiling information once every "
113 "specified number of seconds since the last dump.");
114 DEFINE_bool(mmap_log, 110 DEFINE_bool(mmap_log,
115 EnvToBool("HEAP_PROFILE_MMAP_LOG", false), 111 EnvToBool("HEAP_PROFILE_MMAP_LOG", false),
116 "Should mmap/munmap calls be logged?"); 112 "Should mmap/munmap calls be logged?");
117 DEFINE_bool(mmap_profile, 113 DEFINE_bool(mmap_profile,
118 EnvToBool("HEAP_PROFILE_MMAP", false), 114 EnvToBool("HEAP_PROFILE_MMAP", false),
119 "If heap-profiling is on, also profile mmap, mremap, and sbrk)"); 115 "If heap-profiling is on, also profile mmap, mremap, and sbrk)");
120 DEFINE_bool(only_mmap_profile, 116 DEFINE_bool(only_mmap_profile,
121 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false), 117 EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
122 "If heap-profiling is on, only profile mmap, mremap, and sbrk; " 118 "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
123 "do not profile malloc/new/etc"); 119 "do not profile malloc/new/etc");
(...skipping 18 matching lines...) Expand all
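All of these flags take their defaults from environment variables, and the values are plain byte counts or booleans read when the flags are initialized (effectively before main()), so they have to be set in the environment that launches the process rather than from code at runtime. A rough sketch of what an EnvToInt64-style helper does (the real helper lives in perftools' flag shim, not in this file), shown only to make the defaults above concrete:

// Illustration only: approximate behavior of the EnvToInt64 used above.
// Real configuration happens in the launching environment, e.g.:
//   HEAP_PROFILE_ALLOCATION_INTERVAL=1073741824 HEAP_PROFILE_MMAP=true ./myprog
#include <stdint.h>
#include <stdlib.h>

static int64_t EnvToInt64Sketch(const char* name, int64_t default_value) {
  const char* value = getenv(name);
  if (value == NULL || value[0] == '\0') {
    return default_value;            // e.g. 100 << 20 == 104857600 bytes for
  }                                  // HEAP_PROFILE_INUSE_INTERVAL above
  return strtoll(value, NULL, 10);   // values are raw byte counts, not MB
}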
 static LowLevelAlloc::Arena *heap_profiler_memory;
 
 static void* ProfilerMalloc(size_t bytes) {
   return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory);
 }
 static void ProfilerFree(void* p) {
   LowLevelAlloc::Free(p);
 }
 
 // We use buffers of this size in DoGetHeapProfile.
-// The size is 1 << 20 in the original google-perftools. Changed it to
-// 5 << 20 since a larger buffer is requried for deeper profiling in Chromium.
-// The buffer is allocated only when the environment variable HEAPPROFILE is
-// specified to dump heap information.
-static const int kProfileBufferSize = 5 << 20;
+static const int kProfileBufferSize = 1 << 20;
 
 // This is a last-ditch buffer we use in DumpProfileLocked in case we
 // can't allocate more memory from ProfilerMalloc.  We expect this
 // will be used by HeapProfileEndWriter when the application has to
 // exit due to out-of-memory.  This buffer is allocated in
 // HeapProfilerStart.  Access to this must be protected by heap_lock.
 static char* global_profiler_buffer = NULL;
 
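The comment above describes a fallback path whose body is in the elided part of DumpProfileLocked: if a fresh scratch buffer cannot be obtained (typically because the process is already out of memory), the dump is generated into the preallocated global_profiler_buffer instead. The following is an illustrative sketch of that shape only, not code from this CL; names refer to statics and helpers in this file, and the exact elided logic may differ in detail:

// Illustrative sketch (not from this CL) of the fallback the comment above
// describes.  Assumes heap_lock is held, as it is for DumpProfileLocked.
static char* GetProfileWithFallbackLocked() {
  // Normal path: a freshly allocated scratch buffer.
  char* buf = reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
  if (buf != NULL) {
    return DoGetHeapProfileLocked(buf, kProfileBufferSize);
  }
  // Last-ditch path: reuse the buffer reserved in HeapProfilerStart, so an
  // out-of-memory exit (via HeapProfileEndWriter) can still produce a dump.
  if (global_profiler_buffer != NULL) {
    return DoGetHeapProfileLocked(global_profiler_buffer, kProfileBufferSize);
  }
  return NULL;
}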
 
 //----------------------------------------------------------------------
 // Profiling control/state data
 //----------------------------------------------------------------------
 
 // Access to all of these is protected by heap_lock.
 static bool  is_on = false;           // If are on as a subsytem.
 static bool  dumping = false;         // Dumping status to prevent recursion
 static char* filename_prefix = NULL;  // Prefix used for profile file names
                                       // (NULL if no need for dumping yet)
 static int   dump_count = 0;          // How many dumps so far
 static int64 last_dump_alloc = 0;     // alloc_size when did we last dump
 static int64 last_dump_free = 0;      // free_size when did we last dump
 static int64 high_water_mark = 0;     // In-use-bytes at last high-water dump
-static int64 last_dump_time = 0;      // The time of the last dump
 
 static HeapProfileTable* heap_profile = NULL;  // the heap profile table
 
 //----------------------------------------------------------------------
 // Profile generation
 //----------------------------------------------------------------------
 
-enum AddOrRemove { ADD, REMOVE };
-
-// Add or remove all MMap-allocated regions to/from *heap_profile.
-// Assumes heap_lock is held.
-static void AddRemoveMMapDataLocked(AddOrRemove mode) {
-  RAW_DCHECK(heap_lock.IsHeld(), "");
-  if (!FLAGS_mmap_profile || !is_on) return;
-  // MemoryRegionMap maintained all the data we need for all
-  // mmap-like allocations, so we just use it here:
-  MemoryRegionMap::LockHolder l;
-  for (MemoryRegionMap::RegionIterator r = MemoryRegionMap::BeginRegionLocked();
-       r != MemoryRegionMap::EndRegionLocked(); ++r) {
-    if (mode == ADD) {
-      heap_profile->RecordAllocWithStack(
-          reinterpret_cast<const void*>(r->start_addr),
-          r->end_addr - r->start_addr,
-          r->call_stack_depth, r->call_stack);
-    } else {
-      heap_profile->RecordFree(reinterpret_cast<void*>(r->start_addr));
-    }
-  }
-}
-
 // Input must be a buffer of size at least 1MB.
 static char* DoGetHeapProfileLocked(char* buf, int buflen) {
   // We used to be smarter about estimating the required memory and
   // then capping it to 1MB and generating the profile into that.
   if (buf == NULL || buflen < 1)
     return NULL;
 
   RAW_DCHECK(heap_lock.IsHeld(), "");
   int bytes_written = 0;
   if (is_on) {
-    HeapProfileTable::Stats const stats = heap_profile->total();
-    (void)stats;   // avoid an unused-variable warning in non-debug mode.
-    AddRemoveMMapDataLocked(ADD);
+    if (FLAGS_mmap_profile) {
+      heap_profile->RefreshMMapData();
+    }
     bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1);
-    // FillOrderedProfile should not reduce the set of active mmap-ed regions,
-    // hence MemoryRegionMap will let us remove everything we've added above:
-    AddRemoveMMapDataLocked(REMOVE);
-    RAW_DCHECK(stats.Equivalent(heap_profile->total()), "");
-    // if this fails, we somehow removed by AddRemoveMMapDataLocked
-    // more than we have added.
+    if (FLAGS_mmap_profile) {
+      heap_profile->ClearMMapData();
+    }
   }
   buf[bytes_written] = '\0';
   RAW_DCHECK(bytes_written == strlen(buf), "");
 
   return buf;
 }
 
237 extern "C" char* GetHeapProfile() { 202 extern "C" char* GetHeapProfile() {
238 // Use normal malloc: we return the profile to the user to free it: 203 // Use normal malloc: we return the profile to the user to free it:
239 char* buffer = reinterpret_cast<char*>(malloc(kProfileBufferSize)); 204 char* buffer = reinterpret_cast<char*>(malloc(kProfileBufferSize));
(...skipping 11 matching lines...) Expand all
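GetHeapProfile() is the caller-facing wrapper around DoGetHeapProfileLocked: it deliberately uses plain malloc so the caller owns and frees the returned NUL-terminated string. A small usage sketch; the write-to-file part is illustrative and not from this CL:

// Hypothetical caller.  GetHeapProfile() (also declared in the public
// heap-profiler header) returns a malloc'd, NUL-terminated text profile.
#include <cstdio>
#include <cstdlib>

extern "C" char* GetHeapProfile();

void WriteProfileSnapshot(const char* path) {
  char* profile = GetHeapProfile();
  if (profile == NULL) return;    // NULL only if the scratch buffer couldn't be allocated
  if (FILE* f = fopen(path, "w")) {
    fputs(profile, f);            // text heap profile, consumable by pprof
    fclose(f);
  }
  free(profile);                  // caller frees, per the comment above
}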
   RAW_DCHECK(is_on, "");
   RAW_DCHECK(!dumping, "");
 
   if (filename_prefix == NULL) return;  // we do not yet need dumping
 
   dumping = true;
 
   // Make file name
   char file_name[1000];
   dump_count++;
-  snprintf(file_name, sizeof(file_name), "%s.%05d.%04d%s",
-           filename_prefix, getpid(), dump_count, HeapProfileTable::kFileExt);
+  snprintf(file_name, sizeof(file_name), "%s.%04d%s",
+           filename_prefix, dump_count, HeapProfileTable::kFileExt);
 
   // Dump the profile
   RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
   // We must use file routines that don't access memory, since we hold
   // a memory lock now.
   RawFD fd = RawOpenForWriting(file_name);
   if (fd == kIllegalRawFD) {
     RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
     dumping = false;
     return;
(...skipping 19 matching lines...)
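One user-visible effect of this hunk: the Chromium fork embedded the pid in every dump name, while upstream r136 does not, so dumps from different processes that share a filename prefix are no longer distinguished by name. Assuming HeapProfileTable::kFileExt is ".heap" (as in perftools; the prefix, pid, and sequence number below are made up), the two formats produce names like this:

// Illustration of the old and new name formats.
#include <cstdio>

void ShowNameFormats() {
  char old_name[1000], new_name[1000];
  snprintf(old_name, sizeof(old_name), "%s.%05d.%04d%s",
           "/tmp/myprog", 4321, 1, ".heap");  // Chromium fork: "/tmp/myprog.04321.0001.heap"
  snprintf(new_name, sizeof(new_name), "%s.%04d%s",
           "/tmp/myprog", 1, ".heap");        // upstream r136: "/tmp/myprog.0001.heap"
  printf("%s\n%s\n", old_name, new_name);
}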
 //----------------------------------------------------------------------
 
 // Dump a profile after either an allocation or deallocation, if
 // the memory use has changed enough since the last dump.
 static void MaybeDumpProfileLocked() {
   if (!dumping) {
     const HeapProfileTable::Stats& total = heap_profile->total();
     const int64 inuse_bytes = total.alloc_size - total.free_size;
     bool need_to_dump = false;
     char buf[128];
-    int64 current_time = time(NULL);
     if (FLAGS_heap_profile_allocation_interval > 0 &&
         total.alloc_size >=
         last_dump_alloc + FLAGS_heap_profile_allocation_interval) {
       snprintf(buf, sizeof(buf), ("%"PRId64" MB allocated cumulatively, "
                                   "%"PRId64" MB currently in use"),
                total.alloc_size >> 20, inuse_bytes >> 20);
       need_to_dump = true;
     } else if (FLAGS_heap_profile_deallocation_interval > 0 &&
                total.free_size >=
                last_dump_free + FLAGS_heap_profile_deallocation_interval) {
       snprintf(buf, sizeof(buf), ("%"PRId64" MB freed cumulatively, "
                                   "%"PRId64" MB currently in use"),
                total.free_size >> 20, inuse_bytes >> 20);
       need_to_dump = true;
     } else if (FLAGS_heap_profile_inuse_interval > 0 &&
                inuse_bytes >
                high_water_mark + FLAGS_heap_profile_inuse_interval) {
       snprintf(buf, sizeof(buf), "%"PRId64" MB currently in use",
                inuse_bytes >> 20);
       need_to_dump = true;
-    } else if (FLAGS_heap_profile_time_interval > 0 &&
-               current_time - last_dump_time >=
-                   FLAGS_heap_profile_time_interval) {
-      snprintf(buf, sizeof(buf), "%d sec since the last dump",
-               current_time - last_dump_time);
-      need_to_dump = true;
-      last_dump_time = current_time;
     }
     if (need_to_dump) {
       DumpProfileLocked(buf);
 
       last_dump_alloc = total.alloc_size;
       last_dump_free = total.free_size;
       if (inuse_bytes > high_water_mark)
         high_water_mark = inuse_bytes;
     }
   }
 }
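The merge drops the Chromium-only HEAP_PROFILE_TIME_INTERVAL trigger entirely: the flag, the last_dump_time state, and the branch above. If that behavior is still wanted after the merge, re-adding it is mechanical; below is a self-contained sketch based on the deleted lines, reworked as a helper that MaybeDumpProfileLocked could call, and with the old formatting bug fixed (the deleted code printed an int64 delta with "%d"). This is not part of the CL.

// Sketch of the time-based trigger this merge removes.
#include <inttypes.h>
#include <stdint.h>
#include <stdio.h>

// Returns true and fills |buf| with a dump reason when |time_interval|
// seconds (FLAGS_heap_profile_time_interval in the deleted code) have passed
// since |*last_dump_time|; updates |*last_dump_time| when it fires.
static bool NeedTimeTriggeredDump(int64_t time_interval, int64_t current_time,
                                  int64_t* last_dump_time,
                                  char* buf, int buflen) {
  if (time_interval > 0 &&
      current_time - *last_dump_time >= time_interval) {
    snprintf(buf, buflen, "%" PRId64 " sec since the last dump",
             current_time - *last_dump_time);
    *last_dump_time = current_time;
    return true;
  }
  return false;
}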
 
 // Record an allocation in the profile.
 static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
+  // Take the stack trace outside the critical section.
+  void* stack[HeapProfileTable::kMaxStackDepth];
+  int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack);
   SpinLockHolder l(&heap_lock);
   if (is_on) {
-    heap_profile->RecordAlloc(ptr, bytes, skip_count + 1);
+    heap_profile->RecordAlloc(ptr, bytes, depth, stack);
     MaybeDumpProfileLocked();
   }
 }
 
 // Record a deallocation in the profile.
 static void RecordFree(const void* ptr) {
   SpinLockHolder l(&heap_lock);
   if (is_on) {
     heap_profile->RecordFree(ptr);
     MaybeDumpProfileLocked();
(...skipping 64 matching lines...)
     // in pretty-printing of NULL as "nil".
     // TODO(maxim): instead should use a safe snprintf reimplementation
     RAW_LOG(INFO, "munmap(start=0x%"PRIxPTR", len=%"PRIuS")",
             (uintptr_t) ptr, size);
 #ifdef TODO_REENABLE_STACK_TRACING
     DumpStackTrace(1, RawInfoStackDumper, NULL);
 #endif
   }
 }
 
-static void SbrkHook(const void* result, std::ptrdiff_t increment) {
+static void SbrkHook(const void* result, ptrdiff_t increment) {
   if (FLAGS_mmap_log) {  // log it
     RAW_LOG(INFO, "sbrk(inc=%"PRIdS") = 0x%"PRIxPTR"",
             increment, (uintptr_t) result);
 #ifdef TODO_REENABLE_STACK_TRACING
     DumpStackTrace(1, RawInfoStackDumper, NULL);
 #endif
   }
 }
 
 //----------------------------------------------------------------------
(...skipping 38 matching lines...)
   // heap profile even if the application runs out of memory.
   global_profiler_buffer =
       reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
 
   heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
                      HeapProfileTable(ProfilerMalloc, ProfilerFree);
 
   last_dump_alloc = 0;
   last_dump_free = 0;
   high_water_mark = 0;
-  last_dump_time = 0;
 
   // We do not reset dump_count so if the user does a sequence of
   // HeapProfilerStart/HeapProfileStop, we will get a continuous
   // sequence of profiles.
 
   if (FLAGS_only_mmap_profile == false) {
     // Now set the hooks that capture new/delete and malloc/free.
     RAW_CHECK(MallocHook::AddNewHook(&NewHook), "");
     RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), "");
   }
(...skipping 86 matching lines...)
 
 // class used for finalization -- dumps the heap-profile at program exit
 struct HeapProfileEndWriter {
   ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); }
 };
 
 // We want to make sure tcmalloc is up and running before starting the profiler
 static const TCMallocGuard tcmalloc_initializer;
 REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit());
 static HeapProfileEndWriter heap_profile_end_writer;
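The module initializer and HeapProfileEndWriter cover the automatic path (start when the HEAPPROFILE environment variable names a prefix, dump once more at exit); the same machinery is also exposed as a public C API for explicit control. A usage sketch, assuming the gperftools public header (older releases ship it as <google/heap-profiler.h>); the workload below is a stand-in:

// Hypothetical explicit-control sketch -- complements the automatic
// HEAPPROFILE / REGISTER_MODULE_INITIALIZER path above.
#include <gperftools/heap-profiler.h>  // or <google/heap-profiler.h> in older releases
#include <vector>

void ProfileOneScenario() {
  if (!IsHeapProfilerRunning()) {
    HeapProfilerStart("/tmp/scenario");    // dumps become /tmp/scenario.NNNN.heap
  }
  std::vector<int> big(10 * 1024 * 1024);  // stand-in workload: ~40 MB allocation
  HeapProfilerDump("after workload");      // reason string appears in the RAW_VLOG line
  HeapProfilerStop();                      // also detaches the allocation hooks
}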
