Chromium Code Reviews

Diff: src/spaces.cc

Issue 3161015: Tracks the usage of executable memory allocated by V8 and exposes the value t... (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: '' Created 10 years, 4 months ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 23 matching lines...)
 namespace v8 {
 namespace internal {

 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
   ASSERT((space).low() <= (info).top                  \
          && (info).top <= (space).high()              \
          && (info).limit == (space).high())

-intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
+intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;

 // ----------------------------------------------------------------------------
 // HeapObjectIterator

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
   Initialize(space->bottom(), space->top(), NULL);
 }


 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
(...skipping 204 matching lines...)
   free_list_.Free();
   allocation_list_.Free();
 }


 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //
 int MemoryAllocator::capacity_ = 0;
 int MemoryAllocator::size_ = 0;
+int MemoryAllocator::size_executable_ = 0;

 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

 // 270 is an estimate based on the static default heap size of a pair of 256K
 // semispaces and a 64M old generation.
 const int kEstimatedNumberOfChunks = 270;
 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
     kEstimatedNumberOfChunks);
 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
 int MemoryAllocator::max_nof_chunks_ = 0;
 int MemoryAllocator::top_ = 0;


 void MemoryAllocator::Push(int free_chunk_id) {
   ASSERT(max_nof_chunks_ > 0);
   ASSERT(top_ < max_nof_chunks_);
   free_chunk_ids_[top_++] = free_chunk_id;
 }


 int MemoryAllocator::Pop() {
   ASSERT(top_ > 0);
   return free_chunk_ids_[--top_];
 }


+#if defined(V8_REPORT_EXECUTABLE_MEMORY_USAGE)
+void* executable_memory_histogram = NULL;
+#endif

Søren Thygesen Gjesse 2010/08/18 08:20:01: I don't think we need this conditional compile unl…
 bool MemoryAllocator::Setup(int capacity) {
   capacity_ = RoundUp(capacity, Page::kPageSize);

   // Over-estimate the size of chunks_ array.  It assumes the expansion of old
   // space is always in the unit of a chunk (kChunkSize) except the last
   // expansion.
   //
   // Due to alignment, allocated space might be one page less than required
   // number (kPagesPerChunk) of pages for old spaces.
   //
   // Reserve two chunk ids for semispaces, one for map space, one for old
   // space, and one for code space.
   max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
   if (max_nof_chunks_ > kMaxNofChunks) return false;

   size_ = 0;
+  size_executable_ = 0;
+#if defined(V8_REPORT_EXECUTABLE_MEMORY_USAGE)
+  executable_memory_histogram =
+      StatsTable::CreateHistogram("V8.ExecutableMemoryMax", 0, MB * 512, 50);
+#endif
   ChunkInfo info;  // uninitialized element.
   for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
     chunks_.Add(info);
     free_chunk_ids_.Add(i);
   }
   top_ = max_nof_chunks_;
   return true;
 }


(...skipping 25 matching lines...)
     return NULL;
   }
   void* mem;
   if (executable == EXECUTABLE && CodeRange::exists()) {
     mem = CodeRange::AllocateRawMemory(requested, allocated);
   } else {
     mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
   }
   int alloced = static_cast<int>(*allocated);
   size_ += alloced;
+
+  if (executable == EXECUTABLE) {
+    size_executable_ += alloced;
+#if defined(V8_REPORT_EXECUTABLE_MEMORY_USAGE)
+    static int size_executable_max_observed_ = 0;
+    if (size_executable_max_observed_ < size_executable_) {
+      size_executable_max_observed_ = size_executable_;
+      StatsTable::AddHistogramSample(executable_memory_histogram,
+                                     size_executable_);
+    }
+#endif
+  }
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), alloced);
 #endif
   Counters::memory_allocated.Increment(alloced);
   return mem;
 }
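The sampling logic above records a histogram value only when size_executable_ reaches a new high-water mark, so "V8.ExecutableMemoryMax" tracks peak executable memory rather than every allocation. A minimal standalone sketch of that pattern (the Histogram struct and OnExecutableAllocated here are illustrative stand-ins, not V8's StatsTable API):

#include <cstdio>

// Stand-in for a StatsTable-style histogram handle: just prints samples.
struct Histogram {
  const char* name;
  void AddSample(int value) { std::printf("%s sample: %d\n", name, value); }
};

static Histogram executable_memory_histogram = {"V8.ExecutableMemoryMax"};
static int size_executable = 0;

// Mirrors the size_executable_max_observed_ logic: a sample is recorded
// only when the running total reaches a new peak.
void OnExecutableAllocated(int bytes) {
  static int max_observed = 0;
  size_executable += bytes;
  if (max_observed < size_executable) {
    max_observed = size_executable;
    executable_memory_histogram.AddSample(size_executable);
  }
}

int main() {
  OnExecutableAllocated(4096);  // new peak -> sampled
  size_executable -= 4096;      // block freed
  OnExecutableAllocated(2048);  // below previous peak -> not sampled
  OnExecutableAllocated(8192);  // 2048 + 8192 = 10240, new peak -> sampled
}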


-void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+void MemoryAllocator::FreeRawMemory(void* mem,
+                                    size_t length,
+                                    Executability executable) {
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), length);
 #endif
   if (CodeRange::contains(static_cast<Address>(mem))) {
     CodeRange::FreeRawMemory(mem, length);
   } else {
     OS::Free(mem, length);
   }
   Counters::memory_allocated.Decrement(static_cast<int>(length));
   size_ -= static_cast<int>(length);
+  if (executable == EXECUTABLE) size_executable_ -= length;
   ASSERT(size_ >= 0);
 }
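The new Executability parameter makes the free path symmetric with allocation: the allocator does not remember which blocks were executable, so every caller must pass the same flag it allocated with, or size_executable_ drifts. A toy model of that contract (all names here are illustrative, not V8's):

#include <cassert>
#include <cstddef>
#include <new>

enum Executability { NOT_EXECUTABLE, EXECUTABLE };

struct Accounting {
  int size = 0;
  int size_executable = 0;

  void* Allocate(size_t length, Executability executable) {
    size += static_cast<int>(length);
    if (executable == EXECUTABLE) size_executable += static_cast<int>(length);
    return ::operator new(length);
  }

  // The caller must pass the same flag used at allocation time; the
  // allocator itself keeps no per-block record of executability.
  void Free(void* mem, size_t length, Executability executable) {
    ::operator delete(mem);
    size -= static_cast<int>(length);
    if (executable == EXECUTABLE) size_executable -= static_cast<int>(length);
    assert(size >= 0 && size_executable >= 0);
  }
};

int main() {
  Accounting a;
  void* code = a.Allocate(4096, EXECUTABLE);
  void* data = a.Allocate(4096, NOT_EXECUTABLE);
  a.Free(code, 4096, EXECUTABLE);      // matched flag: counter returns to 0
  a.Free(data, 4096, NOT_EXECUTABLE);
  assert(a.size_executable == 0);
}

This contract is why the rest of the patch threads an Executability value through every FreeRawMemory call site below.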


 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
   ASSERT(initial_chunk_ == NULL);

   initial_chunk_ = new VirtualMemory(requested);
   CHECK(initial_chunk_ != NULL);
   if (!initial_chunk_->IsReserved()) {
(...skipping 33 matching lines...)
     requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);

     if (requested_pages <= 0) return Page::FromAddress(NULL);
   }
   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
   if (chunk == NULL) return Page::FromAddress(NULL);
   LOG(NewEvent("PagedChunk", chunk, chunk_size));

   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
   if (*allocated_pages == 0) {
-    FreeRawMemory(chunk, chunk_size);
+    FreeRawMemory(chunk, chunk_size, owner->executable());
     LOG(DeleteEvent("PagedChunk", chunk));
     return Page::FromAddress(NULL);
   }

   int chunk_id = Pop();
   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

   return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
 }

(...skipping 145 matching lines...)
   // We cannot free a chunk contained in the initial chunk because it was not
   // allocated with AllocateRawMemory.  Instead we uncommit the virtual
   // memory.
   if (InInitialChunk(c.address())) {
     // TODO(1240712): VirtualMemory::Uncommit has a return value which
     // is ignored here.
     initial_chunk_->Uncommit(c.address(), c.size());
     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
   } else {
     LOG(DeleteEvent("PagedChunk", c.address()));
-    FreeRawMemory(c.address(), c.size());
+    FreeRawMemory(c.address(), c.size(), c.owner()->executable());
   }
   c.init(NULL, 0, NULL);
   Push(chunk_id);
 }


 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
   int chunk_id = GetChunkId(p);
   ASSERT(IsValidChunk(chunk_id));

(...skipping 1940 matching lines...)
 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                         size_t* chunk_size,
                                         Executability executable) {
   size_t requested = ChunkSizeFor(size_in_bytes);
   void* mem = MemoryAllocator::AllocateRawMemory(requested,
                                                  chunk_size,
                                                  executable);
   if (mem == NULL) return NULL;
   LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
   if (*chunk_size < requested) {
-    MemoryAllocator::FreeRawMemory(mem, *chunk_size);
+    MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
     LOG(DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
   return reinterpret_cast<LargeObjectChunk*>(mem);
 }


 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
   int os_alignment = static_cast<int>(OS::AllocateAlignment());
   if (os_alignment < Page::kPageSize)
(...skipping 17 matching lines...)
   page_count_ = 0;
   return true;
 }


 void LargeObjectSpace::TearDown() {
   while (first_chunk_ != NULL) {
     LargeObjectChunk* chunk = first_chunk_;
     first_chunk_ = first_chunk_->next();
     LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
-    MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
+    Page* page = Page::FromAddress(chunk->address());

Søren Thygesen Gjesse 2010/08/18 09:36:27: This line should be: Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));

+    Executability executable =
+        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
+    MemoryAllocator::FreeRawMemory(chunk->address(),
+                                   chunk->size(),
+                                   executable);
   }

   size_ = 0;
   page_count_ = 0;
 }
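The review comment above points out that a large-object chunk's base address need not be page-aligned: the page header is initialized at the first Page::kPageSize boundary inside the chunk (see AllocateRawInternal below), so reading IsPageExecutable() via Page::FromAddress(chunk->address()) can land on the wrong page. A small sketch of the failure mode, using a simplified 8 KB page; RoundUp and PageFromAddress are simplified stand-ins for V8's versions:

#include <cstdint>
#include <cstdio>

const std::uintptr_t kPageSize = 8192;  // stand-in for Page::kPageSize

std::uintptr_t RoundUp(std::uintptr_t value, std::uintptr_t alignment) {
  return (value + alignment - 1) & ~(alignment - 1);
}

// Page::FromAddress masks an address down to its page base.
std::uintptr_t PageFromAddress(std::uintptr_t addr) {
  return addr & ~(kPageSize - 1);
}

int main() {
  std::uintptr_t chunk = 0x10000 + 64;  // chunk base, not page-aligned
  // Without rounding, the mask lands on the page *before* the header:
  std::printf("unrounded: %#lx\n",
              static_cast<unsigned long>(PageFromAddress(chunk)));
  // Rounding up first yields the page that actually holds the header:
  std::printf("rounded:   %#lx\n",
              static_cast<unsigned long>(
                  PageFromAddress(RoundUp(chunk, kPageSize))));
}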


 #ifdef ENABLE_HEAP_PROTECTION

 void LargeObjectSpace::Protect() {
(...skipping 43 matching lines...)
   first_chunk_ = chunk;

   // Initialize page header.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
   // Clear the low order bit of the second word in the page to flag it as a
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
+  page->SetIsPageExecutable(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
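Recording the space's executability in the page header at allocation time is what lets the teardown and sweep paths recover the flag later without any extra bookkeeping. A minimal sketch of such a header flag as a bit in a flags word (the bit layout is an assumption for illustration; V8's actual encoding and the IsPageExecutable() accessor live in spaces.h):

#include <cassert>

// Illustrative page-header flag word.
struct PageHeader {
  static const int kIsLargeObjectPageBit = 0;
  static const int kIsPageExecutableBit = 1;  // bit position is an assumption
  int flags = 0;

  void SetIsLargeObjectPage(bool v) { SetBit(kIsLargeObjectPageBit, v); }
  void SetIsPageExecutable(bool v) { SetBit(kIsPageExecutableBit, v); }
  bool IsPageExecutable() const { return (flags >> kIsPageExecutableBit) & 1; }

 private:
  void SetBit(int bit, bool v) {
    if (v) flags |= (1 << bit); else flags &= ~(1 << bit);
  }
};

int main() {
  PageHeader page;
  page.SetIsLargeObjectPage(true);
  page.SetIsPageExecutable(true);   // recorded at allocation
  assert(page.IsPageExecutable());  // recovered at free time
}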


 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
   ASSERT(0 < size_in_bytes);
   return AllocateRawInternal(size_in_bytes,
                              size_in_bytes,
                              EXECUTABLE);
(...skipping 94 matching lines...)
   LargeObjectChunk* previous = NULL;
   LargeObjectChunk* current = first_chunk_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
     if (object->IsMarked()) {
       object->ClearMark();
       MarkCompactCollector::tracer()->decrement_marked_count();
       previous = current;
       current = current->next();
     } else {
+      Page* page = Page::FromAddress(RoundUp(current->address(),
+                                             Page::kPageSize));
+      Executability executable =
+          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
       Address chunk_address = current->address();
       size_t chunk_size = current->size();

       // Cut the chunk out from the chunk list.
       current = current->next();
       if (previous == NULL) {
         first_chunk_ = current;
       } else {
         previous->set_next(current);
       }

       // Free the chunk.
       MarkCompactCollector::ReportDeleteIfNeeded(object);
       size_ -= static_cast<int>(chunk_size);
       page_count_--;
-      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
+      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
       LOG(DeleteEvent("LargeObjectChunk", chunk_address));
     }
   }
 }
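Putting the pieces together: the sweep recovers each dead chunk's executability from its page flag and decrements the counter with the matching flag, so the accounting balances without per-chunk records in the allocator. A compact end-to-end model of that lifecycle (types and sizes are illustrative):

#include <cassert>
#include <cstddef>
#include <vector>

enum Executability { NOT_EXECUTABLE, EXECUTABLE };

// Toy large-object chunk: remembers its executability via a page-style flag,
// the way the real code recovers it from Page::IsPageExecutable().
struct Chunk {
  size_t size;
  bool executable_flag;
};

int main() {
  int size_executable = 0;
  std::vector<Chunk> chunks;

  // Allocation records the flag on the "page" and bumps the counter.
  chunks.push_back({8192, true});
  chunks.push_back({4096, false});
  size_executable += 8192;

  // Sweep: recover the flag from each chunk and decrement symmetrically.
  for (const Chunk& c : chunks) {
    Executability executable = c.executable_flag ? EXECUTABLE : NOT_EXECUTABLE;
    if (executable == EXECUTABLE) size_executable -= static_cast<int>(c.size);
  }
  chunks.clear();

  assert(size_executable == 0);  // accounting balances after the sweep
}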


 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
   if (Heap::new_space()->Contains(address)) {
     return false;
(...skipping 95 matching lines...)
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       code_kind_statistics[code->kind()] += code->Size();
     }
   }
 }
 #endif  // DEBUG

 } }  // namespace v8::internal
