Chromium Code Reviews

Diff: src/spaces.cc

Issue 3197010: Version 2.3.10... (Closed) Base URL: http://v8.googlecode.com/svn/trunk/
Patch Set: Created 10 years, 4 months ago
 // Copyright 2006-2008 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 23 matching lines...)
 namespace v8 {
 namespace internal {

 // For contiguous spaces, top should be in the space (or at the end) and limit
 // should be the end of the space.
 #define ASSERT_SEMISPACE_ALLOCATION_INFO(info, space) \
   ASSERT((space).low() <= (info).top                  \
          && (info).top <= (space).high()              \
          && (info).limit == (space).high())

-intptr_t Page::watermark_invalidated_mark_ = Page::WATERMARK_INVALIDATED;
+intptr_t Page::watermark_invalidated_mark_ = 1 << Page::WATERMARK_INVALIDATED;
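
Note on this hunk: WATERMARK_INVALIDATED appears to be a bit position in the page flags word, so the stored sentinel must be the shifted mask, not the raw index. A minimal compilable sketch of the distinction, with illustrative names rather than the actual Page declarations:

    #include <cassert>
    #include <cstdint>

    // Hypothetical flag bits in the spirit of Page's flags word.
    enum PageFlag { IS_LARGE_OBJECT = 0, WATERMARK_INVALIDATED = 1 };

    int main() {
      intptr_t flags = 0;
      flags |= 1 << WATERMARK_INVALIDATED;  // set via the mask (bit 1)
      assert((flags & (1 << WATERMARK_INVALIDATED)) != 0);
      // Storing the bare index (1) instead of the mask (1 << 1) would make
      // a later test like (flags & mark) check bit 0, i.e. the wrong flag.
      return 0;
    }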

 // ----------------------------------------------------------------------------
 // HeapObjectIterator

 HeapObjectIterator::HeapObjectIterator(PagedSpace* space) {
   Initialize(space->bottom(), space->top(), NULL);
 }


 HeapObjectIterator::HeapObjectIterator(PagedSpace* space,
(...skipping 204 matching lines...)
   free_list_.Free();
   allocation_list_.Free();
 }


 // ----------------------------------------------------------------------------
 // MemoryAllocator
 //
 int MemoryAllocator::capacity_ = 0;
 int MemoryAllocator::size_ = 0;
+int MemoryAllocator::size_executable_ = 0;

 VirtualMemory* MemoryAllocator::initial_chunk_ = NULL;

 // 270 is an estimate based on the static default heap size of a pair of 256K
 // semispaces and a 64M old generation.
 const int kEstimatedNumberOfChunks = 270;
 List<MemoryAllocator::ChunkInfo> MemoryAllocator::chunks_(
     kEstimatedNumberOfChunks);
 List<int> MemoryAllocator::free_chunk_ids_(kEstimatedNumberOfChunks);
 int MemoryAllocator::max_nof_chunks_ = 0;
 int MemoryAllocator::top_ = 0;


 void MemoryAllocator::Push(int free_chunk_id) {
   ASSERT(max_nof_chunks_ > 0);
   ASSERT(top_ < max_nof_chunks_);
   free_chunk_ids_[top_++] = free_chunk_id;
 }


 int MemoryAllocator::Pop() {
   ASSERT(top_ > 0);
   return free_chunk_ids_[--top_];
 }


+void *executable_memory_histogram = NULL;
+
 bool MemoryAllocator::Setup(int capacity) {
   capacity_ = RoundUp(capacity, Page::kPageSize);

   // Over-estimate the size of chunks_ array.  It assumes the expansion of old
   // space is always in the unit of a chunk (kChunkSize) except the last
   // expansion.
   //
   // Due to alignment, allocated space might be one page less than required
   // number (kPagesPerChunk) of pages for old spaces.
   //
   // Reserve two chunk ids for semispaces, one for map space, one for old
   // space, and one for code space.
   max_nof_chunks_ = (capacity_ / (kChunkSize - Page::kPageSize)) + 5;
   if (max_nof_chunks_ > kMaxNofChunks) return false;

   size_ = 0;
+  size_executable_ = 0;
+  executable_memory_histogram =
+      StatsTable::CreateHistogram("V8.ExecutableMemoryMax", 0, MB * 512, 50);
   ChunkInfo info;  // uninitialized element.
   for (int i = max_nof_chunks_ - 1; i >= 0; i--) {
     chunks_.Add(info);
     free_chunk_ids_.Add(i);
   }
   top_ = max_nof_chunks_;
   return true;
 }
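
To make the over-estimate concrete, a worked example with purely illustrative constants (the real kChunkSize and Page::kPageSize values live in spaces.h): assuming kChunkSize = 128 KB and Page::kPageSize = 8 KB, a 64 MB capacity gives max_nof_chunks_ = 64 MB / (128 KB - 8 KB) + 5 = 546 + 5 = 551. Dividing by (kChunkSize - Page::kPageSize) rather than kChunkSize accounts for the page that alignment can cost in each chunk, and the + 5 covers the five reserved chunk ids the comment enumerates (two semispaces, map space, old space, and code space).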


(...skipping 25 matching lines...)
     return NULL;
   }
   void* mem;
   if (executable == EXECUTABLE && CodeRange::exists()) {
     mem = CodeRange::AllocateRawMemory(requested, allocated);
   } else {
     mem = OS::Allocate(requested, allocated, (executable == EXECUTABLE));
   }
   int alloced = static_cast<int>(*allocated);
   size_ += alloced;
+
+  if (executable == EXECUTABLE) {
+    size_executable_ += alloced;
+    static int size_executable_max_observed_ = 0;
+    if (size_executable_max_observed_ < size_executable_) {
+      size_executable_max_observed_ = size_executable_;
+      StatsTable::AddHistogramSample(executable_memory_histogram,
+                                     size_executable_);
+    }
+  }
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), alloced);
 #endif
   Counters::memory_allocated.Increment(alloced);
   return mem;
 }


-void MemoryAllocator::FreeRawMemory(void* mem, size_t length) {
+void MemoryAllocator::FreeRawMemory(void* mem,
+                                    size_t length,
+                                    Executability executable) {
 #ifdef DEBUG
   ZapBlock(reinterpret_cast<Address>(mem), length);
 #endif
   if (CodeRange::contains(static_cast<Address>(mem))) {
     CodeRange::FreeRawMemory(mem, length);
   } else {
     OS::Free(mem, length);
   }
   Counters::memory_allocated.Decrement(static_cast<int>(length));
   size_ -= static_cast<int>(length);
+  if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
   ASSERT(size_ >= 0);
 }
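
The pattern this pair of hunks introduces, a running counter bumped on allocate and decremented on free, with a histogram sample emitted only when a new maximum is observed, can be sketched in isolation. Everything below uses stand-in names, not the actual V8 StatsTable API:

    #include <cstddef>

    // Stand-in tracker; records a sample only when a new peak is reached.
    struct ExecutableMemoryTracker {
      int size_executable = 0;  // current executable bytes
      int max_observed = 0;     // high watermark seen so far

      void OnAllocate(size_t bytes) {
        size_executable += static_cast<int>(bytes);
        if (size_executable > max_observed) {
          max_observed = size_executable;
          RecordSample(size_executable);  // sample only on a new peak
        }
      }

      void OnFree(size_t bytes) {
        size_executable -= static_cast<int>(bytes);
      }

      void RecordSample(int value) { /* forward to a histogram */ }
    };

Sampling only when the watermark grows keeps the hot allocation path cheap while still capturing the peak, which matches the "V8.ExecutableMemoryMax" histogram name used above.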


 void* MemoryAllocator::ReserveInitialChunk(const size_t requested) {
   ASSERT(initial_chunk_ == NULL);

   initial_chunk_ = new VirtualMemory(requested);
   CHECK(initial_chunk_ != NULL);
   if (!initial_chunk_->IsReserved()) {
(...skipping 33 matching lines...)
     requested_pages = static_cast<int>(chunk_size >> kPageSizeBits);

     if (requested_pages <= 0) return Page::FromAddress(NULL);
   }
   void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
   if (chunk == NULL) return Page::FromAddress(NULL);
   LOG(NewEvent("PagedChunk", chunk, chunk_size));

   *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
   if (*allocated_pages == 0) {
-    FreeRawMemory(chunk, chunk_size);
+    FreeRawMemory(chunk, chunk_size, owner->executable());
     LOG(DeleteEvent("PagedChunk", chunk));
     return Page::FromAddress(NULL);
   }

   int chunk_id = Pop();
   chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);

   return InitializePagesInChunk(chunk_id, *allocated_pages, owner);
 }

(...skipping 145 matching lines...)
   // We cannot free a chunk contained in the initial chunk because it was not
   // allocated with AllocateRawMemory.  Instead we uncommit the virtual
   // memory.
   if (InInitialChunk(c.address())) {
     // TODO(1240712): VirtualMemory::Uncommit has a return value which
     // is ignored here.
     initial_chunk_->Uncommit(c.address(), c.size());
     Counters::memory_allocated.Decrement(static_cast<int>(c.size()));
   } else {
     LOG(DeleteEvent("PagedChunk", c.address()));
-    FreeRawMemory(c.address(), c.size());
+    FreeRawMemory(c.address(), c.size(), c.owner()->executable());
   }
   c.init(NULL, 0, NULL);
   Push(chunk_id);
 }
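
Why the two branches: memory carved out of the pre-reserved initial chunk was never handed out by AllocateRawMemory, so it is returned by uncommitting pages inside the reservation rather than by a full free. A minimal POSIX illustration of the reserve/commit/uncommit idea (not the actual VirtualMemory implementation; madvise and MAP_ANONYMOUS semantics vary by platform):

    #include <sys/mman.h>
    #include <cassert>
    #include <cstddef>

    int main() {
      const size_t kReserved = 1 << 20;  // reserve 1 MB of address space
      void* base = mmap(NULL, kReserved, PROT_NONE,
                        MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
      assert(base != MAP_FAILED);

      // Commit (make usable) the first 64 KB inside the reservation.
      const size_t kCommitted = 64 * 1024;
      mprotect(base, kCommitted, PROT_READ | PROT_WRITE);

      // Uncommit: drop the pages but keep the address range reserved,
      // so the same reservation can be recommitted later.
      madvise(base, kCommitted, MADV_DONTNEED);
      mprotect(base, kCommitted, PROT_NONE);

      munmap(base, kReserved);  // finally release the whole reservation
      return 0;
    }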


 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
   int chunk_id = GetChunkId(p);
   ASSERT(IsValidChunk(chunk_id));

(...skipping 1940 matching lines...)
 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
                                         size_t* chunk_size,
                                         Executability executable) {
   size_t requested = ChunkSizeFor(size_in_bytes);
   void* mem = MemoryAllocator::AllocateRawMemory(requested,
                                                  chunk_size,
                                                  executable);
   if (mem == NULL) return NULL;
   LOG(NewEvent("LargeObjectChunk", mem, *chunk_size));
   if (*chunk_size < requested) {
-    MemoryAllocator::FreeRawMemory(mem, *chunk_size);
+    MemoryAllocator::FreeRawMemory(mem, *chunk_size, executable);
     LOG(DeleteEvent("LargeObjectChunk", mem));
     return NULL;
   }
   return reinterpret_cast<LargeObjectChunk*>(mem);
 }


 int LargeObjectChunk::ChunkSizeFor(int size_in_bytes) {
   int os_alignment = static_cast<int>(OS::AllocateAlignment());
   if (os_alignment < Page::kPageSize)
(...skipping 17 matching lines...)
   page_count_ = 0;
   return true;
 }


 void LargeObjectSpace::TearDown() {
   while (first_chunk_ != NULL) {
     LargeObjectChunk* chunk = first_chunk_;
     first_chunk_ = first_chunk_->next();
     LOG(DeleteEvent("LargeObjectChunk", chunk->address()));
-    MemoryAllocator::FreeRawMemory(chunk->address(), chunk->size());
+    Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
+    Executability executable =
+        page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
+    MemoryAllocator::FreeRawMemory(chunk->address(),
+                                   chunk->size(),
+                                   executable);
   }

   size_ = 0;
   page_count_ = 0;
 }


 #ifdef ENABLE_HEAP_PROTECTION

 void LargeObjectSpace::Protect() {
(...skipping 43 matching lines...)
   first_chunk_ = chunk;

   // Initialize page header.
   Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
   Address object_address = page->ObjectAreaStart();
   // Clear the low order bit of the second word in the page to flag it as a
   // large object page.  If the chunk_size happened to be written there, its
   // low order bit should already be clear.
   ASSERT((chunk_size & 0x1) == 0);
   page->SetIsLargeObjectPage(true);
+  page->SetIsPageExecutable(executable);
   page->SetRegionMarks(Page::kAllRegionsCleanMarks);
   return HeapObject::FromAddress(object_address);
 }
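
The SetIsPageExecutable/IsPageExecutable pair lets the space recover a chunk's executability at free time (see TearDown above and FreeUnmarkedObjects below) without threading the flag through every caller. A stand-alone sketch of the idea, with stand-in names rather than the real Page layout:

    #include <cstdint>

    // Stand-in page header: executability is one bit in a flags word.
    struct PageHeader {
      static const uint32_t kIsExecutableBit = 1u << 0;
      uint32_t flags = 0;

      void SetIsPageExecutable(bool executable) {
        if (executable) flags |= kIsExecutableBit;
        else            flags &= ~kIsExecutableBit;
      }
      bool IsPageExecutable() const { return (flags & kIsExecutableBit) != 0; }
    };

    // At allocation: header.SetIsPageExecutable(executable == EXECUTABLE);
    // At free time:  executable = header.IsPageExecutable() ? EXECUTABLE
    //                                                       : NOT_EXECUTABLE;

Recording the flag in the page header is what makes the new three-argument FreeRawMemory callable from sweep paths that otherwise would not know how the chunk was allocated.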


 Object* LargeObjectSpace::AllocateRawCode(int size_in_bytes) {
   ASSERT(0 < size_in_bytes);
   return AllocateRawInternal(size_in_bytes,
                              size_in_bytes,
                              EXECUTABLE);
(...skipping 94 matching lines...)
   LargeObjectChunk* previous = NULL;
   LargeObjectChunk* current = first_chunk_;
   while (current != NULL) {
     HeapObject* object = current->GetObject();
     if (object->IsMarked()) {
       object->ClearMark();
       MarkCompactCollector::tracer()->decrement_marked_count();
       previous = current;
       current = current->next();
     } else {
+      Page* page = Page::FromAddress(RoundUp(current->address(),
+                                             Page::kPageSize));
+      Executability executable =
+          page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
       Address chunk_address = current->address();
       size_t chunk_size = current->size();

       // Cut the chunk out from the chunk list.
       current = current->next();
       if (previous == NULL) {
         first_chunk_ = current;
       } else {
         previous->set_next(current);
       }

       // Free the chunk.
       MarkCompactCollector::ReportDeleteIfNeeded(object);
       size_ -= static_cast<int>(chunk_size);
       page_count_--;
-      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size);
+      MemoryAllocator::FreeRawMemory(chunk_address, chunk_size, executable);
       LOG(DeleteEvent("LargeObjectChunk", chunk_address));
     }
   }
 }


 bool LargeObjectSpace::Contains(HeapObject* object) {
   Address address = object->address();
   if (Heap::new_space()->Contains(address)) {
     return false;
(...skipping 95 matching lines...)
   for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       code_kind_statistics[code->kind()] += code->Size();
     }
   }
 }
 #endif  // DEBUG

 } }  // namespace v8::internal
