Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(75)

Side by Side Diff: src/spaces.cc

Issue 7379004: Add guard pages in front of platform allocations (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/spaces.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 384 matching lines...) Expand 10 before | Expand all | Expand 10 after
395 #endif 395 #endif
396 isolate_->counters()->memory_allocated()->Increment(alloced); 396 isolate_->counters()->memory_allocated()->Increment(alloced);
397 return mem; 397 return mem;
398 } 398 }
399 399
400 400
401 void MemoryAllocator::FreeRawMemory(void* mem, 401 void MemoryAllocator::FreeRawMemory(void* mem,
402 size_t length, 402 size_t length,
403 Executability executable) { 403 Executability executable) {
404 #ifdef DEBUG 404 #ifdef DEBUG
405 ZapBlock(reinterpret_cast<Address>(mem), length); 405 // Do not try to zap the guard page.
406 size_t guardsize = (executable == EXECUTABLE) ? Page::kPageSize : 0;
407 ZapBlock(reinterpret_cast<Address>(mem) + guardsize, length - guardsize);
406 #endif 408 #endif
407 if (isolate_->code_range()->contains(static_cast<Address>(mem))) { 409 if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
408 isolate_->code_range()->FreeRawMemory(mem, length); 410 isolate_->code_range()->FreeRawMemory(mem, length);
409 } else { 411 } else {
410 OS::Free(mem, length); 412 OS::Free(mem, length);
411 } 413 }
412 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length)); 414 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
413 size_ -= static_cast<int>(length); 415 size_ -= static_cast<int>(length);
414 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); 416 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
415 417
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
496 Page* MemoryAllocator::AllocatePages(int requested_pages, 498 Page* MemoryAllocator::AllocatePages(int requested_pages,
497 int* allocated_pages, 499 int* allocated_pages,
498 PagedSpace* owner) { 500 PagedSpace* owner) {
499 if (requested_pages <= 0) return Page::FromAddress(NULL); 501 if (requested_pages <= 0) return Page::FromAddress(NULL);
500 size_t chunk_size = requested_pages * Page::kPageSize; 502 size_t chunk_size = requested_pages * Page::kPageSize;
501 503
502 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); 504 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
503 if (chunk == NULL) return Page::FromAddress(NULL); 505 if (chunk == NULL) return Page::FromAddress(NULL);
504 LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size)); 506 LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
505 507
508 size_t guardsize = (owner->executable() == EXECUTABLE) ? Page::kPageSize : 0;
509
506 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); 510 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
507 // We may 'lose' a page due to alignment. 511 // We may 'lose' a page due to alignment or for a guard page.
508 ASSERT(*allocated_pages >= kPagesPerChunk - 1); 512 ASSERT(*allocated_pages >=
509 if (*allocated_pages == 0) { 513 kPagesPerChunk - ((guardsize ? 1 : 0) + 1));
Mads Ager (chromium) 2011/07/20 07:24:16 Now that you have moved the fiddling with the chunk
510 FreeRawMemory(chunk, chunk_size, owner->executable()); 514 if (*allocated_pages <= (guardsize ? 1 : 0)) {
515 FreeRawMemory(chunk,
516 chunk_size,
517 owner->executable());
511 LOG(isolate_, DeleteEvent("PagedChunk", chunk)); 518 LOG(isolate_, DeleteEvent("PagedChunk", chunk));
512 return Page::FromAddress(NULL); 519 return Page::FromAddress(NULL);
513 } 520 }
514 521
522 if (guardsize != 0) {
523 OS::Guard(chunk, guardsize);
524 chunk_size -= guardsize;
525 chunk = static_cast<Address>(chunk) + guardsize;
526 --*allocated_pages;
527 }
528
515 int chunk_id = Pop(); 529 int chunk_id = Pop();
516 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); 530 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
517 531
518 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); 532 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
519 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); 533 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
520 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner); 534 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
521 535
522 return new_pages; 536 return new_pages;
523 } 537 }
524 538
(...skipping 149 matching lines...) Expand 10 before | Expand all | Expand 10 after
674 if (InInitialChunk(c.address())) { 688 if (InInitialChunk(c.address())) {
675 // TODO(1240712): VirtualMemory::Uncommit has a return value which 689 // TODO(1240712): VirtualMemory::Uncommit has a return value which
676 // is ignored here. 690 // is ignored here.
677 initial_chunk_->Uncommit(c.address(), c.size()); 691 initial_chunk_->Uncommit(c.address(), c.size());
678 Counters* counters = isolate_->counters(); 692 Counters* counters = isolate_->counters();
679 counters->memory_allocated()->Decrement(static_cast<int>(c.size())); 693 counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
680 } else { 694 } else {
681 LOG(isolate_, DeleteEvent("PagedChunk", c.address())); 695 LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
682 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity()); 696 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
683 size_t size = c.size(); 697 size_t size = c.size();
684 FreeRawMemory(c.address(), size, c.executable()); 698 size_t guardsize = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
699 FreeRawMemory(c.address() - guardsize, size + guardsize, c.executable());
685 PerformAllocationCallback(space, kAllocationActionFree, size); 700 PerformAllocationCallback(space, kAllocationActionFree, size);
686 } 701 }
687 c.init(NULL, 0, NULL); 702 c.init(NULL, 0, NULL);
688 Push(chunk_id); 703 Push(chunk_id);
689 } 704 }
690 705
691 706
692 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { 707 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
693 int chunk_id = GetChunkId(p); 708 int chunk_id = GetChunkId(p);
694 ASSERT(IsValidChunk(chunk_id)); 709 ASSERT(IsValidChunk(chunk_id));
(...skipping 1970 matching lines...) Expand 10 before | Expand all | Expand 10 after
2665 } 2680 }
2666 2681
2667 2682
2668 // ----------------------------------------------------------------------------- 2683 // -----------------------------------------------------------------------------
2669 // LargeObjectChunk 2684 // LargeObjectChunk
2670 2685
2671 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes, 2686 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2672 Executability executable) { 2687 Executability executable) {
2673 size_t requested = ChunkSizeFor(size_in_bytes); 2688 size_t requested = ChunkSizeFor(size_in_bytes);
2674 size_t size; 2689 size_t size;
2690 size_t guardsize = (executable == EXECUTABLE) ? Page::kPageSize : 0;
2675 Isolate* isolate = Isolate::Current(); 2691 Isolate* isolate = Isolate::Current();
2676 void* mem = isolate->memory_allocator()->AllocateRawMemory( 2692 void* mem = isolate->memory_allocator()->AllocateRawMemory(
2677 requested, &size, executable); 2693 requested + guardsize, &size, executable);
2678 if (mem == NULL) return NULL; 2694 if (mem == NULL) return NULL;
2679 2695
2680 // The start of the chunk may be overlayed with a page so we have to 2696 // The start of the chunk may be overlayed with a page so we have to
2681 // make sure that the page flags fit in the size field. 2697 // make sure that the page flags fit in the size field.
2682 ASSERT((size & Page::kPageFlagMask) == 0); 2698 ASSERT((size & Page::kPageFlagMask) == 0);
2683 2699
2684 LOG(isolate, NewEvent("LargeObjectChunk", mem, size)); 2700 LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
2685 if (size < requested) { 2701 if (size < requested + guardsize) {
2686 isolate->memory_allocator()->FreeRawMemory( 2702 isolate->memory_allocator()->FreeRawMemory(
2687 mem, size, executable); 2703 mem, size, executable);
2688 LOG(isolate, DeleteEvent("LargeObjectChunk", mem)); 2704 LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
2689 return NULL; 2705 return NULL;
2690 } 2706 }
2691 2707
2708 if (guardsize != 0) {
2709 OS::Guard(mem, guardsize);
2710 size -= guardsize;
2711 mem = static_cast<Address>(mem) + guardsize;
2712 }
2713
2692 ObjectSpace space = (executable == EXECUTABLE) 2714 ObjectSpace space = (executable == EXECUTABLE)
2693 ? kObjectSpaceCodeSpace 2715 ? kObjectSpaceCodeSpace
2694 : kObjectSpaceLoSpace; 2716 : kObjectSpaceLoSpace;
2695 isolate->memory_allocator()->PerformAllocationCallback( 2717 isolate->memory_allocator()->PerformAllocationCallback(
2696 space, kAllocationActionAllocate, size); 2718 space, kAllocationActionAllocate, size);
2697 2719
2698 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); 2720 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2699 chunk->size_ = size; 2721 chunk->size_ = size;
2700 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); 2722 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2701 page->heap_ = isolate->heap(); 2723 page->heap_ = isolate->heap();
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
2735 while (first_chunk_ != NULL) { 2757 while (first_chunk_ != NULL) {
2736 LargeObjectChunk* chunk = first_chunk_; 2758 LargeObjectChunk* chunk = first_chunk_;
2737 first_chunk_ = first_chunk_->next(); 2759 first_chunk_ = first_chunk_->next();
2738 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address())); 2760 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
2739 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); 2761 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2740 Executability executable = 2762 Executability executable =
2741 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; 2763 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
2742 ObjectSpace space = kObjectSpaceLoSpace; 2764 ObjectSpace space = kObjectSpaceLoSpace;
2743 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; 2765 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2744 size_t size = chunk->size(); 2766 size_t size = chunk->size();
2745 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(), 2767 size_t guardsize = (executable == EXECUTABLE) ? Page::kPageSize : 0;
2746 size, 2768 heap()->isolate()->memory_allocator()->FreeRawMemory(
2747 executable); 2769 chunk->address() - guardsize,
2770 size + guardsize,
2771 executable);
2748 heap()->isolate()->memory_allocator()->PerformAllocationCallback( 2772 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2749 space, kAllocationActionFree, size); 2773 space, kAllocationActionFree, size);
2750 } 2774 }
2751 2775
2752 size_ = 0; 2776 size_ = 0;
2753 page_count_ = 0; 2777 page_count_ = 0;
2754 objects_size_ = 0; 2778 objects_size_ = 0;
2755 } 2779 }
2756 2780
2757 2781
(...skipping 176 matching lines...) Expand 10 before | Expand all | Expand 10 after
2934 2958
2935 // Free the chunk. 2959 // Free the chunk.
2936 heap()->mark_compact_collector()->ReportDeleteIfNeeded( 2960 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2937 object, heap()->isolate()); 2961 object, heap()->isolate());
2938 LiveObjectList::ProcessNonLive(object); 2962 LiveObjectList::ProcessNonLive(object);
2939 2963
2940 size_ -= static_cast<int>(chunk_size); 2964 size_ -= static_cast<int>(chunk_size);
2941 objects_size_ -= object->Size(); 2965 objects_size_ -= object->Size();
2942 page_count_--; 2966 page_count_--;
2943 ObjectSpace space = kObjectSpaceLoSpace; 2967 ObjectSpace space = kObjectSpaceLoSpace;
2944 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; 2968 size_t guardsize = 0;
2945 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address, 2969 if (executable == EXECUTABLE) {
2946 chunk_size, 2970 space = kObjectSpaceCodeSpace;
2947 executable); 2971 guardsize = Page::kPageSize;
2972 }
2973 heap()->isolate()->memory_allocator()->FreeRawMemory(
2974 chunk_address - guardsize,
2975 chunk_size + guardsize,
2976 executable);
2948 heap()->isolate()->memory_allocator()->PerformAllocationCallback( 2977 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2949 space, kAllocationActionFree, size_); 2978 space, kAllocationActionFree, size_);
2950 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address)); 2979 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
2951 } 2980 }
2952 } 2981 }
2953 } 2982 }
2954 2983
2955 2984
2956 bool LargeObjectSpace::Contains(HeapObject* object) { 2985 bool LargeObjectSpace::Contains(HeapObject* object) {
2957 Address address = object->address(); 2986 Address address = object->address();
(...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after
3057 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 3086 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
3058 if (obj->IsCode()) { 3087 if (obj->IsCode()) {
3059 Code* code = Code::cast(obj); 3088 Code* code = Code::cast(obj);
3060 isolate->code_kind_statistics()[code->kind()] += code->Size(); 3089 isolate->code_kind_statistics()[code->kind()] += code->Size();
3061 } 3090 }
3062 } 3091 }
3063 } 3092 }
3064 #endif // DEBUG 3093 #endif // DEBUG
3065 3094
3066 } } // namespace v8::internal 3095 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/spaces.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698