Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(269)

Side by Side Diff: src/spaces.cc

Issue 7379004: Add guard pages in front of platform allocations (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/spaces.h ('k') | no next file » | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 384 matching lines...) Expand 10 before | Expand all | Expand 10 after
395 #endif 395 #endif
396 isolate_->counters()->memory_allocated()->Increment(alloced); 396 isolate_->counters()->memory_allocated()->Increment(alloced);
397 return mem; 397 return mem;
398 } 398 }
399 399
400 400
401 void MemoryAllocator::FreeRawMemory(void* mem, 401 void MemoryAllocator::FreeRawMemory(void* mem,
402 size_t length, 402 size_t length,
403 Executability executable) { 403 Executability executable) {
404 #ifdef DEBUG 404 #ifdef DEBUG
405 ZapBlock(reinterpret_cast<Address>(mem), length); 405 // Do not try to zap the guard page.
406 size_t guardsize = (executable == EXECUTABLE) ? Page::kPageSize : 0;
407 ZapBlock(reinterpret_cast<Address>(mem) + guardsize, length - guardsize);
406 #endif 408 #endif
407 if (isolate_->code_range()->contains(static_cast<Address>(mem))) { 409 if (isolate_->code_range()->contains(static_cast<Address>(mem))) {
408 isolate_->code_range()->FreeRawMemory(mem, length); 410 isolate_->code_range()->FreeRawMemory(mem, length);
409 } else { 411 } else {
410 OS::Free(mem, length); 412 OS::Free(mem, length);
411 } 413 }
412 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length)); 414 isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(length));
413 size_ -= static_cast<int>(length); 415 size_ -= static_cast<int>(length);
414 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length); 416 if (executable == EXECUTABLE) size_executable_ -= static_cast<int>(length);
415 417
(...skipping 80 matching lines...) Expand 10 before | Expand all | Expand 10 after
496 Page* MemoryAllocator::AllocatePages(int requested_pages, 498 Page* MemoryAllocator::AllocatePages(int requested_pages,
497 int* allocated_pages, 499 int* allocated_pages,
498 PagedSpace* owner) { 500 PagedSpace* owner) {
499 if (requested_pages <= 0) return Page::FromAddress(NULL); 501 if (requested_pages <= 0) return Page::FromAddress(NULL);
500 size_t chunk_size = requested_pages * Page::kPageSize; 502 size_t chunk_size = requested_pages * Page::kPageSize;
501 503
502 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable()); 504 void* chunk = AllocateRawMemory(chunk_size, &chunk_size, owner->executable());
503 if (chunk == NULL) return Page::FromAddress(NULL); 505 if (chunk == NULL) return Page::FromAddress(NULL);
504 LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size)); 506 LOG(isolate_, NewEvent("PagedChunk", chunk, chunk_size));
505 507
508 size_t guardsize = 0;
509 if (owner->executable() == EXECUTABLE) {
510 guardsize = Page::kPageSize;
511 OS::Guard(chunk, guardsize);
512 chunk_size -= guardsize;
513 chunk = static_cast<Address>(chunk) + guardsize;
514 }
515
506 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size); 516 *allocated_pages = PagesInChunk(static_cast<Address>(chunk), chunk_size);
507 // We may 'lose' a page due to alignment. 517 // We may 'lose' a page due to alignment.
508 ASSERT(*allocated_pages >= kPagesPerChunk - 1); 518 ASSERT(*allocated_pages >= kPagesPerChunk - 1);
Mads Ager (chromium) 2011/07/19 09:11:14 Will this actually work? What if we lose a page due to alignment?
Cris Neckar 2011/07/19 18:35:32 Done.
509 if (*allocated_pages == 0) { 519 if (*allocated_pages == 0) {
510 FreeRawMemory(chunk, chunk_size, owner->executable()); 520 FreeRawMemory(static_cast<Address>(chunk) - guardsize,
521 chunk_size + guardsize,
522 owner->executable());
511 LOG(isolate_, DeleteEvent("PagedChunk", chunk)); 523 LOG(isolate_, DeleteEvent("PagedChunk", chunk));
512 return Page::FromAddress(NULL); 524 return Page::FromAddress(NULL);
513 } 525 }
514 526
515 int chunk_id = Pop(); 527 int chunk_id = Pop();
516 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner); 528 chunks_[chunk_id].init(static_cast<Address>(chunk), chunk_size, owner);
517 529
518 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); 530 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
519 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); 531 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
520 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner); 532 Page* new_pages = InitializePagesInChunk(chunk_id, *allocated_pages, owner);
(...skipping 153 matching lines...) Expand 10 before | Expand all | Expand 10 after
674 if (InInitialChunk(c.address())) { 686 if (InInitialChunk(c.address())) {
675 // TODO(1240712): VirtualMemory::Uncommit has a return value which 687 // TODO(1240712): VirtualMemory::Uncommit has a return value which
676 // is ignored here. 688 // is ignored here.
677 initial_chunk_->Uncommit(c.address(), c.size()); 689 initial_chunk_->Uncommit(c.address(), c.size());
678 Counters* counters = isolate_->counters(); 690 Counters* counters = isolate_->counters();
679 counters->memory_allocated()->Decrement(static_cast<int>(c.size())); 691 counters->memory_allocated()->Decrement(static_cast<int>(c.size()));
680 } else { 692 } else {
681 LOG(isolate_, DeleteEvent("PagedChunk", c.address())); 693 LOG(isolate_, DeleteEvent("PagedChunk", c.address()));
682 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity()); 694 ObjectSpace space = static_cast<ObjectSpace>(1 << c.owner_identity());
683 size_t size = c.size(); 695 size_t size = c.size();
684 FreeRawMemory(c.address(), size, c.executable()); 696 size_t guardsize = (c.executable() == EXECUTABLE) ? Page::kPageSize : 0;
697 FreeRawMemory(c.address() - guardsize, size + guardsize, c.executable());
685 PerformAllocationCallback(space, kAllocationActionFree, size); 698 PerformAllocationCallback(space, kAllocationActionFree, size);
686 } 699 }
687 c.init(NULL, 0, NULL); 700 c.init(NULL, 0, NULL);
688 Push(chunk_id); 701 Push(chunk_id);
689 } 702 }
690 703
691 704
692 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) { 705 Page* MemoryAllocator::FindFirstPageInSameChunk(Page* p) {
693 int chunk_id = GetChunkId(p); 706 int chunk_id = GetChunkId(p);
694 ASSERT(IsValidChunk(chunk_id)); 707 ASSERT(IsValidChunk(chunk_id));
(...skipping 1970 matching lines...) Expand 10 before | Expand all | Expand 10 after
2665 } 2678 }
2666 2679
2667 2680
2668 // ----------------------------------------------------------------------------- 2681 // -----------------------------------------------------------------------------
2669 // LargeObjectChunk 2682 // LargeObjectChunk
2670 2683
2671 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes, 2684 LargeObjectChunk* LargeObjectChunk::New(int size_in_bytes,
2672 Executability executable) { 2685 Executability executable) {
2673 size_t requested = ChunkSizeFor(size_in_bytes); 2686 size_t requested = ChunkSizeFor(size_in_bytes);
2674 size_t size; 2687 size_t size;
2688 size_t guardsize = (executable == EXECUTABLE) ? Page::kPageSize : 0;
2675 Isolate* isolate = Isolate::Current(); 2689 Isolate* isolate = Isolate::Current();
2676 void* mem = isolate->memory_allocator()->AllocateRawMemory( 2690 void* mem = isolate->memory_allocator()->AllocateRawMemory(
2677 requested, &size, executable); 2691 requested + guardsize, &size, executable);
2678 if (mem == NULL) return NULL; 2692 if (mem == NULL) return NULL;
2679 2693
2680 // The start of the chunk may be overlayed with a page so we have to 2694 // The start of the chunk may be overlayed with a page so we have to
2681 // make sure that the page flags fit in the size field. 2695 // make sure that the page flags fit in the size field.
2682 ASSERT((size & Page::kPageFlagMask) == 0); 2696 ASSERT((size & Page::kPageFlagMask) == 0);
2683 2697
2684 LOG(isolate, NewEvent("LargeObjectChunk", mem, size)); 2698 LOG(isolate, NewEvent("LargeObjectChunk", mem, size));
2685 if (size < requested) { 2699 if (size < requested + guardsize) {
2686 isolate->memory_allocator()->FreeRawMemory( 2700 isolate->memory_allocator()->FreeRawMemory(
2687 mem, size, executable); 2701 static_cast<Address>(mem) - guardsize, size + guardsize, executable);
Mads Ager (chromium) 2011/07/19 09:11:14 This looks wrong. You should use mem and size directly here.
Cris Neckar 2011/07/19 18:35:32 Yep I am dumb. :)
2688 LOG(isolate, DeleteEvent("LargeObjectChunk", mem)); 2702 LOG(isolate, DeleteEvent("LargeObjectChunk", mem));
2689 return NULL; 2703 return NULL;
2690 } 2704 }
2691 2705
2706 if (guardsize != 0) {
2707 OS::Guard(mem, guardsize);
2708 size -= guardsize;
2709 mem = static_cast<Address>(mem) + guardsize;
2710 }
2711
2692 ObjectSpace space = (executable == EXECUTABLE) 2712 ObjectSpace space = (executable == EXECUTABLE)
2693 ? kObjectSpaceCodeSpace 2713 ? kObjectSpaceCodeSpace
2694 : kObjectSpaceLoSpace; 2714 : kObjectSpaceLoSpace;
2695 isolate->memory_allocator()->PerformAllocationCallback( 2715 isolate->memory_allocator()->PerformAllocationCallback(
2696 space, kAllocationActionAllocate, size); 2716 space, kAllocationActionAllocate, size);
2697 2717
2698 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem); 2718 LargeObjectChunk* chunk = reinterpret_cast<LargeObjectChunk*>(mem);
2699 chunk->size_ = size; 2719 chunk->size_ = size;
2700 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); 2720 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2701 page->heap_ = isolate->heap(); 2721 page->heap_ = isolate->heap();
(...skipping 33 matching lines...) Expand 10 before | Expand all | Expand 10 after
2735 while (first_chunk_ != NULL) { 2755 while (first_chunk_ != NULL) {
2736 LargeObjectChunk* chunk = first_chunk_; 2756 LargeObjectChunk* chunk = first_chunk_;
2737 first_chunk_ = first_chunk_->next(); 2757 first_chunk_ = first_chunk_->next();
2738 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address())); 2758 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk->address()));
2739 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize)); 2759 Page* page = Page::FromAddress(RoundUp(chunk->address(), Page::kPageSize));
2740 Executability executable = 2760 Executability executable =
2741 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE; 2761 page->IsPageExecutable() ? EXECUTABLE : NOT_EXECUTABLE;
2742 ObjectSpace space = kObjectSpaceLoSpace; 2762 ObjectSpace space = kObjectSpaceLoSpace;
2743 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; 2763 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace;
2744 size_t size = chunk->size(); 2764 size_t size = chunk->size();
2745 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk->address(), 2765 size_t guardsize = (executable == EXECUTABLE) ? Page::kPageSize : 0;
2746 size, 2766 heap()->isolate()->memory_allocator()->FreeRawMemory(
2747 executable); 2767 chunk->address() - guardsize,
2768 size + guardsize,
2769 executable);
2748 heap()->isolate()->memory_allocator()->PerformAllocationCallback( 2770 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2749 space, kAllocationActionFree, size); 2771 space, kAllocationActionFree, size);
2750 } 2772 }
2751 2773
2752 size_ = 0; 2774 size_ = 0;
2753 page_count_ = 0; 2775 page_count_ = 0;
2754 objects_size_ = 0; 2776 objects_size_ = 0;
2755 } 2777 }
2756 2778
2757 2779
(...skipping 176 matching lines...) Expand 10 before | Expand all | Expand 10 after
2934 2956
2935 // Free the chunk. 2957 // Free the chunk.
2936 heap()->mark_compact_collector()->ReportDeleteIfNeeded( 2958 heap()->mark_compact_collector()->ReportDeleteIfNeeded(
2937 object, heap()->isolate()); 2959 object, heap()->isolate());
2938 LiveObjectList::ProcessNonLive(object); 2960 LiveObjectList::ProcessNonLive(object);
2939 2961
2940 size_ -= static_cast<int>(chunk_size); 2962 size_ -= static_cast<int>(chunk_size);
2941 objects_size_ -= object->Size(); 2963 objects_size_ -= object->Size();
2942 page_count_--; 2964 page_count_--;
2943 ObjectSpace space = kObjectSpaceLoSpace; 2965 ObjectSpace space = kObjectSpaceLoSpace;
2944 if (executable == EXECUTABLE) space = kObjectSpaceCodeSpace; 2966 size_t guardsize = 0;
2945 heap()->isolate()->memory_allocator()->FreeRawMemory(chunk_address, 2967 if (executable == EXECUTABLE) {
2946 chunk_size, 2968 space = kObjectSpaceCodeSpace;
2947 executable); 2969 guardsize = Page::kPageSize;
2970 }
2971 heap()->isolate()->memory_allocator()->FreeRawMemory(
2972 chunk_address - guardsize,
2973 chunk_size + guardsize,
2974 executable);
2948 heap()->isolate()->memory_allocator()->PerformAllocationCallback( 2975 heap()->isolate()->memory_allocator()->PerformAllocationCallback(
2949 space, kAllocationActionFree, size_); 2976 space, kAllocationActionFree, size_);
2950 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address)); 2977 LOG(heap()->isolate(), DeleteEvent("LargeObjectChunk", chunk_address));
2951 } 2978 }
2952 } 2979 }
2953 } 2980 }
2954 2981
2955 2982
2956 bool LargeObjectSpace::Contains(HeapObject* object) { 2983 bool LargeObjectSpace::Contains(HeapObject* object) {
2957 Address address = object->address(); 2984 Address address = object->address();
(...skipping 99 matching lines...) Expand 10 before | Expand all | Expand 10 after
3057 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) { 3084 for (HeapObject* obj = obj_it.next(); obj != NULL; obj = obj_it.next()) {
3058 if (obj->IsCode()) { 3085 if (obj->IsCode()) {
3059 Code* code = Code::cast(obj); 3086 Code* code = Code::cast(obj);
3060 isolate->code_kind_statistics()[code->kind()] += code->Size(); 3087 isolate->code_kind_statistics()[code->kind()] += code->Size();
3061 } 3088 }
3062 } 3089 }
3063 } 3090 }
3064 #endif // DEBUG 3091 #endif // DEBUG
3065 3092
3066 } } // namespace v8::internal 3093 } } // namespace v8::internal
OLDNEW
« no previous file with comments | « src/spaces.h ('k') | no next file » | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698