Chromium Code Reviews

Unified Diff: src/spaces.cc

Issue 23903008: Drop OS::IsOutsideAllocatedSpace() and move the tracking to the MemoryAllocator. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Fix invalid calculation of committed memory boundaries. Created 7 years, 3 months ago
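
This patch replaces the process-global bookkeeping behind OS::IsOutsideAllocatedSpace() with two per-isolate watermark pointers on MemoryAllocator, lowest_ever_allocated_ and highest_ever_allocated_; every commit path in the diff below widens that range via UpdateAllocatedSpaceLimits(). The declarations live in src/spaces.h, which is not part of this file's diff, so the following is only a sketch of the presumed shape, reconstructed from the constructor initializers and the call sites visible below:

// Sketch only: the real declarations are in src/spaces.h (not shown in this
// diff); the bodies are assumed from the call sites below.
class MemoryAllocator {
 public:
  // Conservatively widen [lowest_ever_allocated_, highest_ever_allocated_)
  // to cover every region that has ever been committed.
  void UpdateAllocatedSpaceLimits(void* low, void* high) {
    if (low < lowest_ever_allocated_) lowest_ever_allocated_ = low;
    if (high > highest_ever_allocated_) highest_ever_allocated_ = high;
  }

  // An address outside the watermarks was never handed out by this allocator.
  bool IsOutsideAllocatedSpace(const void* address) const {
    return address < lowest_ever_allocated_ ||
           address >= highest_ever_allocated_;
  }

 private:
  // Initialized "inverted" (see the constructor in the diff) so that the
  // first successful commit snaps both watermarks to its own bounds.
  void* lowest_ever_allocated_ = reinterpret_cast<void*>(-1);
  void* highest_ever_allocated_ = nullptr;
};

Starting the low watermark at the highest possible address, and the high one at the lowest, avoids any special case for the empty range.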
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 210 matching lines...)
   size_t aligned_requested = RoundUp(requested_size, MemoryChunk::kAlignment);
   FreeBlock current = allocation_list_[current_allocation_block_index_];
   if (aligned_requested >= (current.size - Page::kPageSize)) {
     // Don't leave a small free block, useless for a large object or chunk.
     *allocated = current.size;
   } else {
     *allocated = aligned_requested;
   }
   ASSERT(*allocated <= current.size);
   ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
-  if (!MemoryAllocator::CommitExecutableMemory(code_range_,
-                                               current.start,
-                                               commit_size,
-                                               *allocated)) {
+  if (!isolate_->memory_allocator()->CommitExecutableMemory(code_range_,
+                                                            current.start,
+                                                            commit_size,
+                                                            *allocated)) {
     *allocated = 0;
     return NULL;
   }
   allocation_list_[current_allocation_block_index_].start += *allocated;
   allocation_list_[current_allocation_block_index_].size -= *allocated;
   if (*allocated == current.size) {
     GetNextAllocationBlock(0);  // This block is used up, get the next one.
   }
   return current.start;
 }


 bool CodeRange::CommitRawMemory(Address start, size_t length) {
-  return code_range_->Commit(start, length, true);
+  return isolate_->memory_allocator()->CommitMemory(start, length, EXECUTABLE);
 }


 bool CodeRange::UncommitRawMemory(Address start, size_t length) {
   return code_range_->Uncommit(start, length);
 }


 void CodeRange::FreeRawMemory(Address address, size_t length) {
   ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
(...skipping 12 matching lines...)

 // -----------------------------------------------------------------------------
 // MemoryAllocator
 //

 MemoryAllocator::MemoryAllocator(Isolate* isolate)
     : isolate_(isolate),
       capacity_(0),
       capacity_executable_(0),
       size_(0),
-      size_executable_(0) {
+      size_executable_(0),
+      lowest_ever_allocated_(reinterpret_cast<void*>(-1)),
+      highest_ever_allocated_(reinterpret_cast<void*>(0)) {
 }


 bool MemoryAllocator::SetUp(intptr_t capacity, intptr_t capacity_executable) {
   capacity_ = RoundUp(capacity, Page::kPageSize);
   capacity_executable_ = RoundUp(capacity_executable, Page::kPageSize);
   ASSERT_GE(capacity_, capacity_executable_);

   size_ = 0;
   size_executable_ = 0;

   return true;
 }


 void MemoryAllocator::TearDown() {
   // Check that spaces were torn down before MemoryAllocator.
   ASSERT(size_ == 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // ASSERT(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
 }


+bool MemoryAllocator::CommitMemory(Address base,
+                                   size_t size,
+                                   Executability executable) {
+  if (!VirtualMemory::CommitRegion(base, size, executable == EXECUTABLE)) {
+    return false;
+  }
+  UpdateAllocatedSpaceLimits(base, base + size);
+  return true;
+}
+
+
 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(reservation->IsReserved());
   size_t size = reservation->size();
   ASSERT(size_ >= size);
   size_ -= size;

   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));

(...skipping 59 matching lines...)
   if (base == NULL) return NULL;

   if (executable == EXECUTABLE) {
     if (!CommitExecutableMemory(&reservation,
                                 base,
                                 commit_size,
                                 reserve_size)) {
       base = NULL;
     }
   } else {
-    if (!reservation.Commit(base, commit_size, false)) {
+    if (reservation.Commit(base, commit_size, false)) {
+      UpdateAllocatedSpaceLimits(base, base + commit_size);
+    } else {
       base = NULL;
     }
   }

   if (base == NULL) {
     // Failed to commit the body. Release the mapping and any partially
     // committed regions inside it.
     reservation.Release();
     return NULL;
   }
(...skipping 105 matching lines...)
   size_t committed_size = RoundUp(header_size + (area_end() - area_start()),
                                   OS::CommitPageSize());

   if (commit_size > committed_size) {
     // Commit size should be less than or equal to the reserved size.
     ASSERT(commit_size <= size() - 2 * guard_size);
     // Append the committed area.
     Address start = address() + committed_size + guard_size;
     size_t length = commit_size - committed_size;
     if (reservation_.IsReserved()) {
-      if (!reservation_.Commit(start, length, IsFlagSet(IS_EXECUTABLE))) {
+      Executability executable = IsFlagSet(IS_EXECUTABLE)
+          ? EXECUTABLE : NOT_EXECUTABLE;
+      if (!heap()->isolate()->memory_allocator()->CommitMemory(
+          start, length, executable)) {
         return false;
       }
     } else {
       CodeRange* code_range = heap_->isolate()->code_range();
       ASSERT(code_range->exists() && IsFlagSet(IS_EXECUTABLE));
       if (!code_range->CommitRawMemory(start, length)) return false;
     }

     if (Heap::ShouldZapGarbage()) {
       heap_->isolate()->memory_allocator()->ZapBlock(start, length);
(...skipping 233 matching lines...)
     FreeMemory(chunk->address(),
                chunk->size(),
                chunk->executable());
   }
 }


 bool MemoryAllocator::CommitBlock(Address start,
                                   size_t size,
                                   Executability executable) {
-  if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
+  if (!CommitMemory(start, size, executable)) return false;

   if (Heap::ShouldZapGarbage()) {
     ZapBlock(start, size);
   }

   isolate_->counters()->memory_allocated()->Increment(static_cast<int>(size));
   return true;
 }

(...skipping 115 matching lines...)
                   commit_size - CodePageGuardStartOffset(),
                   true)) {
     return false;
   }

   // Create guard page before the end.
   if (!vm->Guard(start + reserved_size - CodePageGuardSize())) {
     return false;
   }

+  UpdateAllocatedSpaceLimits(start,
+                             start + CodePageAreaStartOffset() +
+                             commit_size - CodePageGuardStartOffset());
   return true;
 }


 // -----------------------------------------------------------------------------
 // MemoryChunk implementation

 void MemoryChunk::IncrementLiveBytesFromMutator(Address address, int by) {
   MemoryChunk* chunk = MemoryChunk::FromAddress(address);
   if (!chunk->InNewSpace() && !static_cast<Page*>(chunk)->WasSwept()) {
(...skipping 2288 matching lines...)
     object->ShortPrint();
     PrintF("\n");
   }
   printf(" --------------------------------------\n");
   printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
 }

 #endif  // DEBUG

 } }  // namespace v8::internal
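
With the watermarks maintained by MemoryAllocator, code that used to ask the OS layer can query the isolate's allocator instead; the caller-side updates are in src/spaces.h and src/v8.cc, not in this file. A hypothetical call site, assuming the IsOutsideAllocatedSpace() sketch given above:

// Hypothetical usage; the actual callers are updated in the other files of
// this CL, and this helper name is illustrative only.
bool MayPointIntoV8Heap(Isolate* isolate, void* address) {
  // An address outside the committed watermarks cannot be a heap address,
  // so more expensive containment checks can be skipped early.
  return !isolate->memory_allocator()->IsOutsideAllocatedSpace(address);
}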