Chromium Code Reviews

Unified diff: src/spaces.cc

Issue 7865025: Move aligned allocation to the platform files. (Closed) Base URL: https://v8.googlecode.com/svn/branches/experimental/gc
Patch Set: Removed leftover debug code. Created 9 years, 3 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 274 matching lines...)
   ASSERT_GE(capacity_, capacity_executable_);
 
   size_ = 0;
   size_executable_ = 0;
 
   return true;
 }
 
 
 void MemoryAllocator::TearDown() {
-  // Check that spaces were teared down before MemoryAllocator.
+  // Check that spaces were torn down before MemoryAllocator.
   ASSERT(size_ == 0);
   // TODO(gc) this will be true again when we fix FreeMemory.
   // ASSERT(size_executable_ == 0);
   capacity_ = 0;
   capacity_executable_ = 0;
 }
 
 
+void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
+                                 Executability executable) {
+  // TODO(gc) make code_range part of memory allocator?
+  ASSERT(reservation->IsReserved());
+  size_t size = reservation->size();
+  ASSERT(size_ >= size);
+  size_ -= size;
+
+  isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
+
+  if (executable == EXECUTABLE) {
+    ASSERT(size_executable_ >= size);
+    size_executable_ -= size;
+  }
+  // Code which is part of the code-range does not have its own VirtualMemory.
+  ASSERT(!isolate_->code_range()->contains(
+      static_cast<Address>(reservation->address())));
+  ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
+  reservation->Release();
+}
+
+
 void MemoryAllocator::FreeMemory(Address base,
                                  size_t size,
                                  Executability executable) {
   // TODO(gc) make code_range part of memory allocator?
   ASSERT(size_ >= size);
   size_ -= size;
 
   isolate_->counters()->memory_allocated()->Decrement(static_cast<int>(size));
 
   if (executable == EXECUTABLE) {
     ASSERT(size_executable_ >= size);
     size_executable_ -= size;
   }
   if (isolate_->code_range()->contains(static_cast<Address>(base))) {
     ASSERT(executable == EXECUTABLE);
     isolate_->code_range()->FreeRawMemory(base, size);
   } else {
     ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
     VirtualMemory::ReleaseRegion(base, size);
   }
 }
 
 
-Address MemoryAllocator::ReserveAlignedMemory(const size_t requested,
+Address MemoryAllocator::ReserveAlignedMemory(size_t size,
                                               size_t alignment,
-                                              size_t* allocated_size) {
-  ASSERT(IsAligned(alignment, OS::AllocateAlignment()));
-  if (size_ + requested > capacity_) return NULL;
-
-  size_t allocated = RoundUp(requested + alignment,
-                             static_cast<intptr_t>(OS::AllocateAlignment()));
-
-  Address base = reinterpret_cast<Address>(
-      VirtualMemory::ReserveRegion(allocated));
-
-  Address end = base + allocated;
-
-  if (base == 0) return NULL;
-
-  Address aligned_base = RoundUp(base, alignment);
-
-  ASSERT(aligned_base + requested <= base + allocated);
-
-  // The difference between re-aligned base address and base address is
-  // multiple of OS::AllocateAlignment().
-  if (aligned_base != base) {
-    ASSERT(aligned_base > base);
-    // TODO(gc) check result of operation?
-    VirtualMemory::ReleaseRegion(reinterpret_cast<void*>(base),
-                                 aligned_base - base);
-    allocated -= (aligned_base - base);
-    base = aligned_base;
-  }
-
-  ASSERT(base + allocated == end);
-
-  Address requested_end = base + requested;
-  Address aligned_requested_end =
-      RoundUp(requested_end, OS::AllocateAlignment());
-
-  if (aligned_requested_end < end) {
-    // TODO(gc) check result of operation?
-    VirtualMemory::ReleaseRegion(reinterpret_cast<void*>(aligned_requested_end),
-                                 end - aligned_requested_end);
-    allocated = aligned_requested_end - base;
-  }
-
-  size_ += allocated;
-  *allocated_size = allocated;
+                                              VirtualMemory* controller) {
+  VirtualMemory reservation(size, alignment);
+
+  if (!reservation.IsReserved()) return NULL;
+  size_ += reservation.size();
+  Address base = RoundUp(static_cast<Address>(reservation.address()),
+                         alignment);
+  controller->TakeControl(&reservation);
   return base;
 }
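Reviewer note: the deleted body is the classic over-reserve-and-trim way of getting an aligned reservation out of an allocator that only guarantees page alignment; this CL pushes that logic behind the VirtualMemory(size, alignment) constructor in the platform files. For readers who want the trick spelled out, a minimal standalone sketch assuming POSIX mmap/munmap (helper names here are illustrative, not V8's):

  #include <stdint.h>
  #include <stddef.h>
  #include <sys/mman.h>

  // Round value up to a power-of-two alignment.
  static uintptr_t RoundUpTo(uintptr_t value, uintptr_t alignment) {
    return (value + alignment - 1) & ~(alignment - 1);
  }

  void* ReserveAligned(size_t size, size_t alignment) {
    // Reserve `alignment` extra bytes so an aligned sub-range must exist.
    size_t padded = size + alignment;
    void* raw = mmap(NULL, padded, PROT_NONE,
                     MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (raw == MAP_FAILED) return NULL;
    uintptr_t base = reinterpret_cast<uintptr_t>(raw);
    uintptr_t aligned = RoundUpTo(base, alignment);
    // Give back the misaligned head and the unused tail.
    if (aligned != base) munmap(raw, aligned - base);
    size_t tail = base + padded - (aligned + size);
    if (tail != 0) munmap(reinterpret_cast<void*>(aligned + size), tail);
    return reinterpret_cast<void*>(aligned);
  }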
 
 
-Address MemoryAllocator::AllocateAlignedMemory(const size_t requested,
+Address MemoryAllocator::AllocateAlignedMemory(size_t size,
                                                size_t alignment,
                                                Executability executable,
-                                               size_t* allocated_size) {
-  Address base =
-      ReserveAlignedMemory(requested, Page::kPageSize, allocated_size);
-
+                                               VirtualMemory* controller) {
+  VirtualMemory reservation;
+  Address base = ReserveAlignedMemory(size, alignment, &reservation);
   if (base == NULL) return NULL;
-
-  if (!VirtualMemory::CommitRegion(base,
-                                   *allocated_size,
-                                   executable == EXECUTABLE)) {
-    VirtualMemory::ReleaseRegion(base, *allocated_size);
-    size_ -= *allocated_size;
+  if (!reservation.Commit(base,
+                          size,
+                          executable == EXECUTABLE)) {
     return NULL;
   }
-
+  controller->TakeControl(&reservation);
   return base;
 }
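Two things worth noting in this hunk. First, the old body passed Page::kPageSize to ReserveAlignedMemory regardless of the alignment argument; the new body forwards alignment, quietly fixing that. Second, the rewrite makes the reserve-then-commit split explicit: a failed Commit no longer needs manual ReleaseRegion and size_ bookkeeping, because the reservation cleans up after itself unless TakeControl hands it off. A hedged sketch of what that split bottoms out to on POSIX (the real code lives in the platform files this CL targets):

  #include <stddef.h>
  #include <sys/mman.h>

  // Reserve address space without committing any memory to it.
  void* Reserve(size_t size) {
    void* p = mmap(NULL, size, PROT_NONE,
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    return p == MAP_FAILED ? NULL : p;
  }

  // Commit (part of) a reservation, optionally as executable memory.
  bool Commit(void* address, size_t size, bool executable) {
    int prot = PROT_READ | PROT_WRITE | (executable ? PROT_EXEC : 0);
    return mprotect(address, size, prot) == 0;
  }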
 
 
 void Page::InitializeAsAnchor(PagedSpace* owner) {
   set_owner(owner);
   set_prev_page(this);
   set_next_page(this);
 }
 
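The anchor page points at itself in both directions, so each PagedSpace keeps its pages on a circular doubly-linked list and traversal never needs a NULL check. A self-contained sketch of the same sentinel pattern, with Node standing in for Page (hypothetical names):

  struct Node {
    Node* prev;
    Node* next;
    void InitAsAnchor() { prev = next = this; }
    void InsertAfter(Node* anchor) {
      prev = anchor;
      next = anchor->next;
      anchor->next->prev = this;
      anchor->next = this;
    }
    bool ListIsEmpty() const { return next == this; }  // valid on the anchor
  };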
(...skipping 36 matching lines...)
                                      Executability executable,
                                      Space* owner) {
   MemoryChunk* chunk = FromAddress(base);
 
   ASSERT(base == chunk->address());
 
   chunk->heap_ = heap;
   chunk->size_ = size;
   chunk->flags_ = 0;
   chunk->set_owner(owner);
+  chunk->InitializeReservedMemory();
   chunk->slots_buffer_ = NULL;
   Bitmap::Clear(chunk);
   chunk->initialize_scan_on_scavenge(false);
   chunk->SetFlag(WAS_SWEPT_PRECISELY);
 
   ASSERT(OFFSET_OF(MemoryChunk, flags_) == kFlagsOffset);
 
   if (executable == EXECUTABLE) chunk->SetFlag(IS_EXECUTABLE);
 
   if (owner == heap->old_data_space()) chunk->SetFlag(CONTAINS_ONLY_DATA);
(...skipping 21 matching lines...)
   next_chunk_ = NULL;
 }
 
 
 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
                                             Executability executable,
                                             Space* owner) {
   size_t chunk_size = MemoryChunk::kObjectStartOffset + body_size;
   Heap* heap = isolate_->heap();
   Address base = NULL;
+  VirtualMemory reservation;
   if (executable == EXECUTABLE) {
     // Check executable memory limit.
     if (size_executable_ + chunk_size > capacity_executable_) {
       LOG(isolate_,
           StringEvent("MemoryAllocator::AllocateRawMemory",
                       "V8 Executable Allocation capacity exceeded"));
       return NULL;
     }
 
     // Allocate executable memory either from code range or from the
     // OS.
     if (isolate_->code_range()->exists()) {
       base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size);
       ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
                        MemoryChunk::kAlignment));
+      if (base == NULL) return NULL;
       size_ += chunk_size;
+      // Update executable memory size.
+      size_executable_ += chunk_size;
     } else {
       base = AllocateAlignedMemory(chunk_size,
                                    MemoryChunk::kAlignment,
                                    executable,
-                                   &chunk_size);
+                                   &reservation);
+      if (base == NULL) return NULL;
+      // Update executable memory size.
+      size_executable_ += reservation.size();
     }
-
-    if (base == NULL) return NULL;
-
-    // Update executable memory size.
-    size_executable_ += chunk_size;
   } else {
     base = AllocateAlignedMemory(chunk_size,
                                  MemoryChunk::kAlignment,
                                  executable,
-                                 &chunk_size);
+                                 &reservation);
 
     if (base == NULL) return NULL;
   }
 
 #ifdef DEBUG
   ZapBlock(base, chunk_size);
 #endif
   isolate_->counters()->memory_allocated()->
       Increment(static_cast<int>(chunk_size));
 
   LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
   if (owner != NULL) {
     ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
     PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
   }
 
-  return MemoryChunk::Initialize(heap,
-                                 base,
-                                 chunk_size,
-                                 executable,
-                                 owner);
+  MemoryChunk* result = MemoryChunk::Initialize(heap,
+                                                base,
+                                                chunk_size,
+                                                executable,
+                                                owner);
+  result->set_reserved_memory(&reservation);
+  return result;
 }
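The reservation is a stack local here, so whatever cleanup VirtualMemory does on destruction would otherwise tear the mapping down when AllocateChunk returns; set_reserved_memory() hands ownership to the chunk instead. A guess at what the TakeControl-style transfer looks like, for readers without spaces.h open (assumed semantics, not copied from V8):

  #include <stddef.h>

  // Illustrative stand-in for v8::internal::VirtualMemory. Assumed behavior:
  // TakeControl steals the mapping so only one object ever releases it.
  class Reservation {
   public:
    Reservation() : address_(NULL), size_(0) {}
    bool IsReserved() const { return address_ != NULL; }
    void TakeControl(Reservation* from) {
      address_ = from->address_;
      size_ = from->size_;
      from->address_ = NULL;   // `from` forgets the mapping and
      from->size_ = 0;         // will not release it on destruction.
    }
   private:
    void* address_;
    size_t size_;
  };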
 
 
 Page* MemoryAllocator::AllocatePage(PagedSpace* owner,
                                     Executability executable) {
   MemoryChunk* chunk = AllocateChunk(Page::kObjectAreaSize, executable, owner);
 
   if (chunk == NULL) return NULL;
 
   return Page::Initialize(isolate_->heap(), chunk, executable, owner);
(...skipping 10 matching lines...)
 
 
 void MemoryAllocator::Free(MemoryChunk* chunk) {
   LOG(isolate_, DeleteEvent("MemoryChunk", chunk));
   if (chunk->owner() != NULL) {
     ObjectSpace space =
         static_cast<ObjectSpace>(1 << chunk->owner()->identity());
     PerformAllocationCallback(space, kAllocationActionFree, chunk->size());
   }
 
-  FreeMemory(chunk->address(),
-             chunk->size(),
-             chunk->executable());
+  VirtualMemory* reservation = chunk->reserved_memory();
+  if (reservation->IsReserved()) {
+    FreeMemory(reservation, chunk->executable());
+  } else {
+    FreeMemory(chunk->address(),
+               chunk->size(),
+               chunk->executable());
+  }
 }
 
 
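Free now prefers the reservation-backed path and keeps the raw address/size path only for code-range chunks, which never own a VirtualMemory (the new FreeMemory overload even asserts that). On the POSIX model of the sketches above, the release step either way bottoms out in a single call returning the whole region to the OS:

  #include <stddef.h>
  #include <sys/mman.h>

  // Return a reserved (and possibly committed) range to the OS in one call.
  bool ReleaseRegion(void* base, size_t size) {
    return munmap(base, size) == 0;
  }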
 bool MemoryAllocator::CommitBlock(Address start,
                                   size_t size,
                                   Executability executable) {
   if (!VirtualMemory::CommitRegion(start, size, executable)) return false;
 #ifdef DEBUG
   ZapBlock(start, size);
 #endif
(...skipping 251 matching lines...)
 
 
 bool NewSpace::Setup(int reserved_semispace_capacity,
                      int maximum_semispace_capacity) {
   // Setup new space based on the preallocated memory block defined by
   // start and size. The provided space is divided into two semi-spaces.
   // To support fast containment testing in the new space, the size of
   // this chunk must be a power of two and it must be aligned to its size.
   int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
 
-  size_t size = 0;
+  size_t size = 2 * reserved_semispace_capacity;
   Address base =
       heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
-          2 * reserved_semispace_capacity,
-          2 * reserved_semispace_capacity,
-          &size);
-
+          size, size, &reservation_);
   if (base == NULL) return false;
 
   chunk_base_ = base;
   chunk_size_ = static_cast<uintptr_t>(size);
   LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
 
   ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
   ASSERT(IsPowerOf2(maximum_semispace_capacity));
 
   // Allocate and setup the histogram arrays if necessary.
(...skipping 43 matching lines...)
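The comment at the top of NewSpace::Setup is the reason the chunk is reserved with its size as its own alignment: for a power-of-two chunk aligned to its size, new-space containment checks reduce to one mask and compare. A standalone sketch (hypothetical helper, not V8's actual containment test):

  #include <stdint.h>

  // chunk_size is a power of two and chunk_base is aligned to chunk_size,
  // so masking the low bits of any address inside the chunk yields the base.
  bool InNewSpace(uintptr_t chunk_base, uintptr_t chunk_size, uintptr_t addr) {
    return (addr & ~(chunk_size - 1)) == chunk_base;
  }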
   }
 
   start_ = NULL;
   allocation_info_.top = NULL;
   allocation_info_.limit = NULL;
 
   to_space_.TearDown();
   from_space_.TearDown();
 
   LOG(heap()->isolate(), DeleteEvent("InitialChunk", chunk_base_));
-  heap()->isolate()->memory_allocator()->FreeMemory(
-      chunk_base_,
-      static_cast<size_t>(chunk_size_),
-      NOT_EXECUTABLE);
+
+  ASSERT(reservation_.IsReserved());
+  heap()->isolate()->memory_allocator()->FreeMemory(&reservation_,
+                                                    NOT_EXECUTABLE);
   chunk_base_ = NULL;
   chunk_size_ = 0;
 }
 
 
 void NewSpace::Flip() {
   SemiSpace::Swap(&from_space_, &to_space_);
 }
 
 
(...skipping 1575 matching lines...)
   for (HeapObject* obj = obj_it.Next(); obj != NULL; obj = obj_it.Next()) {
     if (obj->IsCode()) {
       Code* code = Code::cast(obj);
       isolate->code_kind_statistics()[code->kind()] += code->Size();
     }
   }
 }
 #endif  // DEBUG
 
 } }  // namespace v8::internal