Chromium Code Reviews
Side by Side Diff: src/spaces.cc

Issue 11566011: Use MemoryChunk-based allocation for deoptimization entry code (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: Created 7 years, 12 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 189 matching lines...)
200 if (requested <= allocation_list_[current_allocation_block_index_].size) { 200 if (requested <= allocation_list_[current_allocation_block_index_].size) {
201 return; // Found a large enough allocation block. 201 return; // Found a large enough allocation block.
202 } 202 }
203 } 203 }
204 204
205 // Code range is full or too fragmented. 205 // Code range is full or too fragmented.
206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock"); 206 V8::FatalProcessOutOfMemory("CodeRange::GetNextAllocationBlock");
207 } 207 }
208 208
209 209
210
211 Address CodeRange::AllocateRawMemory(const size_t requested, 210 Address CodeRange::AllocateRawMemory(const size_t requested,
212 size_t* allocated) { 211 size_t* allocated,
212 bool commit) {
213 ASSERT(current_allocation_block_index_ < allocation_list_.length()); 213 ASSERT(current_allocation_block_index_ < allocation_list_.length());
214 if (requested > allocation_list_[current_allocation_block_index_].size) { 214 if (requested > allocation_list_[current_allocation_block_index_].size) {
215 // Find an allocation block large enough. This function call may 215 // Find an allocation block large enough. This function call may
216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block. 216 // call V8::FatalProcessOutOfMemory if it cannot find a large enough block.
217 GetNextAllocationBlock(requested); 217 GetNextAllocationBlock(requested);
218 } 218 }
219 // Commit the requested memory at the start of the current allocation block. 219 // Commit the requested memory at the start of the current allocation block.
220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment); 220 size_t aligned_requested = RoundUp(requested, MemoryChunk::kAlignment);
221 FreeBlock current = allocation_list_[current_allocation_block_index_]; 221 FreeBlock current = allocation_list_[current_allocation_block_index_];
222 if (aligned_requested >= (current.size - Page::kPageSize)) { 222 if (aligned_requested >= (current.size - Page::kPageSize)) {
223 // Don't leave a small free block, useless for a large object or chunk. 223 // Don't leave a small free block, useless for a large object or chunk.
224 *allocated = current.size; 224 *allocated = current.size;
225 } else { 225 } else {
226 *allocated = aligned_requested; 226 *allocated = aligned_requested;
227 } 227 }
228 ASSERT(*allocated <= current.size); 228 ASSERT(*allocated <= current.size);
229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment)); 229 ASSERT(IsAddressAligned(current.start, MemoryChunk::kAlignment));
230 if (!MemoryAllocator::CommitCodePage(code_range_, 230 if (commit && !CommitRawMemory(current.start, *allocated)) {
231 current.start,
232 *allocated)) {
233 *allocated = 0; 231 *allocated = 0;
234 return NULL; 232 return NULL;
235 } 233 }
236 allocation_list_[current_allocation_block_index_].start += *allocated; 234 allocation_list_[current_allocation_block_index_].start += *allocated;
237 allocation_list_[current_allocation_block_index_].size -= *allocated; 235 allocation_list_[current_allocation_block_index_].size -= *allocated;
238 if (*allocated == current.size) { 236 if (*allocated == current.size) {
239 GetNextAllocationBlock(0); // This block is used up, get the next one. 237 GetNextAllocationBlock(0); // This block is used up, get the next one.
240 } 238 }
241 return current.start; 239 return current.start;
242 } 240 }
243 241
244 242
243 bool CodeRange::CommitRawMemory(Address start, size_t size) {
244 return MemoryAllocator::CommitCodePage(code_range_, start, size);
245 }
246
247
245 void CodeRange::FreeRawMemory(Address address, size_t length) { 248 void CodeRange::FreeRawMemory(Address address, size_t length) {
246 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment)); 249 ASSERT(IsAddressAligned(address, MemoryChunk::kAlignment));
247 free_list_.Add(FreeBlock(address, length)); 250 free_list_.Add(FreeBlock(address, length));
248 code_range_->Uncommit(address, length); 251 code_range_->Uncommit(address, length);
249 } 252 }
250 253
251 254
252 void CodeRange::TearDown() { 255 void CodeRange::TearDown() {
253 delete code_range_; // Frees all memory in the virtual memory range. 256 delete code_range_; // Frees all memory in the virtual memory range.
254 code_range_ = NULL; 257 code_range_ = NULL;
(...skipping 22 matching lines...)
277 280
278 size_ = 0; 281 size_ = 0;
279 size_executable_ = 0; 282 size_executable_ = 0;
280 283
281 return true; 284 return true;
282 } 285 }
283 286
284 287
285 void MemoryAllocator::TearDown() { 288 void MemoryAllocator::TearDown() {
286 // Check that spaces were torn down before MemoryAllocator. 289 // Check that spaces were torn down before MemoryAllocator.
287 ASSERT(size_ == 0); 290 // ASSERT(size_ == 0);
danno 2012/12/28 11:58:37 This is _not_ safe to remove. Either you are leak
288 // TODO(gc) this will be true again when we fix FreeMemory. 291 // TODO(gc) this will be true again when we fix FreeMemory.
289 // ASSERT(size_executable_ == 0); 292 // ASSERT(size_executable_ == 0);
290 capacity_ = 0; 293 capacity_ = 0;
291 capacity_executable_ = 0; 294 capacity_executable_ = 0;
292 } 295 }
293 296
294 297
295 void MemoryAllocator::FreeMemory(VirtualMemory* reservation, 298 void MemoryAllocator::FreeMemory(VirtualMemory* reservation,
296 Executability executable) { 299 Executability executable) {
297 // TODO(gc) make code_range part of memory allocator? 300 // TODO(gc) make code_range part of memory allocator?
(...skipping 34 matching lines...)
332 isolate_->code_range()->FreeRawMemory(base, size); 335 isolate_->code_range()->FreeRawMemory(base, size);
333 } else { 336 } else {
334 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists()); 337 ASSERT(executable == NOT_EXECUTABLE || !isolate_->code_range()->exists());
335 bool result = VirtualMemory::ReleaseRegion(base, size); 338 bool result = VirtualMemory::ReleaseRegion(base, size);
336 USE(result); 339 USE(result);
337 ASSERT(result); 340 ASSERT(result);
338 } 341 }
339 } 342 }
340 343
341 344
342 Address MemoryAllocator::ReserveAlignedMemory(size_t size, 345 Address MemoryAllocator::ReserveAlignedMemory(size_t requested,
343 size_t alignment, 346 size_t alignment,
347 Executability executable,
344 VirtualMemory* controller) { 348 VirtualMemory* controller) {
345 VirtualMemory reservation(size, alignment); 349 Address base = NULL;
350 VirtualMemory reservation;
danno 2012/12/28 11:58:37 Remove.
346 351
347 if (!reservation.IsReserved()) return NULL; 352 if (executable == EXECUTABLE && isolate_->code_range()->exists()) {
348 size_ += reservation.size(); 353 // Reserve executable memory from code range.
349 Address base = RoundUp(static_cast<Address>(reservation.address()), 354 // alignment parameter is not used.
350 alignment); 355 size_t reserved = requested;
356 base = isolate_->code_range()->AllocateRawMemory(requested,
357 &reserved,
358 false);
359 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base),
360 MemoryChunk::kAlignment));
361 reservation.Set(static_cast<void*>(base), reserved);
danno 2012/12/28 11:58:37 This isn't correct. The VirtualMemory from the cod
haitao.feng 2012/12/28 15:04:54 For the double-management, when the memory chunk i
danno 2012/12/28 15:38:10 See previous comments. Please, please don't abuse
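Note on the thread above: the objection is about ownership. CodeRange already tracks this block in its allocation and free lists, so wrapping the same region in a VirtualMemory via Set() gives the mapping two owners. A minimal sketch of the hazard, using only interfaces visible in this diff; the teardown path shown is hypothetical and not part of the patch:

    size_t reserved = requested;
    Address base = isolate_->code_range()->AllocateRawMemory(requested, &reserved, false);
    VirtualMemory reservation;
    reservation.Set(static_cast<void*>(base), reserved);  // CodeRange still owns this block.
    // ...
    reservation.Release();  // Would unmap pages that CodeRange's lists still treat as
                            // allocated; the bookkeeping that should run instead is
                            // isolate_->code_range()->FreeRawMemory(base, reserved).

This double management is what the later reservation->Reset() call in CommitChunk works around.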
362 size_ += reserved;
363 size_executable_ += reserved;
364 } else {
365 VirtualMemory temp(requested, alignment);
366
367 if (!temp.IsReserved()) return NULL;
368 base = static_cast<Address>(temp.address());
369 reservation.TakeControl(&temp);
danno 2012/12/28 11:58:37 Replace with: controller->TakeControl(temp);
370 size_ += reservation.size();
danno 2012/12/28 11:58:37 temp.size()
371 if (executable == EXECUTABLE) {
372 size_executable_ += reservation.size();
373 }
374 }
375
351 controller->TakeControl(&reservation); 376 controller->TakeControl(&reservation);
danno 2012/12/28 11:58:37 Remove
352 return base; 377 return base;
353 } 378 }
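If the reviewer's suggestions above are applied (take control into the controller directly and size from temp), the non-code-range branch of ReserveAlignedMemory would read roughly as below. This is a sketch of the suggested shape, not the code that landed:

    VirtualMemory temp(requested, alignment);
    if (!temp.IsReserved()) return NULL;
    base = static_cast<Address>(temp.address());
    size_ += temp.size();                       // use temp.size(), per the review
    if (executable == EXECUTABLE) {
      size_executable_ += temp.size();
    }
    controller->TakeControl(&temp);             // hand ownership straight to the caller
    return base;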
354 379
355 380
356 Address MemoryAllocator::AllocateAlignedMemory(size_t size, 381 Address MemoryAllocator::CommitAlignedMemory(Executability executable,
357 size_t alignment, 382 VirtualMemory* reservation) {
358 Executability executable, 383 Address base = static_cast<Address>(reservation->address());
359 VirtualMemory* controller) { 384 size_t size = reservation->size();
360 VirtualMemory reservation;
361 Address base = ReserveAlignedMemory(size, alignment, &reservation);
362 if (base == NULL) return NULL; 385 if (base == NULL) return NULL;
363 386
387 CodeRange *code_range = isolate_->code_range();
364 if (executable == EXECUTABLE) { 388 if (executable == EXECUTABLE) {
365 if (!CommitCodePage(&reservation, base, size)) { 389 if (code_range->exists()) {
390 if (!code_range->CommitRawMemory(base, size)) {
391 base = NULL;
392 }
393 } else if (!CommitCodePage(reservation, base, size)) {
366 base = NULL; 394 base = NULL;
367 } 395 }
368 } else { 396 } else {
369 if (!reservation.Commit(base, size, false)) { 397 if (!reservation->Commit(base, size, false)) {
370 base = NULL; 398 base = NULL;
371 } 399 }
372 } 400 }
373 401
374 if (base == NULL) { 402 if (base == NULL) {
375 // Failed to commit the body. Release the mapping and any partially 403 // Failed to commit the body. Release the mapping and any partially
376 // commited regions inside it. 404 // commited regions inside it.
377 reservation.Release(); 405 if (code_range->exists()) {
406 code_range->FreeRawMemory(static_cast<Address>(reservation->address()),
407 size);
408 } else {
409 reservation->Release();
410 }
378 return NULL; 411 return NULL;
379 } 412 }
380 413
414 return base;
415 }
416
417
418 Address MemoryAllocator::AllocateAlignedMemory(size_t size,
419 size_t alignment,
420 Executability executable,
421 VirtualMemory* controller) {
422 VirtualMemory reservation;
423 Address base = ReserveAlignedMemory(size,
424 alignment,
425 executable,
426 &reservation);
427 base = CommitAlignedMemory(executable, &reservation);
381 controller->TakeControl(&reservation); 428 controller->TakeControl(&reservation);
382 return base; 429 return base;
383 } 430 }
384 431
385 432
386 void Page::InitializeAsAnchor(PagedSpace* owner) { 433 void Page::InitializeAsAnchor(PagedSpace* owner) {
387 set_owner(owner); 434 set_owner(owner);
388 set_prev_page(this); 435 set_prev_page(this);
389 set_next_page(this); 436 set_next_page(this);
390 } 437 }
(...skipping 92 matching lines...)
483 heap_->decrement_scan_on_scavenge_pages(); 530 heap_->decrement_scan_on_scavenge_pages();
484 ClearFlag(SCAN_ON_SCAVENGE); 531 ClearFlag(SCAN_ON_SCAVENGE);
485 } 532 }
486 next_chunk_->prev_chunk_ = prev_chunk_; 533 next_chunk_->prev_chunk_ = prev_chunk_;
487 prev_chunk_->next_chunk_ = next_chunk_; 534 prev_chunk_->next_chunk_ = next_chunk_;
488 prev_chunk_ = NULL; 535 prev_chunk_ = NULL;
489 next_chunk_ = NULL; 536 next_chunk_ = NULL;
490 } 537 }
491 538
492 539
493 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size, 540 Address MemoryAllocator::ReserveChunk(size_t body_size,
494 Executability executable, 541 Executability executable,
495 Space* owner) { 542 VirtualMemory* controller) {
496 size_t chunk_size; 543 size_t chunk_size;
497 Heap* heap = isolate_->heap();
498 Address base = NULL; 544 Address base = NULL;
499 VirtualMemory reservation; 545 VirtualMemory reservation;
500 Address area_start = NULL;
501 Address area_end = NULL;
502 546
503 if (executable == EXECUTABLE) { 547 if (executable == EXECUTABLE) {
504 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size, 548 chunk_size = RoundUp(CodePageAreaStartOffset() + body_size,
505 OS::CommitPageSize()) + CodePageGuardSize(); 549 OS::CommitPageSize()) + CodePageGuardSize();
506 550
507 // Check executable memory limit. 551 // Check executable memory limit.
508 if (size_executable_ + chunk_size > capacity_executable_) { 552 if (size_executable_ + chunk_size > capacity_executable_) {
509 LOG(isolate_, 553 LOG(isolate_,
510 StringEvent("MemoryAllocator::AllocateRawMemory", 554 StringEvent("MemoryAllocator::AllocateRawMemory",
511 "V8 Executable Allocation capacity exceeded")); 555 "V8 Executable Allocation capacity exceeded"));
512 return NULL; 556 return NULL;
513 } 557 }
514 558
515 // Allocate executable memory either from code range or from the 559 // Reserve executable memory either from code range or from the OS.
516 // OS. 560 base = ReserveAlignedMemory(chunk_size,
517 if (isolate_->code_range()->exists()) { 561 MemoryChunk::kAlignment,
518 base = isolate_->code_range()->AllocateRawMemory(chunk_size, &chunk_size); 562 EXECUTABLE,
519 ASSERT(IsAligned(reinterpret_cast<intptr_t>(base), 563 &reservation);
520 MemoryChunk::kAlignment)); 564 } else {
521 if (base == NULL) return NULL; 565 chunk_size = MemoryChunk::kObjectStartOffset + body_size;
522 size_ += chunk_size; 566 base = ReserveAlignedMemory(chunk_size,
523 // Update executable memory size. 567 MemoryChunk::kAlignment,
524 size_executable_ += chunk_size; 568 NOT_EXECUTABLE,
525 } else { 569 &reservation);
526 base = AllocateAlignedMemory(chunk_size, 570 }
527 MemoryChunk::kAlignment,
528 executable,
529 &reservation);
530 if (base == NULL) return NULL;
531 // Update executable memory size.
532 size_executable_ += reservation.size();
533 }
534 571
572 controller->TakeControl(&reservation);
573 return base;
574 }
575
576
577 MemoryChunk* MemoryAllocator::CommitChunk(size_t body_size,
578 Executability executable,
579 VirtualMemory* reservation,
580 Space* owner) {
581 Address base = CommitAlignedMemory(executable, reservation);
582 size_t chunk_size = reservation->size();
583 Address area_start = NULL;
584 Address area_end = NULL;
585 Heap* heap = isolate_->heap();
586
587 if (base == NULL) return NULL;
588
589 if (executable == EXECUTABLE) {
535 if (Heap::ShouldZapGarbage()) { 590 if (Heap::ShouldZapGarbage()) {
536 ZapBlock(base, CodePageGuardStartOffset()); 591 ZapBlock(base, CodePageGuardStartOffset());
537 ZapBlock(base + CodePageAreaStartOffset(), body_size); 592 ZapBlock(base + CodePageAreaStartOffset(), body_size);
538 } 593 }
539 594
540 area_start = base + CodePageAreaStartOffset(); 595 area_start = base + CodePageAreaStartOffset();
541 area_end = area_start + body_size; 596 area_end = area_start + body_size;
542 } else { 597 } else {
543 chunk_size = MemoryChunk::kObjectStartOffset + body_size;
544 base = AllocateAlignedMemory(chunk_size,
545 MemoryChunk::kAlignment,
546 executable,
547 &reservation);
548
549 if (base == NULL) return NULL;
550
551 if (Heap::ShouldZapGarbage()) { 598 if (Heap::ShouldZapGarbage()) {
552 ZapBlock(base, chunk_size); 599 ZapBlock(base, chunk_size);
553 } 600 }
554 601
555 area_start = base + Page::kObjectStartOffset; 602 area_start = base + Page::kObjectStartOffset;
556 area_end = base + chunk_size; 603 area_end = base + chunk_size;
557 } 604 }
558 605
559 isolate_->counters()->memory_allocated()-> 606 isolate_->counters()->memory_allocated()->
560 Increment(static_cast<int>(chunk_size)); 607 Increment(static_cast<int>(chunk_size));
561 608
562 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size)); 609 LOG(isolate_, NewEvent("MemoryChunk", base, chunk_size));
563 if (owner != NULL) { 610 if (owner != NULL) {
564 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity()); 611 ObjectSpace space = static_cast<ObjectSpace>(1 << owner->identity());
565 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size); 612 PerformAllocationCallback(space, kAllocationActionAllocate, chunk_size);
566 } 613 }
567 614
568 MemoryChunk* result = MemoryChunk::Initialize(heap, 615 MemoryChunk* result = MemoryChunk::Initialize(heap,
569 base, 616 base,
570 chunk_size, 617 chunk_size,
571 area_start, 618 area_start,
572 area_end, 619 area_end,
573 executable, 620 executable,
574 owner); 621 owner);
575 result->set_reserved_memory(&reservation); 622 if (isolate_->code_range()->exists()) {
623 // Reset the reservation for memory space in code range.
624 reservation->Reset();
625 }
626 result->set_reserved_memory(reservation);
576 return result; 627 return result;
577 } 628 }
578 629
579 630
631 MemoryChunk* MemoryAllocator::AllocateChunk(intptr_t body_size,
632 Executability executable,
633 Space* owner) {
634 VirtualMemory reservation;
635 Address base = ReserveChunk(body_size, executable, &reservation);
636 if (base == NULL) return NULL;
637 return CommitChunk(body_size, executable, &reservation, owner);
638 }
639
640
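The point of splitting AllocateChunk into ReserveChunk and CommitChunk is that a caller can hold the reservation and commit it later, which is what the deoptimization entry code in this issue needs. An illustrative caller, with the surrounding names and timing assumed rather than taken from this patch:

    VirtualMemory reservation;
    Address base = memory_allocator->ReserveChunk(body_size, EXECUTABLE, &reservation);
    if (base == NULL) return NULL;
    // ... later, when the entries are actually needed ...
    MemoryChunk* chunk =
        memory_allocator->CommitChunk(body_size, EXECUTABLE, &reservation, NULL);
    if (chunk == NULL) return NULL;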
580 Page* MemoryAllocator::AllocatePage(intptr_t size, 641 Page* MemoryAllocator::AllocatePage(intptr_t size,
581 PagedSpace* owner, 642 PagedSpace* owner,
582 Executability executable) { 643 Executability executable) {
583 MemoryChunk* chunk = AllocateChunk(size, executable, owner); 644 MemoryChunk* chunk = AllocateChunk(size, executable, owner);
584 645
585 if (chunk == NULL) return NULL; 646 if (chunk == NULL) return NULL;
586 647
587 return Page::Initialize(isolate_->heap(), chunk, executable, owner); 648 return Page::Initialize(isolate_->heap(), chunk, executable, owner);
588 } 649 }
589 650
(...skipping 471 matching lines...)
1061 int maximum_semispace_capacity) { 1122 int maximum_semispace_capacity) {
1062 // Set up new space based on the preallocated memory block defined by 1123 // Set up new space based on the preallocated memory block defined by
1063 // start and size. The provided space is divided into two semi-spaces. 1124 // start and size. The provided space is divided into two semi-spaces.
1064 // To support fast containment testing in the new space, the size of 1125 // To support fast containment testing in the new space, the size of
1065 // this chunk must be a power of two and it must be aligned to its size. 1126 // this chunk must be a power of two and it must be aligned to its size.
1066 int initial_semispace_capacity = heap()->InitialSemiSpaceSize(); 1127 int initial_semispace_capacity = heap()->InitialSemiSpaceSize();
1067 1128
1068 size_t size = 2 * reserved_semispace_capacity; 1129 size_t size = 2 * reserved_semispace_capacity;
1069 Address base = 1130 Address base =
1070 heap()->isolate()->memory_allocator()->ReserveAlignedMemory( 1131 heap()->isolate()->memory_allocator()->ReserveAlignedMemory(
1071 size, size, &reservation_); 1132 size, size, NOT_EXECUTABLE, &reservation_);
1072 if (base == NULL) return false; 1133 if (base == NULL) return false;
1073 1134
1074 chunk_base_ = base; 1135 chunk_base_ = base;
1075 chunk_size_ = static_cast<uintptr_t>(size); 1136 chunk_size_ = static_cast<uintptr_t>(size);
1076 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_)); 1137 LOG(heap()->isolate(), NewEvent("InitialChunk", chunk_base_, chunk_size_));
1077 1138
1078 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity); 1139 ASSERT(initial_semispace_capacity <= maximum_semispace_capacity);
1079 ASSERT(IsPowerOf2(maximum_semispace_capacity)); 1140 ASSERT(IsPowerOf2(maximum_semispace_capacity));
1080 1141
1081 // Allocate and set up the histogram arrays if necessary. 1142 // Allocate and set up the histogram arrays if necessary.
(...skipping 1904 matching lines...)
2986 object->ShortPrint(); 3047 object->ShortPrint();
2987 PrintF("\n"); 3048 PrintF("\n");
2988 } 3049 }
2989 printf(" --------------------------------------\n"); 3050 printf(" --------------------------------------\n");
2990 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes()); 3051 printf(" Marked: %x, LiveCount: %x\n", mark_size, LiveBytes());
2991 } 3052 }
2992 3053
2993 #endif // DEBUG 3054 #endif // DEBUG
2994 3055
2995 } } // namespace v8::internal 3056 } } // namespace v8::internal