Chromium Code Reviews

Side by Side Diff: src/spaces.h

Issue 435003: Patch for allowing several V8 instances in process:... (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 11 years ago
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. 1 // Copyright 2006-2008 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 190 matching lines...)
201 // Clears the corresponding remembered set bit for a given address. 201 // Clears the corresponding remembered set bit for a given address.
202 static inline void UnsetRSet(Address address, int offset); 202 static inline void UnsetRSet(Address address, int offset);
203 203
204 // Checks whether the remembered set bit for a given address is set. 204 // Checks whether the remembered set bit for a given address is set.
205 static inline bool IsRSetSet(Address address, int offset); 205 static inline bool IsRSetSet(Address address, int offset);
206 206
207 #ifdef DEBUG 207 #ifdef DEBUG
208 // Use a state to mark whether remembered set space can be used for other 208 // Use a state to mark whether remembered set space can be used for other
209 // purposes. 209 // purposes.
210 enum RSetState { IN_USE, NOT_IN_USE }; 210 enum RSetState { IN_USE, NOT_IN_USE };
211 static bool is_rset_in_use() { return rset_state_ == IN_USE; } 211 static bool is_rset_in_use() {
212 static void set_rset_state(RSetState state) { rset_state_ = state; } 212 return v8_context()->storage_data_.rset_used_;
213 }
214 static void set_rset_state(RSetState state) {
215 v8_context()->storage_data_.rset_used_ = state == IN_USE;
216 }
213 #endif 217 #endif
214 218
215 // 8K bytes per page. 219 // 8K bytes per page.
216 static const int kPageSizeBits = 13; 220 static const int kPageSizeBits = 13;
217 221
218 // Page size in bytes. This must be a multiple of the OS page size. 222 // Page size in bytes. This must be a multiple of the OS page size.
219 static const int kPageSize = 1 << kPageSizeBits; 223 static const int kPageSize = 1 << kPageSizeBits;
220 224
221 // Page size mask. 225 // Page size mask.
222 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1; 226 static const intptr_t kPageAlignmentMask = (1 << kPageSizeBits) - 1;
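Note on the is_rset_in_use()/set_rset_state() change above: the patch replaces the static rset_state_ member with a flag on per-context storage reached through v8_context(). The stand-alone sketch below shows that shape; V8Context, StorageData, v8_context() and rset_used_ are named as in the patch, but their definitions here are only assumed for illustration (the real ones are not shown in this file).

    // Minimal sketch of the static-state -> per-context-state pattern used in
    // this patch. The struct layouts below are illustrative, not V8's.
    #include <cstdio>

    struct StorageData {
      bool rset_used_ = true;   // stands in for the old static rset_state_
    };

    struct V8Context {
      StorageData storage_data_;
    };

    // In the patch this would return the current instance's context; here a
    // single global stands in for it.
    static V8Context the_context;
    static V8Context* v8_context() { return &the_context; }

    class Page {
     public:
      enum RSetState { IN_USE, NOT_IN_USE };
      // The accessors keep their static signatures but read/write context state.
      static bool is_rset_in_use() {
        return v8_context()->storage_data_.rset_used_;
      }
      static void set_rset_state(RSetState state) {
        v8_context()->storage_data_.rset_used_ = (state == IN_USE);
      }
    };

    int main() {
      Page::set_rset_state(Page::NOT_IN_USE);
      std::printf("rset in use: %d\n", Page::is_rset_in_use());
    }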
(...skipping 83 matching lines...)
306 310
307 #ifdef DEBUG 311 #ifdef DEBUG
308 virtual void Print() = 0; 312 virtual void Print() = 0;
309 #endif 313 #endif
310 314
311 private: 315 private:
312 AllocationSpace id_; 316 AllocationSpace id_;
313 Executability executable_; 317 Executability executable_;
314 }; 318 };
315 319
320 class CodeRangeData {
321 // Plain old data class, just a struct plus a constructor.
322 class FreeBlock {
323 public:
324 FreeBlock(Address start_arg, size_t size_arg)
325 : start(start_arg), size(size_arg) {}
326 FreeBlock(void* start_arg, size_t size_arg)
327 : start(static_cast<Address>(start_arg)), size(size_arg) {}
328
329 Address start;
330 size_t size;
331 };
332
333 // The reserved range of virtual memory that all code objects are put in.
334 VirtualMemory* code_range_;
335 // Freed blocks of memory are added to the free list. When the allocation
336 // list is exhausted, the free list is sorted and merged to make the new
337 // allocation list.
338 List<FreeBlock> free_list_;
339 // Memory is allocated from the free blocks on the allocation list.
340 // The block at current_allocation_block_index_ is the current block.
341 List<FreeBlock> allocation_list_;
342 int current_allocation_block_index_;
343
344 CodeRangeData();
345
346 friend class V8Context;
347 friend class CodeRange;
348 DISALLOW_COPY_AND_ASSIGN(CodeRangeData);
349 };
316 350
317 // ---------------------------------------------------------------------------- 351 // ----------------------------------------------------------------------------
318 // All heap objects containing executable code (code objects) must be allocated 352 // All heap objects containing executable code (code objects) must be allocated
319 // from a 2 GB range of memory, so that they can call each other using 32-bit 353 // from a 2 GB range of memory, so that they can call each other using 32-bit
320 // displacements. This happens automatically on 32-bit platforms, where 32-bit 354 // displacements. This happens automatically on 32-bit platforms, where 32-bit
321 // displacements cover the entire 4GB virtual address space. On 64-bit 355 // displacements cover the entire 4GB virtual address space. On 64-bit
322 // platforms, we support this using the CodeRange object, which reserves and 356 // platforms, we support this using the CodeRange object, which reserves and
323 // manages a range of virtual memory. 357 // manages a range of virtual memory.
324 class CodeRange : public AllStatic { 358 class CodeRange : public AllStatic {
325 public: 359 public:
326 // Reserves a range of virtual memory, but does not commit any of it. 360 // Reserves a range of virtual memory, but does not commit any of it.
327 // Can only be called once, at heap initialization time. 361 // Can only be called once, at heap initialization time.
328 // Returns false on failure. 362 // Returns false on failure.
329 static bool Setup(const size_t requested_size); 363 static bool Setup(const size_t requested_size);
330 364
331 // Frees the range of virtual memory, and frees the data structures used to 365 // Frees the range of virtual memory, and frees the data structures used to
332 // manage it. 366 // manage it.
333 static void TearDown(); 367 static void TearDown();
334 368
335 static bool exists() { return code_range_ != NULL; } 369 static bool exists() {
370 return v8_context()->code_range_data_.code_range_ != NULL;
371 }
336 static bool contains(Address address) { 372 static bool contains(Address address) {
337 if (code_range_ == NULL) return false; 373 CodeRangeData& data = v8_context()->code_range_data_;
338 Address start = static_cast<Address>(code_range_->address()); 374 if (data.code_range_ == NULL) return false;
339 return start <= address && address < start + code_range_->size(); 375 Address start = static_cast<Address>(data.code_range_->address());
376 return start <= address && address < start + data.code_range_->size();
340 } 377 }
341 378
342 // Allocates a chunk of memory from the large-object portion of 379 // Allocates a chunk of memory from the large-object portion of
343 // the code range. On platforms with no separate code range, should 380 // the code range. On platforms with no separate code range, should
344 // not be called. 381 // not be called.
345 static void* AllocateRawMemory(const size_t requested, size_t* allocated); 382 static void* AllocateRawMemory(const size_t requested, size_t* allocated);
346 static void FreeRawMemory(void* buf, size_t length); 383 static void FreeRawMemory(void* buf, size_t length);
347 384
348 private: 385 private:
349 // The reserved range of virtual memory that all code objects are put in.
350 static VirtualMemory* code_range_;
351 // Plain old data class, just a struct plus a constructor.
352 class FreeBlock {
353 public:
354 FreeBlock(Address start_arg, size_t size_arg)
355 : start(start_arg), size(size_arg) {}
356 FreeBlock(void* start_arg, size_t size_arg)
357 : start(static_cast<Address>(start_arg)), size(size_arg) {}
358
359 Address start;
360 size_t size;
361 };
362
363 // Freed blocks of memory are added to the free list. When the allocation
364 // list is exhausted, the free list is sorted and merged to make the new
365 // allocation list.
366 static List<FreeBlock> free_list_;
367 // Memory is allocated from the free blocks on the allocation list.
368 // The block at current_allocation_block_index_ is the current block.
369 static List<FreeBlock> allocation_list_;
370 static int current_allocation_block_index_;
371
372 // Finds a block on the allocation list that contains at least the 386 // Finds a block on the allocation list that contains at least the
373 // requested amount of memory. If none is found, sorts and merges 387 // requested amount of memory. If none is found, sorts and merges
374 // the existing free memory blocks, and searches again. 388 // the existing free memory blocks, and searches again.
375 // If none can be found, terminates V8 with FatalProcessOutOfMemory. 389 // If none can be found, terminates V8 with FatalProcessOutOfMemory.
376 static void GetNextAllocationBlock(size_t requested); 390 static void GetNextAllocationBlock(size_t requested);
377 // Compares the start addresses of two free blocks. 391 // Compares the start addresses of two free blocks.
378 static int CompareFreeBlockAddress(const FreeBlock* left, 392 static int CompareFreeBlockAddress(const CodeRangeData::FreeBlock* left,
379 const FreeBlock* right); 393 const CodeRangeData::FreeBlock* right);
380 }; 394 };
381 395
396 class MemoryAllocatorData {
397 // Allocated chunk info: chunk start address, chunk size, and owning space.
398 class ChunkInfo BASE_EMBEDDED {
399 public:
400 ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
401 void init(Address a, size_t s, PagedSpace* o) {
402 address_ = a;
403 size_ = s;
404 owner_ = o;
405 }
406 Address address() { return address_; }
407 size_t size() { return size_; }
408 PagedSpace* owner() { return owner_; }
409
410 private:
411 Address address_;
412 size_t size_;
413 PagedSpace* owner_;
414 };
415
416 // Maximum space size in bytes.
417 int capacity_;
418
419 // Allocated space size in bytes.
420 int size_;
421
422 // The initial chunk of virtual memory.
423 VirtualMemory* initial_chunk_;
424
425 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
426 List<ChunkInfo> chunks_;
427 List<int> free_chunk_ids_;
428 int max_nof_chunks_;
429 int top_;
430
431 friend class V8Context;
432 friend class MemoryAllocator;
433
434 MemoryAllocatorData();
435 DISALLOW_COPY_AND_ASSIGN(MemoryAllocatorData);
436 };
382 437
383 // ---------------------------------------------------------------------------- 438 // ----------------------------------------------------------------------------
384 // A space acquires chunks of memory from the operating system. The memory 439 // A space acquires chunks of memory from the operating system. The memory
385 // allocator manages chunks for the paged heap spaces (old space and map 440 // allocator manages chunks for the paged heap spaces (old space and map
386 // space). A paged chunk consists of pages. Pages in a chunk have contiguous 441 // space). A paged chunk consists of pages. Pages in a chunk have contiguous
387 // addresses and are linked as a list. 442 // addresses and are linked as a list.
388 // 443 //
389 // The allocator keeps an initial chunk which is used for the new space. The 444 // The allocator keeps an initial chunk which is used for the new space. The
390 // leftover regions of the initial chunk are used for the initial chunks of 445 // leftover regions of the initial chunk are used for the initial chunks of
391 // old space and map space if they are big enough to hold at least one page. 446 // old space and map space if they are big enough to hold at least one page.
(...skipping 72 matching lines...)
464 // but keep track of allocated bytes as part of heap. 519 // but keep track of allocated bytes as part of heap.
465 // If the flag is EXECUTABLE and a code range exists, the requested 520 // If the flag is EXECUTABLE and a code range exists, the requested
466 // memory is allocated from the code range. If a code range exists 521 // memory is allocated from the code range. If a code range exists
467 // and the freed memory is in it, the code range manages the freed memory. 522 // and the freed memory is in it, the code range manages the freed memory.
468 static void* AllocateRawMemory(const size_t requested, 523 static void* AllocateRawMemory(const size_t requested,
469 size_t* allocated, 524 size_t* allocated,
470 Executability executable); 525 Executability executable);
471 static void FreeRawMemory(void* buf, size_t length); 526 static void FreeRawMemory(void* buf, size_t length);
472 527
473 // Returns the maximum available bytes of heaps. 528 // Returns the maximum available bytes of heaps.
474 static int Available() { return capacity_ < size_ ? 0 : capacity_ - size_; } 529 static int Available() {
530 MemoryAllocatorData& data = v8_context()->memory_allocator_data_;
531 return data.capacity_ < data.size_ ? 0 : data.capacity_ - data.size_; }
475 532
476 // Returns allocated spaces in bytes. 533 // Returns allocated spaces in bytes.
477 static int Size() { return size_; } 534 static int Size() { return v8_context()->memory_allocator_data_.size_; }
478 535
479 // Returns maximum available bytes that the old space can have. 536 // Returns maximum available bytes that the old space can have.
480 static int MaxAvailable() { 537 static int MaxAvailable() {
481 return (Available() / Page::kPageSize) * Page::kObjectAreaSize; 538 return (Available() / Page::kPageSize) * Page::kObjectAreaSize;
482 } 539 }
483 540
484 // Links two pages. 541 // Links two pages.
485 static inline void SetNextPage(Page* prev, Page* next); 542 static inline void SetNextPage(Page* prev, Page* next);
486 543
487 // Returns the next page of a given page. 544 // Returns the next page of a given page.
(...skipping 30 matching lines...)
518 // If a chunk has at least 16 pages, the maximum heap size is about 575 // If a chunk has at least 16 pages, the maximum heap size is about
519 // 8K * 8K * 16 = 1G bytes. 576 // 8K * 8K * 16 = 1G bytes.
520 #ifdef V8_TARGET_ARCH_X64 577 #ifdef V8_TARGET_ARCH_X64
521 static const int kPagesPerChunk = 32; 578 static const int kPagesPerChunk = 32;
522 #else 579 #else
523 static const int kPagesPerChunk = 16; 580 static const int kPagesPerChunk = 16;
524 #endif 581 #endif
525 static const int kChunkSize = kPagesPerChunk * Page::kPageSize; 582 static const int kChunkSize = kPagesPerChunk * Page::kPageSize;
526 583
527 private: 584 private:
528 // Maximum space size in bytes.
529 static int capacity_;
530
531 // Allocated space size in bytes.
532 static int size_;
533
534 // The initial chunk of virtual memory.
535 static VirtualMemory* initial_chunk_;
536
537 // Allocated chunk info: chunk start address, chunk size, and owning space.
538 class ChunkInfo BASE_EMBEDDED {
539 public:
540 ChunkInfo() : address_(NULL), size_(0), owner_(NULL) {}
541 void init(Address a, size_t s, PagedSpace* o) {
542 address_ = a;
543 size_ = s;
544 owner_ = o;
545 }
546 Address address() { return address_; }
547 size_t size() { return size_; }
548 PagedSpace* owner() { return owner_; }
549
550 private:
551 Address address_;
552 size_t size_;
553 PagedSpace* owner_;
554 };
555
556 // Chunks_, free_chunk_ids_ and top_ act as a stack of free chunk ids.
557 static List<ChunkInfo> chunks_;
558 static List<int> free_chunk_ids_;
559 static int max_nof_chunks_;
560 static int top_;
561
562 // Push/pop a free chunk id onto/from the stack. 585 // Push/pop a free chunk id onto/from the stack.
563 static void Push(int free_chunk_id); 586 static void Push(int free_chunk_id);
564 static int Pop(); 587 static int Pop();
565 static bool OutOfChunkIds() { return top_ == 0; } 588 static bool OutOfChunkIds() {
589 return v8_context()->memory_allocator_data_.top_ == 0;
590 }
566 591
567 // Frees a chunk. 592 // Frees a chunk.
568 static void DeleteChunk(int chunk_id); 593 static void DeleteChunk(int chunk_id);
569 594
570 // Basic check whether a chunk id is in the valid range. 595 // Basic check whether a chunk id is in the valid range.
571 static inline bool IsValidChunkId(int chunk_id); 596 static inline bool IsValidChunkId(int chunk_id);
572 597
573 // Checks whether a chunk id identifies an allocated chunk. 598 // Checks whether a chunk id identifies an allocated chunk.
574 static inline bool IsValidChunk(int chunk_id); 599 static inline bool IsValidChunk(int chunk_id);
575 600
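The Push/Pop/OutOfChunkIds declarations above use chunks_, free_chunk_ids_ and top_ as a stack of free chunk ids, per the MemoryAllocatorData comment. A minimal sketch of that stack, with std::vector standing in for V8's List and the method bodies assumed rather than taken from the patch:

    // Sketch of the "stack of free chunk ids": free_chunk_ids_ holds the ids
    // of unused chunk slots and top_ is the stack pointer. Names mirror
    // MemoryAllocatorData; everything else is illustrative.
    #include <vector>

    struct ChunkIdStack {
      std::vector<int> free_chunk_ids_;
      int top_ = 0;

      void Push(int free_chunk_id) {
        if (top_ == static_cast<int>(free_chunk_ids_.size())) {
          free_chunk_ids_.push_back(free_chunk_id);
        } else {
          free_chunk_ids_[top_] = free_chunk_id;
        }
        ++top_;
      }

      int Pop() { return free_chunk_ids_[--top_]; }   // caller checks OutOfChunkIds()
      bool OutOfChunkIds() const { return top_ == 0; }
    };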
(...skipping 1373 matching lines...)
1949 1974
1950 private: 1975 private:
1951 LargeObjectChunk* current_; 1976 LargeObjectChunk* current_;
1952 HeapObjectCallback size_func_; 1977 HeapObjectCallback size_func_;
1953 }; 1978 };
1954 1979
1955 1980
1956 } } // namespace v8::internal 1981 } } // namespace v8::internal
1957 1982
1958 #endif // V8_SPACES_H_ 1983 #endif // V8_SPACES_H_
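For readers following the CodeRangeData comments ("Freed blocks of memory are added to the free list. When the allocation list is exhausted, the free list is sorted and merged to make the new allocation list."), here is a minimal sketch of that bookkeeping. The class name, the use of std::vector in place of V8's List, and the boolean failure return are all illustrative; the real GetNextAllocationBlock instead terminates with FatalProcessOutOfMemory when no block fits.

    // Sketch of the free-list/allocation-list scheme described in CodeRangeData:
    // freed blocks accumulate in free_list_; when the allocation list runs dry
    // it is rebuilt by sorting free_list_ by start address and merging adjacent
    // blocks.
    #include <algorithm>
    #include <cstddef>
    #include <cstdint>
    #include <vector>

    using Address = uint8_t*;

    struct FreeBlock {
      Address start;
      size_t size;
    };

    class CodeRangeSketch {
     public:
      void Free(Address start, size_t size) { free_list_.push_back({start, size}); }

      // Makes a block of at least `requested` bytes the current allocation
      // block, rebuilding the allocation list from the free list if needed.
      bool GetNextAllocationBlock(size_t requested) {
        for (; current_ < allocation_list_.size(); ++current_) {
          if (allocation_list_[current_].size >= requested) return true;
        }
        // Allocation list exhausted: sort free blocks by address and merge
        // adjacent ones into a fresh allocation list.
        std::sort(free_list_.begin(), free_list_.end(),
                  [](const FreeBlock& a, const FreeBlock& b) { return a.start < b.start; });
        allocation_list_.clear();
        for (const FreeBlock& block : free_list_) {
          if (!allocation_list_.empty() &&
              allocation_list_.back().start + allocation_list_.back().size == block.start) {
            allocation_list_.back().size += block.size;   // coalesce adjacent blocks
          } else {
            allocation_list_.push_back(block);
          }
        }
        free_list_.clear();
        current_ = 0;
        for (; current_ < allocation_list_.size(); ++current_) {
          if (allocation_list_[current_].size >= requested) return true;
        }
        return false;  // the real code reports out-of-memory here
      }

     private:
      std::vector<FreeBlock> free_list_;
      std::vector<FreeBlock> allocation_list_;
      size_t current_ = 0;
    };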