Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 102 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) | 113 ASSERT((0 <= index) && (index <= MapSpace::kMaxMapPageIndex)) |
| 114 | 114 |
| 115 | 115 |
| 116 class PagedSpace; | 116 class PagedSpace; |
| 117 class MemoryAllocator; | 117 class MemoryAllocator; |
| 118 class AllocationInfo; | 118 class AllocationInfo; |
| 119 class Space; | 119 class Space; |
| 120 class FreeList; | 120 class FreeList; |
| 121 class MemoryChunk; | 121 class MemoryChunk; |
| 122 | 122 |
| 123 // TODO(gc): Check that this all gets inlined and register allocated on | |
| 124 // all platforms. | |
| 125 class MarkBit { | 123 class MarkBit { |
| 126 public: | 124 public: |
| 127 typedef uint32_t CellType; | 125 typedef uint32_t CellType; |
| 128 | 126 |
| 129 inline MarkBit(CellType* cell, CellType mask, bool data_only) | 127 inline MarkBit(CellType* cell, CellType mask, bool data_only) |
| 130 : cell_(cell), mask_(mask), data_only_(data_only) { } | 128 : cell_(cell), mask_(mask), data_only_(data_only) { } |
| 131 | 129 |
| 132 inline CellType* cell() { return cell_; } | 130 inline CellType* cell() { return cell_; } |
| 133 inline CellType mask() { return mask_; } | 131 inline CellType mask() { return mask_; } |
| 134 | 132 |
| (...skipping 774 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 909 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, | 907 void AddMemoryAllocationCallback(MemoryAllocationCallback callback, |
| 910 ObjectSpace space, | 908 ObjectSpace space, |
| 911 AllocationAction action); | 909 AllocationAction action); |
| 912 | 910 |
| 913 void RemoveMemoryAllocationCallback( | 911 void RemoveMemoryAllocationCallback( |
| 914 MemoryAllocationCallback callback); | 912 MemoryAllocationCallback callback); |
| 915 | 913 |
| 916 bool MemoryAllocationCallbackRegistered( | 914 bool MemoryAllocationCallbackRegistered( |
| 917 MemoryAllocationCallback callback); | 915 MemoryAllocationCallback callback); |
| 918 | 916 |
| 919 | |
| 920 // TODO(gc) ISOLATSE | |
| 921 | |
| 922 private: | 917 private: |
| 923 Isolate* isolate_; | 918 Isolate* isolate_; |
| 924 | 919 |
| 925 // Maximum space size in bytes. | 920 // Maximum space size in bytes. |
| 926 size_t capacity_; | 921 size_t capacity_; |
| 927 // Maximum subset of capacity_ that can be executable | 922 // Maximum subset of capacity_ that can be executable |
| 928 size_t capacity_executable_; | 923 size_t capacity_executable_; |
| 929 | 924 |
| 930 // Allocated space size in bytes. | 925 // Allocated space size in bytes. |
| 931 size_t size_; | 926 size_t size_; |
| (...skipping 133 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 1065 class AllocationInfo { | 1060 class AllocationInfo { |
| 1066 public: | 1061 public: |
| 1067 AllocationInfo() : top(NULL), limit(NULL) { | 1062 AllocationInfo() : top(NULL), limit(NULL) { |
| 1068 } | 1063 } |
| 1069 | 1064 |
| 1070 Address top; // Current allocation top. | 1065 Address top; // Current allocation top. |
| 1071 Address limit; // Current allocation limit. | 1066 Address limit; // Current allocation limit. |
| 1072 | 1067 |
| 1073 #ifdef DEBUG | 1068 #ifdef DEBUG |
| 1074 bool VerifyPagedAllocation() { | 1069 bool VerifyPagedAllocation() { |
| 1075 // TODO(gc): Make this type-correct. NewSpacePage isn't a Page, | |
| 1076 // but NewSpace still uses AllocationInfo. | |
| 1077 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) | 1070 return (Page::FromAllocationTop(top) == Page::FromAllocationTop(limit)) |
| 1078 && (top <= limit); | 1071 && (top <= limit); |
| 1079 } | 1072 } |
| 1080 #endif | 1073 #endif |
| 1081 }; | 1074 }; |
| 1082 | 1075 |
| 1083 | 1076 |
| 1084 // An abstraction of the accounting statistics of a page-structured space. | 1077 // An abstraction of the accounting statistics of a page-structured space. |
| 1085 // The 'capacity' of a space is the number of object-area bytes (ie, not | 1078 // The 'capacity' of a space is the number of object-area bytes (ie, not |
| 1086 // including page bookkeeping structures) currently in the space. The 'size' | 1079 // including page bookkeeping structures) currently in the space. The 'size' |
| (...skipping 1174 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2261 // Creates a map space object with a maximum capacity. | 2254 // Creates a map space object with a maximum capacity. |
| 2262 MapSpace(Heap* heap, | 2255 MapSpace(Heap* heap, |
| 2263 intptr_t max_capacity, | 2256 intptr_t max_capacity, |
| 2264 int max_map_space_pages, | 2257 int max_map_space_pages, |
| 2265 AllocationSpace id) | 2258 AllocationSpace id) |
| 2266 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), | 2259 : FixedSpace(heap, max_capacity, id, Map::kSize, "map"), |
| 2267 max_map_space_pages_(max_map_space_pages) { | 2260 max_map_space_pages_(max_map_space_pages) { |
| 2268 } | 2261 } |
| 2269 | 2262 |
| 2270 // Given an index, returns the page address. | 2263 // Given an index, returns the page address. |
| 2271 // TODO(gc): this limit is artificial just to keep code compilable | 2264 // TODO(1600): this limit is artificial just to keep code compilable |
| 2272 static const int kMaxMapPageIndex = 1 << 16; | 2265 static const int kMaxMapPageIndex = 1 << 16; |
| 2273 | 2266 |
| 2274 // Are map pointers encodable into map word? | |
| 2275 bool MapPointersEncodable() { | |
| 2276 return false; | |
| 2277 } | |
| 2278 | |
| 2279 // Should be called after forced sweep to find out if map space needs | |
| 2280 // compaction. | |
| 2281 bool NeedsCompaction(int live_maps) { | |
| 2282 return false; // TODO(gc): Bring back map compaction. | |
| 2283 } | |
| 2284 | |
| 2285 virtual int RoundSizeDownToObjectAlignment(int size) { | 2267 virtual int RoundSizeDownToObjectAlignment(int size) { |
| 2286 if (IsPowerOf2(Map::kSize)) { | 2268 if (IsPowerOf2(Map::kSize)) { |
| 2287 return RoundDown(size, Map::kSize); | 2269 return RoundDown(size, Map::kSize); |
| 2288 } else { | 2270 } else { |
| 2289 return (size / Map::kSize) * Map::kSize; | 2271 return (size / Map::kSize) * Map::kSize; |
| 2290 } | 2272 } |
| 2291 } | 2273 } |
| 2292 | 2274 |
| 2293 protected: | 2275 protected: |
| 2294 #ifdef DEBUG | 2276 #ifdef DEBUG |
| (...skipping 54 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2349 public: | 2331 public: |
| 2350 LargeObjectSpace(Heap* heap, AllocationSpace id); | 2332 LargeObjectSpace(Heap* heap, AllocationSpace id); |
| 2351 virtual ~LargeObjectSpace() {} | 2333 virtual ~LargeObjectSpace() {} |
| 2352 | 2334 |
| 2353 // Initializes internal data structures. | 2335 // Initializes internal data structures. |
| 2354 bool Setup(); | 2336 bool Setup(); |
| 2355 | 2337 |
| 2356 // Releases internal resources, frees objects in this space. | 2338 // Releases internal resources, frees objects in this space. |
| 2357 void TearDown(); | 2339 void TearDown(); |
| 2358 | 2340 |
| 2359 // Allocates a (non-FixedArray, non-Code) large object. | |
| 2360 MUST_USE_RESULT MaybeObject* AllocateRawData(int size_in_bytes); | |
| 2361 // Allocates a large Code object. | |
| 2362 MUST_USE_RESULT MaybeObject* AllocateRawCode(int size_in_bytes); | |
| 2363 // Allocates a large FixedArray. | |
| 2364 MUST_USE_RESULT MaybeObject* AllocateRawFixedArray(int size_in_bytes); | |
| 2365 | |
| 2366 static intptr_t ObjectSizeFor(intptr_t chunk_size) { | 2341 static intptr_t ObjectSizeFor(intptr_t chunk_size) { |
| 2367 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; | 2342 if (chunk_size <= (Page::kPageSize + Page::kObjectStartOffset)) return 0; |
| 2368 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; | 2343 return chunk_size - Page::kPageSize - Page::kObjectStartOffset; |
| 2369 } | 2344 } |
| 2370 | 2345 |
| 2346 // Shared implementation of AllocateRaw, AllocateRawCode and | |
| 2347 // AllocateRawFixedArray. | |
| 2348 MUST_USE_RESULT MaybeObject* AllocateRaw(int object_size, | |
| 2349 Executability executable); | |
| 2350 | |
| 2371 // Available bytes for objects in this space. | 2351 // Available bytes for objects in this space. |
| 2372 inline intptr_t Available(); | 2352 inline intptr_t Available(); |
| 2373 | 2353 |
| 2374 virtual intptr_t Size() { | 2354 virtual intptr_t Size() { |
| 2375 return size_; | 2355 return size_; |
| 2376 } | 2356 } |
| 2377 | 2357 |
| 2378 virtual intptr_t SizeOfObjects() { | 2358 virtual intptr_t SizeOfObjects() { |
| 2379 return objects_size_; | 2359 return objects_size_; |
| 2380 } | 2360 } |
| (...skipping 37 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2418 // iterates all objects in the space. May be slow. | 2398 // iterates all objects in the space. May be slow. |
| 2419 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); } | 2399 bool SlowContains(Address addr) { return !FindObject(addr)->IsFailure(); } |
| 2420 | 2400 |
| 2421 private: | 2401 private: |
| 2422 // The head of the linked list of large object chunks. | 2402 // The head of the linked list of large object chunks. |
| 2423 LargePage* first_page_; | 2403 LargePage* first_page_; |
| 2424 intptr_t size_; // allocated bytes | 2404 intptr_t size_; // allocated bytes |
| 2425 int page_count_; // number of chunks | 2405 int page_count_; // number of chunks |
| 2426 intptr_t objects_size_; // size of objects | 2406 intptr_t objects_size_; // size of objects |
| 2427 | 2407 |
| 2428 // Shared implementation of AllocateRaw, AllocateRawCode and | |
| 2429 // AllocateRawFixedArray. | |
| 2430 MUST_USE_RESULT MaybeObject* AllocateRawInternal(int object_size, | |
| 2431 Executability executable); | |
| 2432 | |
| 2433 friend class LargeObjectIterator; | 2408 friend class LargeObjectIterator; |
| 2434 | 2409 |
| 2435 public: | 2410 public: |
| 2436 TRACK_MEMORY("LargeObjectSpace") | 2411 TRACK_MEMORY("LargeObjectSpace") |
| 2437 }; | 2412 }; |
| 2438 | 2413 |
| 2439 | 2414 |
| 2440 class LargeObjectIterator: public ObjectIterator { | 2415 class LargeObjectIterator: public ObjectIterator { |
| 2441 public: | 2416 public: |
| 2442 explicit LargeObjectIterator(LargeObjectSpace* space); | 2417 explicit LargeObjectIterator(LargeObjectSpace* space); |
| 2443 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); | 2418 LargeObjectIterator(LargeObjectSpace* space, HeapObjectCallback size_func); |
| 2444 | 2419 |
| 2445 HeapObject* Next(); | 2420 HeapObject* Next(); |
| 2446 | 2421 |
| 2447 // implementation of ObjectIterator. | 2422 // implementation of ObjectIterator. |
| 2448 virtual HeapObject* next_object() { return Next(); } | 2423 virtual HeapObject* next_object() { return Next(); } |
| 2449 | 2424 |
| 2450 private: | 2425 private: |
| 2451 LargePage* current_; | 2426 LargePage* current_; |
| 2452 HeapObjectCallback size_func_; | 2427 HeapObjectCallback size_func_; |
| 2453 }; | 2428 }; |
| 2454 | 2429 |
| 2455 | 2430 |
| 2456 // Iterates over the chunks (pages and large object pages) that can contain | 2431 // Iterates over the chunks (pages and large object pages) that can contain |
| 2457 // pointers to new space. | 2432 // pointers to new space. |
| 2458 class PointerChunkIterator BASE_EMBEDDED { | 2433 class PointerChunkIterator BASE_EMBEDDED { |
| 2459 public: | 2434 public: |
| 2460 inline PointerChunkIterator(); | 2435 inline explicit PointerChunkIterator(Heap* heap_); *(Review comment — Erik Corry, 2011/08/16 11:17:20: "No underscore here." — i.e. the parameter should be named `heap`, not `heap_`; trailing underscores are reserved for members.)* |
| 2461 | 2436 |
| 2462 // Return NULL when the iterator is done. | 2437 // Return NULL when the iterator is done. |
| 2463 MemoryChunk* next() { | 2438 MemoryChunk* next() { |
| 2464 switch (state_) { | 2439 switch (state_) { |
| 2465 case kOldPointerState: { | 2440 case kOldPointerState: { |
| 2466 if (old_pointer_iterator_.has_next()) { | 2441 if (old_pointer_iterator_.has_next()) { |
| 2467 return old_pointer_iterator_.next(); | 2442 return old_pointer_iterator_.next(); |
| 2468 } | 2443 } |
| 2469 state_ = kMapState; | 2444 state_ = kMapState; |
| 2470 // Fall through. | 2445 // Fall through. |
| (...skipping 55 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 2526 } | 2501 } |
| 2527 // Must be small, since an iteration is used for lookup. | 2502 // Must be small, since an iteration is used for lookup. |
| 2528 static const int kMaxComments = 64; | 2503 static const int kMaxComments = 64; |
| 2529 }; | 2504 }; |
| 2530 #endif | 2505 #endif |
| 2531 | 2506 |
| 2532 | 2507 |
| 2533 } } // namespace v8::internal | 2508 } } // namespace v8::internal |
| 2534 | 2509 |
| 2535 #endif // V8_SPACES_H_ | 2510 #endif // V8_SPACES_H_ |
| OLD | NEW |