OLD | NEW |
1 // Copyright 2006-2008 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
11 // with the distribution. | 11 // with the distribution. |
(...skipping 13 matching lines...) |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include <stdlib.h> | 28 #include <stdlib.h> |
29 | 29 |
30 #include "v8.h" | 30 #include "v8.h" |
31 #include "cctest.h" | 31 #include "cctest.h" |
32 | 32 |
33 using namespace v8::internal; | 33 using namespace v8::internal; |
34 | 34 |
| 35 #if 0 |
35 static void VerifyRegionMarking(Address page_start) { | 36 static void VerifyRegionMarking(Address page_start) { |
| 37 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER |
36 Page* p = Page::FromAddress(page_start); | 38 Page* p = Page::FromAddress(page_start); |
37 | 39 |
38 p->SetRegionMarks(Page::kAllRegionsCleanMarks); | 40 p->SetRegionMarks(Page::kAllRegionsCleanMarks); |
39 | 41 |
40 for (Address addr = p->ObjectAreaStart(); | 42 for (Address addr = p->ObjectAreaStart(); |
41 addr < p->ObjectAreaEnd(); | 43 addr < p->ObjectAreaEnd(); |
42 addr += kPointerSize) { | 44 addr += kPointerSize) { |
43 CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr)); | 45 CHECK(!Page::FromAddress(addr)->IsRegionDirty(addr)); |
44 } | 46 } |
45 | 47 |
46 for (Address addr = p->ObjectAreaStart(); | 48 for (Address addr = p->ObjectAreaStart(); |
47 addr < p->ObjectAreaEnd(); | 49 addr < p->ObjectAreaEnd(); |
48 addr += kPointerSize) { | 50 addr += kPointerSize) { |
49 Page::FromAddress(addr)->MarkRegionDirty(addr); | 51 Page::FromAddress(addr)->MarkRegionDirty(addr); |
50 } | 52 } |
51 | 53 |
52 for (Address addr = p->ObjectAreaStart(); | 54 for (Address addr = p->ObjectAreaStart(); |
53 addr < p->ObjectAreaEnd(); | 55 addr < p->ObjectAreaEnd(); |
54 addr += kPointerSize) { | 56 addr += kPointerSize) { |
55 CHECK(Page::FromAddress(addr)->IsRegionDirty(addr)); | 57 CHECK(Page::FromAddress(addr)->IsRegionDirty(addr)); |
56 } | 58 } |
| 59 #endif |
57 } | 60 } |
| 61 #endif |
58 | 62 |
59 | 63 |
| 64 // TODO(gc): Pages can no longer be allocated like this; the details are now hidden inside the memory allocator. |
| 65 #if 0 |
60 TEST(Page) { | 66 TEST(Page) { |
61 byte* mem = NewArray<byte>(2*Page::kPageSize); | 67 byte* mem = NewArray<byte>(2*Page::kPageSize); |
62 CHECK(mem != NULL); | 68 CHECK(mem != NULL); |
63 | 69 |
64 Address start = reinterpret_cast<Address>(mem); | 70 Address start = reinterpret_cast<Address>(mem); |
65 Address page_start = RoundUp(start, Page::kPageSize); | 71 Address page_start = RoundUp(start, Page::kPageSize); |
66 | 72 |
67 Page* p = Page::FromAddress(page_start); | 73 Page* p = Page::FromAddress(page_start); |
68 // Initialized Page has heap pointer, normally set by memory_allocator. | 74 // Initialized Page has heap pointer, normally set by memory_allocator. |
69 p->heap_ = HEAP; | 75 p->heap_ = HEAP; |
(...skipping 12 matching lines...) |
82 CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize); | 88 CHECK(p->Offset(page_start + Page::kPageSize) == Page::kPageSize); |
83 | 89 |
84 CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart()); | 90 CHECK(p->OffsetToAddress(Page::kObjectStartOffset) == p->ObjectAreaStart()); |
85 CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd()); | 91 CHECK(p->OffsetToAddress(Page::kPageSize) == p->ObjectAreaEnd()); |
86 | 92 |
87 // test region marking | 93 // test region marking |
88 VerifyRegionMarking(page_start); | 94 VerifyRegionMarking(page_start); |
89 | 95 |
90 DeleteArray(mem); | 96 DeleteArray(mem); |
91 } | 97 } |
| 98 #endif |
92 | 99 |
93 | 100 |
94 namespace v8 { | 101 namespace v8 { |
95 namespace internal { | 102 namespace internal { |
96 | 103 |
97 // Temporarily sets a given allocator in an isolate. | 104 // Temporarily sets a given allocator in an isolate. |
98 class TestMemoryAllocatorScope { | 105 class TestMemoryAllocatorScope { |
99 public: | 106 public: |
100 TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator) | 107 TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator) |
101 : isolate_(isolate), | 108 : isolate_(isolate), |
(...skipping 13 matching lines...) |
115 }; | 122 }; |
116 | 123 |
117 } } // namespace v8::internal | 124 } } // namespace v8::internal |
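The elided body of TestMemoryAllocatorScope above presumably saves the isolate's current allocator and restores it on destruction. A minimal sketch of such an RAII scope, assuming the class is a friend of Isolate (or that a memory_allocator_ field is otherwise accessible); the field name is an assumption, not taken from the diff:

  class TestMemoryAllocatorScope {
   public:
    TestMemoryAllocatorScope(Isolate* isolate, MemoryAllocator* allocator)
        : isolate_(isolate),
          old_allocator_(isolate->memory_allocator_) {  // assumed field name
      isolate->memory_allocator_ = allocator;  // install the test allocator
    }

    ~TestMemoryAllocatorScope() {
      // Restore the allocator that was active when the scope was entered.
      isolate_->memory_allocator_ = old_allocator_;
    }

   private:
    Isolate* isolate_;
    MemoryAllocator* old_allocator_;

    DISALLOW_COPY_AND_ASSIGN(TestMemoryAllocatorScope);
  };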
118 | 125 |
119 | 126 |
120 TEST(MemoryAllocator) { | 127 TEST(MemoryAllocator) { |
121 OS::Setup(); | 128 OS::Setup(); |
122 Isolate* isolate = Isolate::Current(); | 129 Isolate* isolate = Isolate::Current(); |
123 isolate->InitializeLoggingAndCounters(); | 130 isolate->InitializeLoggingAndCounters(); |
124 Heap* heap = isolate->heap(); | 131 Heap* heap = isolate->heap(); |
125 CHECK(heap->ConfigureHeapDefault()); | 132 CHECK(heap->ConfigureHeapDefault()); |
| 133 |
126 MemoryAllocator* memory_allocator = new MemoryAllocator(isolate); | 134 MemoryAllocator* memory_allocator = new MemoryAllocator(isolate); |
127 CHECK(memory_allocator->Setup(heap->MaxReserved(), | 135 CHECK(memory_allocator->Setup(heap->MaxReserved(), |
128 heap->MaxExecutableSize())); | 136 heap->MaxExecutableSize())); |
129 TestMemoryAllocatorScope test_scope(isolate, memory_allocator); | |
130 | 137 |
| 138 int total_pages = 0; |
131 OldSpace faked_space(heap, | 139 OldSpace faked_space(heap, |
132 heap->MaxReserved(), | 140 heap->MaxReserved(), |
133 OLD_POINTER_SPACE, | 141 OLD_POINTER_SPACE, |
134 NOT_EXECUTABLE); | 142 NOT_EXECUTABLE); |
135 int total_pages = 0; | 143 Page* first_page = |
136 int requested = MemoryAllocator::kPagesPerChunk; | 144 memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE); |
137 int allocated; | 145 |
138 // If we request n pages, we should get n or n - 1. | 146 first_page->InsertAfter(faked_space.anchor()->prev_page()); |
139 Page* first_page = memory_allocator->AllocatePages( | |
140 requested, &allocated, &faked_space); | |
141 CHECK(first_page->is_valid()); | 147 CHECK(first_page->is_valid()); |
142 CHECK(allocated == requested || allocated == requested - 1); | 148 CHECK(first_page->next_page() == faked_space.anchor()); |
143 total_pages += allocated; | 149 total_pages++; |
144 | 150 |
145 Page* last_page = first_page; | 151 for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) { |
146 for (Page* p = first_page; p->is_valid(); p = p->next_page()) { | 152 CHECK(p->owner() == &faked_space); |
147 CHECK(memory_allocator->IsPageInSpace(p, &faked_space)); | |
148 last_page = p; | |
149 } | 153 } |
150 | 154 |
151 // Again, we should get n or n - 1 pages. | 155 // Allocate a second page and link it after the first. |
152 Page* others = memory_allocator->AllocatePages( | 156 Page* other = |
153 requested, &allocated, &faked_space); | 157 memory_allocator->AllocatePage(&faked_space, NOT_EXECUTABLE); |
154 CHECK(others->is_valid()); | 158 CHECK(other->is_valid()); |
155 CHECK(allocated == requested || allocated == requested - 1); | 159 total_pages++; |
156 total_pages += allocated; | 160 other->InsertAfter(first_page); |
157 | |
158 memory_allocator->SetNextPage(last_page, others); | |
159 int page_count = 0; | 161 int page_count = 0; |
160 for (Page* p = first_page; p->is_valid(); p = p->next_page()) { | 162 for (Page* p = first_page; p != faked_space.anchor(); p = p->next_page()) { |
161 CHECK(memory_allocator->IsPageInSpace(p, &faked_space)); | 163 CHECK(p->owner() == &faked_space); |
162 page_count++; | 164 page_count++; |
163 } | 165 } |
164 CHECK(total_pages == page_count); | 166 CHECK(total_pages == page_count); |
165 | 167 |
166 Page* second_page = first_page->next_page(); | 168 Page* second_page = first_page->next_page(); |
167 CHECK(second_page->is_valid()); | 169 CHECK(second_page->is_valid()); |
168 | 170 memory_allocator->Free(first_page); |
169 // Freeing pages at the first chunk starting at or after the second page | 171 memory_allocator->Free(second_page); |
170 // should free the entire second chunk. It will return the page it was passed | |
171 // (since the second page was in the first chunk). | |
172 Page* free_return = memory_allocator->FreePages(second_page); | |
173 CHECK(free_return == second_page); | |
174 memory_allocator->SetNextPage(first_page, free_return); | |
175 | |
176 // Freeing pages in the first chunk starting at the first page should free | |
177 // the first chunk and return an invalid page. | |
178 Page* invalid_page = memory_allocator->FreePages(first_page); | |
179 CHECK(!invalid_page->is_valid()); | |
180 | |
181 memory_allocator->TearDown(); | 172 memory_allocator->TearDown(); |
182 delete memory_allocator; | 173 delete memory_allocator; |
183 } | 174 } |
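In the new layout (right column) a space's pages form a circular, doubly-linked list threaded through an anchor sentinel, which is why the loops above terminate at faked_space.anchor() rather than at an invalid page. A sketch of that traversal pattern, assuming PagedSpace exposes the anchor() and next_page() accessors used in the test; CountPages is a hypothetical helper for illustration:

  // Count a space's pages by walking the circular list; the anchor is a
  // sentinel Page that holds no objects and marks both ends of the list.
  static int CountPages(PagedSpace* space) {
    int count = 0;
    for (Page* p = space->anchor()->next_page();
         p != space->anchor();
         p = p->next_page()) {
      count++;
    }
    return count;
  }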
184 | 175 |
185 | 176 |
186 TEST(NewSpace) { | 177 TEST(NewSpace) { |
187 OS::Setup(); | 178 OS::Setup(); |
188 Isolate* isolate = Isolate::Current(); | 179 Isolate* isolate = Isolate::Current(); |
189 isolate->InitializeLoggingAndCounters(); | 180 isolate->InitializeLoggingAndCounters(); |
190 Heap* heap = isolate->heap(); | 181 Heap* heap = isolate->heap(); |
191 CHECK(heap->ConfigureHeapDefault()); | 182 CHECK(heap->ConfigureHeapDefault()); |
192 MemoryAllocator* memory_allocator = new MemoryAllocator(isolate); | 183 MemoryAllocator* memory_allocator = new MemoryAllocator(isolate); |
193 CHECK(memory_allocator->Setup(heap->MaxReserved(), | 184 CHECK(memory_allocator->Setup(heap->MaxReserved(), |
194 heap->MaxExecutableSize())); | 185 heap->MaxExecutableSize())); |
195 TestMemoryAllocatorScope test_scope(isolate, memory_allocator); | 186 TestMemoryAllocatorScope test_scope(isolate, memory_allocator); |
196 | 187 |
197 NewSpace new_space(heap); | 188 NewSpace new_space(heap); |
198 | 189 |
199 void* chunk = | 190 CHECK(new_space.Setup(heap->ReservedSemiSpaceSize(), |
200 memory_allocator->ReserveInitialChunk(4 * heap->ReservedSemiSpaceSize()); | 191 heap->ReservedSemiSpaceSize())); |
201 CHECK(chunk != NULL); | |
202 Address start = RoundUp(static_cast<Address>(chunk), | |
203 2 * heap->ReservedSemiSpaceSize()); | |
204 CHECK(new_space.Setup(start, 2 * heap->ReservedSemiSpaceSize())); | |
205 CHECK(new_space.HasBeenSetup()); | 192 CHECK(new_space.HasBeenSetup()); |
206 | 193 |
207 while (new_space.Available() >= Page::kMaxHeapObjectSize) { | 194 while (new_space.Available() >= Page::kMaxHeapObjectSize) { |
208 Object* obj = | 195 Object* obj = |
209 new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked(); | 196 new_space.AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked(); |
210 CHECK(new_space.Contains(HeapObject::cast(obj))); | 197 CHECK(new_space.Contains(HeapObject::cast(obj))); |
211 } | 198 } |
212 | 199 |
213 new_space.TearDown(); | 200 new_space.TearDown(); |
214 memory_allocator->TearDown(); | 201 memory_allocator->TearDown(); |
(...skipping 11 matching lines...) |
226 CHECK(memory_allocator->Setup(heap->MaxReserved(), | 213 CHECK(memory_allocator->Setup(heap->MaxReserved(), |
227 heap->MaxExecutableSize())); | 214 heap->MaxExecutableSize())); |
228 TestMemoryAllocatorScope test_scope(isolate, memory_allocator); | 215 TestMemoryAllocatorScope test_scope(isolate, memory_allocator); |
229 | 216 |
230 OldSpace* s = new OldSpace(heap, | 217 OldSpace* s = new OldSpace(heap, |
231 heap->MaxOldGenerationSize(), | 218 heap->MaxOldGenerationSize(), |
232 OLD_POINTER_SPACE, | 219 OLD_POINTER_SPACE, |
233 NOT_EXECUTABLE); | 220 NOT_EXECUTABLE); |
234 CHECK(s != NULL); | 221 CHECK(s != NULL); |
235 | 222 |
236 void* chunk = memory_allocator->ReserveInitialChunk( | 223 CHECK(s->Setup()); |
237 4 * heap->ReservedSemiSpaceSize()); | |
238 CHECK(chunk != NULL); | |
239 Address start = static_cast<Address>(chunk); | |
240 size_t size = RoundUp(start, 2 * heap->ReservedSemiSpaceSize()) - start; | |
241 | |
242 CHECK(s->Setup(start, size)); | |
243 | 224 |
244 while (s->Available() > 0) { | 225 while (s->Available() > 0) { |
245 s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked(); | 226 s->AllocateRaw(Page::kMaxHeapObjectSize)->ToObjectUnchecked(); |
246 } | 227 } |
247 | 228 |
248 s->TearDown(); | 229 s->TearDown(); |
249 delete s; | 230 delete s; |
250 memory_allocator->TearDown(); | 231 memory_allocator->TearDown(); |
251 delete memory_allocator; | 232 delete memory_allocator; |
252 } | 233 } |
253 | 234 |
254 | 235 |
255 TEST(LargeObjectSpace) { | 236 TEST(LargeObjectSpace) { |
256 v8::V8::Initialize(); | 237 v8::V8::Initialize(); |
257 | 238 |
258 LargeObjectSpace* lo = HEAP->lo_space(); | 239 LargeObjectSpace* lo = HEAP->lo_space(); |
259 CHECK(lo != NULL); | 240 CHECK(lo != NULL); |
260 | 241 |
261 Map* faked_map = reinterpret_cast<Map*>(HeapObject::FromAddress(0)); | |
262 int lo_size = Page::kPageSize; | 242 int lo_size = Page::kPageSize; |
263 | 243 |
264 Object* obj = lo->AllocateRaw(lo_size)->ToObjectUnchecked(); | 244 Object* obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->ToObjectUnchecked(); |
265 CHECK(obj->IsHeapObject()); | 245 CHECK(obj->IsHeapObject()); |
266 | 246 |
267 HeapObject* ho = HeapObject::cast(obj); | 247 HeapObject* ho = HeapObject::cast(obj); |
268 ho->set_map(faked_map); | |
269 | 248 |
270 CHECK(lo->Contains(HeapObject::cast(obj))); | 249 CHECK(lo->Contains(HeapObject::cast(obj))); |
271 | 250 |
272 CHECK(lo->FindObject(ho->address()) == obj); | 251 CHECK(lo->FindObject(ho->address()) == obj); |
273 | 252 |
274 CHECK(lo->Contains(ho)); | 253 CHECK(lo->Contains(ho)); |
275 | 254 |
276 while (true) { | 255 while (true) { |
277 intptr_t available = lo->Available(); | 256 intptr_t available = lo->Available(); |
278 { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size); | 257 { MaybeObject* maybe_obj = lo->AllocateRaw(lo_size, NOT_EXECUTABLE); |
279 if (!maybe_obj->ToObject(&obj)) break; | 258 if (!maybe_obj->ToObject(&obj)) break; |
280 } | 259 } |
281 HeapObject::cast(obj)->set_map(faked_map); | |
282 CHECK(lo->Available() < available); | 260 CHECK(lo->Available() < available); |
283 } | 261 } |
284 | 262 |
285 CHECK(!lo->IsEmpty()); | 263 CHECK(!lo->IsEmpty()); |
286 | 264 |
287 CHECK(lo->AllocateRaw(lo_size)->IsFailure()); | 265 CHECK(lo->AllocateRaw(lo_size, NOT_EXECUTABLE)->IsFailure()); |
288 } | 266 } |
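The large-object AllocateRaw now takes an Executability argument in addition to the size, and still returns a MaybeObject that must be unwrapped before use. A sketch of the checked-allocation pattern the test exercises; TryAllocateLarge is a hypothetical helper, not part of the API:

  // Attempt one large-object allocation; returns false if the space is full.
  static bool TryAllocateLarge(LargeObjectSpace* lo, int size, Object** out) {
    MaybeObject* maybe = lo->AllocateRaw(size, NOT_EXECUTABLE);
    if (maybe->IsFailure()) return false;  // real code would trigger a GC here
    return maybe->ToObject(out);           // extracts the object on success
  }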