Chromium Code Reviews

Side by Side Diff: runtime/vm/freelist.cc

Issue 2974233002: VM: Re-format to use at most one newline between functions (Closed)
Patch Set: Rebase and merge (created 3 years, 5 months ago)
1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file 1 // Copyright (c) 2011, the Dart project authors. Please see the AUTHORS file
2 // for details. All rights reserved. Use of this source code is governed by a 2 // for details. All rights reserved. Use of this source code is governed by a
3 // BSD-style license that can be found in the LICENSE file. 3 // BSD-style license that can be found in the LICENSE file.
4 4
5 #include "vm/freelist.h" 5 #include "vm/freelist.h"
6 6
7 #include "vm/bit_set.h" 7 #include "vm/bit_set.h"
8 #include "vm/hash_map.h" 8 #include "vm/hash_map.h"
9 #include "vm/lockers.h" 9 #include "vm/lockers.h"
10 #include "vm/object.h" 10 #include "vm/object.h"
11 #include "vm/os_thread.h" 11 #include "vm/os_thread.h"
12 #include "vm/raw_object.h" 12 #include "vm/raw_object.h"
13 13
14 namespace dart { 14 namespace dart {
15 15
16
17 FreeListElement* FreeListElement::AsElement(uword addr, intptr_t size) { 16 FreeListElement* FreeListElement::AsElement(uword addr, intptr_t size) {
18 // Precondition: the (page containing the) header of the element is 17 // Precondition: the (page containing the) header of the element is
19 // writable. 18 // writable.
20 ASSERT(size >= kObjectAlignment); 19 ASSERT(size >= kObjectAlignment);
21 ASSERT(Utils::IsAligned(size, kObjectAlignment)); 20 ASSERT(Utils::IsAligned(size, kObjectAlignment));
22 21
23 FreeListElement* result = reinterpret_cast<FreeListElement*>(addr); 22 FreeListElement* result = reinterpret_cast<FreeListElement*>(addr);
24 23
25 uint32_t tags = 0; 24 uint32_t tags = 0;
26 tags = RawObject::SizeTag::update(size, tags); 25 tags = RawObject::SizeTag::update(size, tags);
27 tags = RawObject::ClassIdTag::update(kFreeListElement, tags); 26 tags = RawObject::ClassIdTag::update(kFreeListElement, tags);
28 // All words in a freelist element header should look like Smis. 27 // All words in a freelist element header should look like Smis.
29 ASSERT(!reinterpret_cast<RawObject*>(tags)->IsHeapObject()); 28 ASSERT(!reinterpret_cast<RawObject*>(tags)->IsHeapObject());
30 29
31 result->tags_ = tags; 30 result->tags_ = tags;
32 #if defined(HASH_IN_OBJECT_HEADER) 31 #if defined(HASH_IN_OBJECT_HEADER)
33 // Clearing this is mostly for neatness. The identityHashCode 32 // Clearing this is mostly for neatness. The identityHashCode
34 // of free list entries is not used. 33 // of free list entries is not used.
35 result->hash_ = 0; 34 result->hash_ = 0;
36 #endif 35 #endif
37 if (size > RawObject::SizeTag::kMaxSizeTag) { 36 if (size > RawObject::SizeTag::kMaxSizeTag) {
38 *result->SizeAddress() = size; 37 *result->SizeAddress() = size;
39 } 38 }
40 result->set_next(NULL); 39 result->set_next(NULL);
41 return result; 40 return result;
42 // Postcondition: the (page containing the) header of the element is 41 // Postcondition: the (page containing the) header of the element is
43 // writable. 42 // writable.
44 } 43 }
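AsElement stamps a pseudo-object header onto the start of a free block: the size is packed into RawObject::SizeTag, the class id is set to kFreeListElement, and the tags word is required to look like a Smi so that nothing in the header can be mistaken for a heap pointer. Blocks bigger than SizeTag::kMaxSizeTag keep a zero size tag and store the real size in the word behind the header (*result->SizeAddress() = size). Below is a minimal sketch of that encoding idea; the field widths, alignment, and class-id value are made-up constants for illustration, not the VM's actual RawObject tag layout.

    #include <cassert>
    #include <cstdint>
    #include <cstdio>

    // Illustrative tag layout (assumed for this sketch; the real widths and
    // shifts live in RawObject::SizeTag and RawObject::ClassIdTag).
    constexpr uint32_t kAlignLog2 = 4;           // assume 16-byte object alignment
    constexpr uint32_t kClassIdBits = 16;        // assumed class-id field width
    constexpr uint32_t kSizeBits = 8;            // assumed size field width
    constexpr uint32_t kFreeListElementCid = 0;  // assumed sentinel class id
    constexpr uint32_t kMaxEncodableSize = ((1u << kSizeBits) - 1) << kAlignLog2;

    // Packs (size, kFreeListElementCid) into one header word. A zero size field
    // means "too big for the tag"; the real element then stores the size in the
    // word after the header, exactly as AsElement does above.
    uint32_t EncodeFreeBlockHeader(uint32_t size) {
      assert(size % (1u << kAlignLog2) == 0);
      uint32_t size_field = (size <= kMaxEncodableSize) ? (size >> kAlignLog2) : 0;
      return (size_field << kClassIdBits) | kFreeListElementCid;
    }

    int main() {
      printf("%#x %#x\n", EncodeFreeBlockHeader(64), EncodeFreeBlockHeader(1 << 20));
      return 0;
    }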
45 44
46
47 void FreeListElement::InitOnce() { 45 void FreeListElement::InitOnce() {
48 ASSERT(sizeof(FreeListElement) == kObjectAlignment); 46 ASSERT(sizeof(FreeListElement) == kObjectAlignment);
49 ASSERT(OFFSET_OF(FreeListElement, tags_) == Object::tags_offset()); 47 ASSERT(OFFSET_OF(FreeListElement, tags_) == Object::tags_offset());
50 } 48 }
51 49
52
53 intptr_t FreeListElement::HeaderSizeFor(intptr_t size) { 50 intptr_t FreeListElement::HeaderSizeFor(intptr_t size) {
54 if (size == 0) return 0; 51 if (size == 0) return 0;
55 return ((size > RawObject::SizeTag::kMaxSizeTag) ? 3 : 2) * kWordSize; 52 return ((size > RawObject::SizeTag::kMaxSizeTag) ? 3 : 2) * kWordSize;
56 } 53 }
57 54
58
59 FreeList::FreeList() 55 FreeList::FreeList()
60 : mutex_(new Mutex()), 56 : mutex_(new Mutex()),
61 freelist_search_budget_(kInitialFreeListSearchBudget) { 57 freelist_search_budget_(kInitialFreeListSearchBudget) {
62 Reset(); 58 Reset();
63 } 59 }
64 60
65
66 FreeList::~FreeList() { 61 FreeList::~FreeList() {
67 delete mutex_; 62 delete mutex_;
68 } 63 }
69 64
70
71 uword FreeList::TryAllocate(intptr_t size, bool is_protected) { 65 uword FreeList::TryAllocate(intptr_t size, bool is_protected) {
72 MutexLocker ml(mutex_); 66 MutexLocker ml(mutex_);
73 return TryAllocateLocked(size, is_protected); 67 return TryAllocateLocked(size, is_protected);
74 } 68 }
75 69
76
77 uword FreeList::TryAllocateLocked(intptr_t size, bool is_protected) { 70 uword FreeList::TryAllocateLocked(intptr_t size, bool is_protected) {
78 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread()); 71 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
79 // Precondition: is_protected is false or else all free list elements are 72 // Precondition: is_protected is false or else all free list elements are
80 // in non-writable pages. 73 // in non-writable pages.
81 74
82 // Postcondition: if allocation succeeds, the allocated block is writable. 75 // Postcondition: if allocation succeeds, the allocated block is writable.
83 int index = IndexForSize(size); 76 int index = IndexForSize(size);
84 if ((index != kNumLists) && free_map_.Test(index)) { 77 if ((index != kNumLists) && free_map_.Test(index)) {
85 FreeListElement* element = DequeueElement(index); 78 FreeListElement* element = DequeueElement(index);
86 if (is_protected) { 79 if (is_protected) {
(...skipping 95 matching lines...)
182 } else if (tries_left-- < 0) { 175 } else if (tries_left-- < 0) {
183 freelist_search_budget_ = kInitialFreeListSearchBudget; 176 freelist_search_budget_ = kInitialFreeListSearchBudget;
184 return 0; // Trigger allocation of new page. 177 return 0; // Trigger allocation of new page.
185 } 178 }
186 previous = current; 179 previous = current;
187 current = current->next(); 180 current = current->next();
188 } 181 }
189 return 0; 182 return 0;
190 } 183 }
191 184
192
193 void FreeList::Free(uword addr, intptr_t size) { 185 void FreeList::Free(uword addr, intptr_t size) {
194 MutexLocker ml(mutex_); 186 MutexLocker ml(mutex_);
195 FreeLocked(addr, size); 187 FreeLocked(addr, size);
196 } 188 }
197 189
198
199 void FreeList::FreeLocked(uword addr, intptr_t size) { 190 void FreeList::FreeLocked(uword addr, intptr_t size) {
200 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread()); 191 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
201 // Precondition required by AsElement and EnqueueElement: the (page 192 // Precondition required by AsElement and EnqueueElement: the (page
202 // containing the) header of the freed block should be writable. This is 193 // containing the) header of the freed block should be writable. This is
203 // the case when called for newly allocated pages because they are 194 // the case when called for newly allocated pages because they are
204 // allocated as writable. It is the case when called during GC sweeping 195 // allocated as writable. It is the case when called during GC sweeping
205 // because the entire heap is writable. 196 // because the entire heap is writable.
206 intptr_t index = IndexForSize(size); 197 intptr_t index = IndexForSize(size);
207 FreeListElement* element = FreeListElement::AsElement(addr, size); 198 FreeListElement* element = FreeListElement::AsElement(addr, size);
208 EnqueueElement(element, index); 199 EnqueueElement(element, index);
209 // Postcondition: the (page containing the) header is left writable. 200 // Postcondition: the (page containing the) header is left writable.
210 } 201 }
211 202
212
213 void FreeList::Reset() { 203 void FreeList::Reset() {
214 MutexLocker ml(mutex_); 204 MutexLocker ml(mutex_);
215 free_map_.Reset(); 205 free_map_.Reset();
216 last_free_small_size_ = -1; 206 last_free_small_size_ = -1;
217 for (int i = 0; i < (kNumLists + 1); i++) { 207 for (int i = 0; i < (kNumLists + 1); i++) {
218 free_lists_[i] = NULL; 208 free_lists_[i] = NULL;
219 } 209 }
220 } 210 }
221 211
222
223 intptr_t FreeList::IndexForSize(intptr_t size) { 212 intptr_t FreeList::IndexForSize(intptr_t size) {
224 ASSERT(size >= kObjectAlignment); 213 ASSERT(size >= kObjectAlignment);
225 ASSERT(Utils::IsAligned(size, kObjectAlignment)); 214 ASSERT(Utils::IsAligned(size, kObjectAlignment));
226 215
227 intptr_t index = size >> kObjectAlignmentLog2; 216 intptr_t index = size >> kObjectAlignmentLog2;
228 if (index >= kNumLists) { 217 if (index >= kNumLists) {
229 index = kNumLists; 218 index = kNumLists;
230 } 219 }
231 return index; 220 return index;
232 } 221 }
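IndexForSize is the bucketing rule for the whole allocator: an aligned size maps to size >> kObjectAlignmentLog2, and anything that would land at or beyond kNumLists is collapsed into the single overflow bucket at index kNumLists, which holds all large blocks. A small self-contained sketch of that mapping, assuming 16-byte alignment and 128 small buckets (the real constants come from freelist.h and the target architecture):

    #include <cstdint>
    #include <cstdio>

    constexpr intptr_t kAlignLog2 = 4;   // assumed: 16-byte object alignment
    constexpr intptr_t kNumLists = 128;  // assumed small-bucket count

    intptr_t IndexForSize(intptr_t size) {
      intptr_t index = size >> kAlignLog2;
      return (index >= kNumLists) ? kNumLists : index;  // kNumLists = overflow list
    }

    int main() {
      // 32- and 2032-byte blocks land in small buckets; 1 MB goes to overflow.
      printf("%ld %ld %ld\n", (long)IndexForSize(32), (long)IndexForSize(2032),
             (long)IndexForSize(1 << 20));  // prints: 2 127 128
      return 0;
    }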
233 222
234
235 void FreeList::EnqueueElement(FreeListElement* element, intptr_t index) { 223 void FreeList::EnqueueElement(FreeListElement* element, intptr_t index) {
236 FreeListElement* next = free_lists_[index]; 224 FreeListElement* next = free_lists_[index];
237 if (next == NULL && index != kNumLists) { 225 if (next == NULL && index != kNumLists) {
238 free_map_.Set(index, true); 226 free_map_.Set(index, true);
239 last_free_small_size_ = 227 last_free_small_size_ =
240 Utils::Maximum(last_free_small_size_, index << kObjectAlignmentLog2); 228 Utils::Maximum(last_free_small_size_, index << kObjectAlignmentLog2);
241 } 229 }
242 element->set_next(next); 230 element->set_next(next);
243 free_lists_[index] = element; 231 free_lists_[index] = element;
244 } 232 }
245 233
246
247 FreeListElement* FreeList::DequeueElement(intptr_t index) { 234 FreeListElement* FreeList::DequeueElement(intptr_t index) {
248 FreeListElement* result = free_lists_[index]; 235 FreeListElement* result = free_lists_[index];
249 FreeListElement* next = result->next(); 236 FreeListElement* next = result->next();
250 if (next == NULL && index != kNumLists) { 237 if (next == NULL && index != kNumLists) {
251 intptr_t size = index << kObjectAlignmentLog2; 238 intptr_t size = index << kObjectAlignmentLog2;
252 if (size == last_free_small_size_) { 239 if (size == last_free_small_size_) {
253 // Note: This is -1 * kObjectAlignment if no other small sizes remain. 240 // Note: This is -1 * kObjectAlignment if no other small sizes remain.
254 last_free_small_size_ = 241 last_free_small_size_ =
255 free_map_.ClearLastAndFindPrevious(index) * kObjectAlignment; 242 free_map_.ClearLastAndFindPrevious(index) * kObjectAlignment;
256 } else { 243 } else {
257 free_map_.Set(index, false); 244 free_map_.Set(index, false);
258 } 245 }
259 } 246 }
260 free_lists_[index] = next; 247 free_lists_[index] = next;
261 return result; 248 return result;
262 } 249 }
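EnqueueElement and DequeueElement also keep two pieces of bookkeeping in sync: free_map_ has one bit per small bucket recording whether it is non-empty, and last_free_small_size_ caches the largest small size still available so TryAllocateSmallLocked can reject hopeless requests without walking any list. A simplified sketch of that bookkeeping, using std::bitset and a linear rescan in place of the VM's BitSet::ClearLastAndFindPrevious (both are assumptions of this sketch):

    #include <bitset>
    #include <cstdint>

    constexpr int kNumLists = 128;       // assumed small-bucket count
    constexpr intptr_t kAlign = 16;      // assumed object alignment

    std::bitset<kNumLists> free_map;     // bit i set <=> small bucket i is non-empty
    intptr_t last_free_small_size = -1;  // largest small size currently available

    // Called when an element is pushed onto a previously empty small bucket.
    void NoteBucketBecameNonEmpty(int index) {
      free_map.set(index);
      intptr_t size = index * kAlign;
      if (size > last_free_small_size) last_free_small_size = size;
    }

    // Called when the last element is popped from a small bucket.
    void NoteBucketBecameEmpty(int index) {
      free_map.reset(index);
      if (index * kAlign == last_free_small_size) {
        int i = index - 1;                  // rescan for the next-largest bucket
        while (i >= 0 && !free_map.test(i)) --i;
        last_free_small_size = i * kAlign;  // -1 * alignment when nothing remains
      }
    }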
263 250
264
265 intptr_t FreeList::LengthLocked(int index) const { 251 intptr_t FreeList::LengthLocked(int index) const {
266 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread()); 252 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
267 ASSERT(index >= 0); 253 ASSERT(index >= 0);
268 ASSERT(index < kNumLists); 254 ASSERT(index < kNumLists);
269 intptr_t result = 0; 255 intptr_t result = 0;
270 FreeListElement* element = free_lists_[index]; 256 FreeListElement* element = free_lists_[index];
271 while (element != NULL) { 257 while (element != NULL) {
272 ++result; 258 ++result;
273 element = element->next(); 259 element = element->next();
274 } 260 }
275 return result; 261 return result;
276 } 262 }
277 263
278
279 void FreeList::PrintSmall() const { 264 void FreeList::PrintSmall() const {
280 int small_sizes = 0; 265 int small_sizes = 0;
281 int small_objects = 0; 266 int small_objects = 0;
282 intptr_t small_bytes = 0; 267 intptr_t small_bytes = 0;
283 for (int i = 0; i < kNumLists; ++i) { 268 for (int i = 0; i < kNumLists; ++i) {
284 if (free_lists_[i] == NULL) { 269 if (free_lists_[i] == NULL) {
285 continue; 270 continue;
286 } 271 }
287 small_sizes += 1; 272 small_sizes += 1;
288 intptr_t list_length = LengthLocked(i); 273 intptr_t list_length = LengthLocked(i);
289 small_objects += list_length; 274 small_objects += list_length;
290 intptr_t list_bytes = list_length * i * kObjectAlignment; 275 intptr_t list_bytes = list_length * i * kObjectAlignment;
291 small_bytes += list_bytes; 276 small_bytes += list_bytes;
292 OS::Print( 277 OS::Print(
293 "small %3d [%8d bytes] : " 278 "small %3d [%8d bytes] : "
294 "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n", 279 "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n",
295 i, i * kObjectAlignment, list_length, 280 i, i * kObjectAlignment, list_length,
296 list_bytes / static_cast<double>(KB), 281 list_bytes / static_cast<double>(KB),
297 small_bytes / static_cast<double>(KB)); 282 small_bytes / static_cast<double>(KB));
298 } 283 }
299 } 284 }
300 285
301
302 class IntptrPair { 286 class IntptrPair {
303 public: 287 public:
304 IntptrPair() : first_(-1), second_(-1) {} 288 IntptrPair() : first_(-1), second_(-1) {}
305 IntptrPair(intptr_t first, intptr_t second) 289 IntptrPair(intptr_t first, intptr_t second)
306 : first_(first), second_(second) {} 290 : first_(first), second_(second) {}
307 291
308 intptr_t first() const { return first_; } 292 intptr_t first() const { return first_; }
309 intptr_t second() const { return second_; } 293 intptr_t second() const { return second_; }
310 void set_second(intptr_t s) { second_ = s; } 294 void set_second(intptr_t s) { second_ = s; }
311 295
312 bool operator==(const IntptrPair& other) { 296 bool operator==(const IntptrPair& other) {
313 return (first_ == other.first_) && (second_ == other.second_); 297 return (first_ == other.first_) && (second_ == other.second_);
314 } 298 }
315 299
316 bool operator!=(const IntptrPair& other) { 300 bool operator!=(const IntptrPair& other) {
317 return (first_ != other.first_) || (second_ != other.second_); 301 return (first_ != other.first_) || (second_ != other.second_);
318 } 302 }
319 303
320 private: 304 private:
321 intptr_t first_; 305 intptr_t first_;
322 intptr_t second_; 306 intptr_t second_;
323 }; 307 };
324 308
325
326 void FreeList::PrintLarge() const { 309 void FreeList::PrintLarge() const {
327 int large_sizes = 0; 310 int large_sizes = 0;
328 int large_objects = 0; 311 int large_objects = 0;
329 intptr_t large_bytes = 0; 312 intptr_t large_bytes = 0;
330 MallocDirectChainedHashMap<NumbersKeyValueTrait<IntptrPair> > map; 313 MallocDirectChainedHashMap<NumbersKeyValueTrait<IntptrPair> > map;
331 FreeListElement* node; 314 FreeListElement* node;
332 for (node = free_lists_[kNumLists]; node != NULL; node = node->next()) { 315 for (node = free_lists_[kNumLists]; node != NULL; node = node->next()) {
333 IntptrPair* pair = map.Lookup(node->Size()); 316 IntptrPair* pair = map.Lookup(node->Size());
334 if (pair == NULL) { 317 if (pair == NULL) {
335 large_sizes += 1; 318 large_sizes += 1;
(...skipping 14 matching lines...)
350 large_bytes += list_bytes; 333 large_bytes += list_bytes;
351 OS::Print("large %3" Pd " [%8" Pd 334 OS::Print("large %3" Pd " [%8" Pd
352 " bytes] : " 335 " bytes] : "
353 "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n", 336 "%8" Pd " objs; %8.1f KB; %8.1f cum KB\n",
354 size / kObjectAlignment, size, list_length, 337 size / kObjectAlignment, size, list_length,
355 list_bytes / static_cast<double>(KB), 338 list_bytes / static_cast<double>(KB),
356 large_bytes / static_cast<double>(KB)); 339 large_bytes / static_cast<double>(KB));
357 } 340 }
358 } 341 }
359 342
360
361 void FreeList::Print() const { 343 void FreeList::Print() const {
362 MutexLocker ml(mutex_); 344 MutexLocker ml(mutex_);
363 PrintSmall(); 345 PrintSmall();
364 PrintLarge(); 346 PrintLarge();
365 } 347 }
366 348
367
368 void FreeList::SplitElementAfterAndEnqueue(FreeListElement* element, 349 void FreeList::SplitElementAfterAndEnqueue(FreeListElement* element,
369 intptr_t size, 350 intptr_t size,
370 bool is_protected) { 351 bool is_protected) {
371 // Precondition required by AsElement and EnqueueElement: either 352 // Precondition required by AsElement and EnqueueElement: either
372 // element->Size() == size, or else the (page containing the) header of 353 // element->Size() == size, or else the (page containing the) header of
373 // the remainder element starting at element + size is writable. 354 // the remainder element starting at element + size is writable.
374 intptr_t remainder_size = element->Size() - size; 355 intptr_t remainder_size = element->Size() - size;
375 if (remainder_size == 0) return; 356 if (remainder_size == 0) return;
376 357
377 uword remainder_address = reinterpret_cast<uword>(element) + size; 358 uword remainder_address = reinterpret_cast<uword>(element) + size;
378 element = FreeListElement::AsElement(remainder_address, remainder_size); 359 element = FreeListElement::AsElement(remainder_address, remainder_size);
379 intptr_t remainder_index = IndexForSize(remainder_size); 360 intptr_t remainder_index = IndexForSize(remainder_size);
380 EnqueueElement(element, remainder_index); 361 EnqueueElement(element, remainder_index);
381 362
382 // Postcondition: when allocating in a protected page, the remainder 363 // Postcondition: when allocating in a protected page, the remainder
383 // element is no longer writable unless it is in the same page as the 364 // element is no longer writable unless it is in the same page as the
384 // allocated element. (The allocated element is still writable, and the 365 // allocated element. (The allocated element is still writable, and the
385 // remainder element will be protected when the allocated one is). 366 // remainder element will be protected when the allocated one is).
386 if (is_protected && 367 if (is_protected &&
387 !VirtualMemory::InSamePage(remainder_address - 1, remainder_address)) { 368 !VirtualMemory::InSamePage(remainder_address - 1, remainder_address)) {
388 bool status = 369 bool status =
389 VirtualMemory::Protect(reinterpret_cast<void*>(remainder_address), 370 VirtualMemory::Protect(reinterpret_cast<void*>(remainder_address),
390 remainder_size, VirtualMemory::kReadExecute); 371 remainder_size, VirtualMemory::kReadExecute);
391 ASSERT(status); 372 ASSERT(status);
392 } 373 }
393 } 374 }
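When a larger element is used to satisfy a smaller request, the tail at element + size is re-stamped as a fresh FreeListElement and enqueued under its own size class. The only subtlety is for protected (read-execute) pages: the remainder is re-protected only when it starts on a different page than the allocated element, because a same-page remainder will be covered when the allocated element itself is protected. A sketch of that page test, assuming a 4 KB page size purely for illustration:

    #include <cstdint>
    #include <cstdio>

    using uword = uintptr_t;
    constexpr uword kPageSize = 4096;  // assumed page size, for illustration only

    // Mirrors VirtualMemory::InSamePage(remainder_address - 1, remainder_address):
    // the byte just before the remainder is the last byte of the allocated part,
    // so "same page" means the remainder must stay writable for now.
    bool RemainderSharesPageWithAllocation(uword remainder_address) {
      return (remainder_address - 1) / kPageSize == remainder_address / kPageSize;
    }

    int main() {
      printf("%d\n", RemainderSharesPageWithAllocation(0x1000));  // 0: page boundary
      printf("%d\n", RemainderSharesPageWithAllocation(0x1040));  // 1: same page
      return 0;
    }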
394 375
395
396 FreeListElement* FreeList::TryAllocateLarge(intptr_t minimum_size) { 376 FreeListElement* FreeList::TryAllocateLarge(intptr_t minimum_size) {
397 MutexLocker ml(mutex_); 377 MutexLocker ml(mutex_);
398 return TryAllocateLargeLocked(minimum_size); 378 return TryAllocateLargeLocked(minimum_size);
399 } 379 }
400 380
401
402 FreeListElement* FreeList::TryAllocateLargeLocked(intptr_t minimum_size) { 381 FreeListElement* FreeList::TryAllocateLargeLocked(intptr_t minimum_size) {
403 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread()); 382 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
404 FreeListElement* previous = NULL; 383 FreeListElement* previous = NULL;
405 FreeListElement* current = free_lists_[kNumLists]; 384 FreeListElement* current = free_lists_[kNumLists];
406 // TODO(koda): Find largest. 385 // TODO(koda): Find largest.
407 // We are willing to search the freelist further for a big block. 386 // We are willing to search the freelist further for a big block.
408 intptr_t tries_left = 387 intptr_t tries_left =
409 freelist_search_budget_ + (minimum_size >> kWordSizeLog2); 388 freelist_search_budget_ + (minimum_size >> kWordSizeLog2);
410 while (current != NULL) { 389 while (current != NULL) {
411 FreeListElement* next = current->next(); 390 FreeListElement* next = current->next();
412 if (current->Size() >= minimum_size) { 391 if (current->Size() >= minimum_size) {
413 if (previous == NULL) { 392 if (previous == NULL) {
414 free_lists_[kNumLists] = next; 393 free_lists_[kNumLists] = next;
415 } else { 394 } else {
416 previous->set_next(next); 395 previous->set_next(next);
417 } 396 }
418 freelist_search_budget_ = 397 freelist_search_budget_ =
419 Utils::Minimum(tries_left, kInitialFreeListSearchBudget); 398 Utils::Minimum(tries_left, kInitialFreeListSearchBudget);
420 return current; 399 return current;
421 } else if (tries_left-- < 0) { 400 } else if (tries_left-- < 0) {
422 freelist_search_budget_ = kInitialFreeListSearchBudget; 401 freelist_search_budget_ = kInitialFreeListSearchBudget;
423 return 0; // Trigger allocation of new page. 402 return 0; // Trigger allocation of new page.
424 } 403 }
425 previous = current; 404 previous = current;
426 current = next; 405 current = next;
427 } 406 }
428 return NULL; 407 return NULL;
429 } 408 }
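The overflow list is searched first-fit, but only within a budget: freelist_search_budget_ plus a bonus proportional to the requested size. When the budget runs out, the search gives up and returns null so the caller allocates a fresh page instead of scanning a long list, and any leftover budget (capped at the initial value) is carried over to the next search. A simplified, self-contained model of that bounded first-fit search; the node type, helper name, and budget constant are illustrative, not the VM's:

    #include <cstddef>
    #include <cstdint>

    struct Node {
      intptr_t size;
      Node* next;
    };

    constexpr intptr_t kInitialBudget = 1000;  // assumed initial search budget

    // Returns the first node with size >= minimum, unlinking it from *head, or
    // nullptr if the budget runs out or nothing fits.
    Node* TakeFirstFit(Node** head, intptr_t minimum, intptr_t* budget) {
      intptr_t tries_left = *budget + (minimum >> 3);  // bonus, assuming 8-byte words
      Node* prev = nullptr;
      for (Node* cur = *head; cur != nullptr; prev = cur, cur = cur->next) {
        if (cur->size >= minimum) {
          (prev == nullptr ? *head : prev->next) = cur->next;
          *budget = tries_left < kInitialBudget ? tries_left : kInitialBudget;
          return cur;
        }
        if (tries_left-- < 0) {  // budget exhausted: caller allocates a new page
          *budget = kInitialBudget;
          return nullptr;
        }
      }
      return nullptr;
    }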
430 409
431
432 uword FreeList::TryAllocateSmallLocked(intptr_t size) { 410 uword FreeList::TryAllocateSmallLocked(intptr_t size) {
433 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread()); 411 DEBUG_ASSERT(mutex_->IsOwnedByCurrentThread());
434 if (size > last_free_small_size_) { 412 if (size > last_free_small_size_) {
435 return 0; 413 return 0;
436 } 414 }
437 int index = IndexForSize(size); 415 int index = IndexForSize(size);
438 if (index != kNumLists && free_map_.Test(index)) { 416 if (index != kNumLists && free_map_.Test(index)) {
439 return reinterpret_cast<uword>(DequeueElement(index)); 417 return reinterpret_cast<uword>(DequeueElement(index));
440 } 418 }
441 if ((index + 1) < kNumLists) { 419 if ((index + 1) < kNumLists) {
442 intptr_t next_index = free_map_.Next(index + 1); 420 intptr_t next_index = free_map_.Next(index + 1);
443 if (next_index != -1) { 421 if (next_index != -1) {
444 FreeListElement* element = DequeueElement(next_index); 422 FreeListElement* element = DequeueElement(next_index);
445 SplitElementAfterAndEnqueue(element, size, false); 423 SplitElementAfterAndEnqueue(element, size, false);
446 return reinterpret_cast<uword>(element); 424 return reinterpret_cast<uword>(element);
447 } 425 }
448 } 426 }
449 return 0; 427 return 0;
450 } 428 }
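Putting the pieces together, the small-allocation fast path has three outcomes: bail out at once if the request exceeds last_free_small_size_, pop an exact-fit element when that bucket's bit is set in free_map_, or take the head of the next non-empty larger bucket and split it. The toy model below only picks the bucket index (it returns -1 where the real code falls back to a new page); the vector-of-flags representation is an assumption of this sketch, not the VM's data structure:

    #include <cstdint>
    #include <vector>

    constexpr int kNumLists = 8;  // tiny bucket count, purely for illustration

    // Toy stand-ins for the real free list state.
    std::vector<bool> bucket_nonempty(kNumLists + 1, false);
    intptr_t last_free_small_size = -1;

    // Mirrors the order of checks in TryAllocateSmallLocked: cheap size bail-out,
    // exact-fit bucket, then the next larger non-empty bucket (whose head element
    // would be split by SplitElementAfterAndEnqueue).
    int PickSmallBucket(intptr_t size, int index) {
      if (size > last_free_small_size) return -1;      // cannot possibly succeed
      if (index != kNumLists && bucket_nonempty[index]) return index;  // exact fit
      for (int i = index + 1; i < kNumLists; ++i) {
        if (bucket_nonempty[i]) return i;              // split this bucket's head
      }
      return -1;                                       // fall back to a new page
    }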
451 429
452 } // namespace dart 430 } // namespace dart