Chromium Code Reviews

Side by Side Diff: src/spaces-inl.h

Issue 22852024: Track JS allocations as they arrive, with no effect on performance when tracking is switched off (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Code style fixes after review #2 | Created 7 years, 2 months ago
 // Copyright 2011 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 10 matching lines...)
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

 #ifndef V8_SPACES_INL_H_
 #define V8_SPACES_INL_H_

+#include "heap-profiler.h"
 #include "isolate.h"
 #include "spaces.h"
 #include "v8memory.h"

 namespace v8 {
 namespace internal {


 // -----------------------------------------------------------------------------
 // Bitmap
(...skipping 225 matching lines...)
   Address current_top = allocation_info_.top;
   Address new_top = current_top + size_in_bytes;
   if (new_top > allocation_info_.limit) return NULL;

   allocation_info_.top = new_top;
   return HeapObject::FromAddress(current_top);
 }


 // Raw allocation.
-MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
-  HeapObject* object = AllocateLinearly(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
-
-  ASSERT(!heap()->linear_allocation() ||
-         (anchor_.next_chunk() == &anchor_ &&
-          anchor_.prev_chunk() == &anchor_));
-
-  object = free_list_.Allocate(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
-
-  object = SlowAllocateRaw(size_in_bytes);
-  if (object != NULL) {
-    if (identity() == CODE_SPACE) {
-      SkipList::Update(object->address(), size_in_bytes);
-    }
-    return object;
-  }
-
-  return Failure::RetryAfterGC(identity());
-}
+MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes,
+                                     AllocationType event) {
+  HeapObject* object = NULL;
+
+  do {
+    object = AllocateLinearly(size_in_bytes);
+    if (object != NULL) break;
+
+    ASSERT(!heap()->linear_allocation() ||
+           (anchor_.next_chunk() == &anchor_ &&
+            anchor_.prev_chunk() == &anchor_));
+
+    object = free_list_.Allocate(size_in_bytes);
+    if (object != NULL) break;
+
+    object = SlowAllocateRaw(size_in_bytes);
+  } while (false);
Hannes Payer (out of office) 2013/10/02 18:00:29 I don't like the goto emulation here. Why don't we …
Alexandra Mikhaylova 2013/10/03 16:27:55 Ok, corrected it keeping the original method.
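For context on the exchange above: do { ... } while (false) is a structured stand-in for goto done;. Each successful allocation attempt breaks out of the pseudo-loop, so the shared post-allocation bookkeeping below it is written once instead of three times, at the cost of an idiom some reviewers dislike. The following is a minimal standalone sketch of the two control-flow shapes under discussion; TryFast, TryFree, TrySlow, and the bookkeeping printf are illustrative stand-ins, not V8 APIs.

#include <cstdio>

// Illustrative stand-ins for the three strategies the real code tries in
// order (AllocateLinearly, FreeList::Allocate, SlowAllocateRaw).
static void* TryFast(int) { return nullptr; }
static void* TryFree(int) { return nullptr; }
static void* TrySlow(int) { static char block[64]; return block; }

// Shape used in this patch set: every successful attempt funnels through
// "break", so the shared tail after the pseudo-loop appears exactly once.
void* AllocateFunneled(int size) {
  void* object = nullptr;
  do {
    object = TryFast(size);
    if (object != nullptr) break;
    object = TryFree(size);
    if (object != nullptr) break;
    object = TrySlow(size);
  } while (false);

  if (object == nullptr) return nullptr;         // retry-after-GC in V8
  std::printf("bookkeeping, %d bytes\n", size);  // shared tail, written once
  return object;
}

// Shape closer to what the reviewer asked for: keep the early returns and
// hoist the shared tail into a wrapper instead of emulating goto.
static void* AllocateCore(int size) {
  if (void* object = TryFast(size)) return object;
  if (void* object = TryFree(size)) return object;
  return TrySlow(size);
}

void* AllocateWrapped(int size) {
  void* object = AllocateCore(size);
  if (object == nullptr) return nullptr;
  std::printf("bookkeeping, %d bytes\n", size);
  return object;
}

The shared tail of the new version resumes in the diff below.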
+
+  if (object == NULL) return Failure::RetryAfterGC(identity());
+
+  if (identity() == CODE_SPACE) {
+    SkipList::Update(object->address(), size_in_bytes);
+  }
+
+  if (event == NEW_OBJECT) {
+    HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+    if (profiler->is_tracking_allocations()) {
+      profiler->NewObjectEvent(object->address(), size_in_bytes);
+    }
+  }
+
+  return object;
+}
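The shape of the new tail is also what delivers the "no effect on performance when tracking is switched off" promise from the issue title: the only work added to the hot allocation path is one branch on is_tracking_allocations(), and NewObjectEvent runs only behind that guard. Below is a self-contained sketch of the same guard pattern; AllocationTracker, NotifyAllocation, and their members are illustrative stand-ins, not the real HeapProfiler interface.

#include <cstddef>
#include <cstdio>

// Illustrative stand-in for a profiler that can be switched on and off.
class AllocationTracker {
 public:
  AllocationTracker() : tracking_(false) {}
  bool is_tracking() const { return tracking_; }
  void set_tracking(bool on) { tracking_ = on; }
  void OnNewObject(void* address, size_t size) {
    // The potentially expensive work (in V8's case, recording the
    // allocation for the heap profiler) lives here, behind the guard.
    std::printf("allocated %zu bytes at %p\n", size, address);
  }
 private:
  bool tracking_;
};

// Hot-path hook: when tracking is off this costs a single well-predicted
// branch, mirroring the profiler->is_tracking_allocations() test above.
inline void NotifyAllocation(AllocationTracker* tracker,
                             void* address, size_t size) {
  if (tracker->is_tracking()) {
    tracker->OnNewObject(address, size);
  }
}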


 // -----------------------------------------------------------------------------
 // NewSpace


 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
   Address old_top = allocation_info_.top;
 #ifdef DEBUG
   // If we are stressing compaction we waste some memory in new space
   // in order to get more frequent GCs.
   if (FLAG_stress_compaction && !heap()->linear_allocation()) {
     if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
       int filler_size = size_in_bytes * 4;
       for (int i = 0; i < filler_size; i += kPointerSize) {
         *(reinterpret_cast<Object**>(old_top + i)) =
             heap()->one_pointer_filler_map();
       }
       old_top += filler_size;
       allocation_info_.top += filler_size;
     }
   }
 #endif

   if (allocation_info_.limit - old_top < size_in_bytes) {
     return SlowAllocateRaw(size_in_bytes);
   }

-  Object* obj = HeapObject::FromAddress(old_top);
+  HeapObject* obj = HeapObject::FromAddress(old_top);
   allocation_info_.top += size_in_bytes;
   ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);

+  HeapProfiler* profiler = heap()->isolate()->heap_profiler();
+  if (profiler->is_tracking_allocations()) {
+    profiler->NewObjectEvent(obj->address(), size_in_bytes);
+  }
+
   return obj;
 }
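Note that NewSpace::AllocateRaw inlines the same guarded notification on its bump-pointer fast path, and the patch retypes obj from Object* to HeapObject* so that the obj->address() argument to NewObjectEvent is available without a cast.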


 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
   heap->incremental_marking()->SetOldSpacePageFlags(chunk);
   return static_cast<LargePage*>(chunk);
 }


 intptr_t LargeObjectSpace::Available() {
   return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
 }


 bool FreeListNode::IsFreeListNode(HeapObject* object) {
   Map* map = object->map();
   Heap* heap = object->GetHeap();
   return map == heap->raw_unchecked_free_space_map()
       || map == heap->raw_unchecked_one_pointer_filler_map()
       || map == heap->raw_unchecked_two_pointer_filler_map();
 }

 } }  // namespace v8::internal

 #endif  // V8_SPACES_INL_H_
