Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(323)

Side by Side Diff: src/spaces-inl.h

Issue 22852024: Track JS allocations as they arrive with no effect on performance when tracking is switched off (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Make separate API for JS allocations recording, add example of checking JS allocations recording in… Created 7 years, 3 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
OLDNEW
1 // Copyright 2011 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 10 matching lines...) Expand all
21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
27 27
28 #ifndef V8_SPACES_INL_H_ 28 #ifndef V8_SPACES_INL_H_
29 #define V8_SPACES_INL_H_ 29 #define V8_SPACES_INL_H_
30 30
31 #include "heap-profiler.h"
31 #include "isolate.h" 32 #include "isolate.h"
32 #include "spaces.h" 33 #include "spaces.h"
33 #include "v8memory.h" 34 #include "v8memory.h"
34 35
35 namespace v8 { 36 namespace v8 {
36 namespace internal { 37 namespace internal {
37 38
38 39
39 // ----------------------------------------------------------------------------- 40 // -----------------------------------------------------------------------------
40 // Bitmap 41 // Bitmap
(...skipping 225 matching lines...) Expand 10 before | Expand all | Expand 10 after
266 Address current_top = allocation_info_.top; 267 Address current_top = allocation_info_.top;
267 Address new_top = current_top + size_in_bytes; 268 Address new_top = current_top + size_in_bytes;
268 if (new_top > allocation_info_.limit) return NULL; 269 if (new_top > allocation_info_.limit) return NULL;
269 270
270 allocation_info_.top = new_top; 271 allocation_info_.top = new_top;
271 return HeapObject::FromAddress(current_top); 272 return HeapObject::FromAddress(current_top);
272 } 273 }
273 274
274 275
275 // Raw allocation. 276 // Raw allocation.
276 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) { 277 HeapObject* PagedSpace::AllocateRawHelper(int size_in_bytes) {
277 HeapObject* object = AllocateLinearly(size_in_bytes); 278 HeapObject* object = AllocateLinearly(size_in_bytes);
278 if (object != NULL) { 279 if (object != NULL) {
279 if (identity() == CODE_SPACE) { 280 if (identity() == CODE_SPACE) {
280 SkipList::Update(object->address(), size_in_bytes); 281 SkipList::Update(object->address(), size_in_bytes);
281 } 282 }
282 return object; 283 return object;
283 } 284 }
284 285
285 ASSERT(!heap()->linear_allocation() || 286 ASSERT(!heap()->linear_allocation() ||
286 (anchor_.next_chunk() == &anchor_ && 287 (anchor_.next_chunk() == &anchor_ &&
287 anchor_.prev_chunk() == &anchor_)); 288 anchor_.prev_chunk() == &anchor_));
288 289
289 object = free_list_.Allocate(size_in_bytes); 290 object = free_list_.Allocate(size_in_bytes);
290 if (object != NULL) { 291 if (object != NULL) {
291 if (identity() == CODE_SPACE) { 292 if (identity() == CODE_SPACE) {
292 SkipList::Update(object->address(), size_in_bytes); 293 SkipList::Update(object->address(), size_in_bytes);
293 } 294 }
294 return object; 295 return object;
295 } 296 }
296 297
297 object = SlowAllocateRaw(size_in_bytes); 298 object = SlowAllocateRaw(size_in_bytes);
298 if (object != NULL) { 299 if (object != NULL) {
299 if (identity() == CODE_SPACE) { 300 if (identity() == CODE_SPACE) {
300 SkipList::Update(object->address(), size_in_bytes); 301 SkipList::Update(object->address(), size_in_bytes);
301 } 302 }
302 return object; 303 return object;
303 } 304 }
304 305
306 return NULL;
307 }
308
309
310 MaybeObject* PagedSpace::AllocateRaw(int size_in_bytes) {
311 HeapObject* object = AllocateRawHelper(size_in_bytes);
312 if (object != NULL) {
313 HeapProfiler* profiler = heap()->isolate()->heap_profiler();
314 if (profiler->is_tracking_allocations()) {
315 profiler->NewObjectEvent(object->address(), size_in_bytes);
316 }
317 return object;
318 }
305 return Failure::RetryAfterGC(identity()); 319 return Failure::RetryAfterGC(identity());
306 } 320 }
307 321
322
323 MaybeObject* PagedSpace::AllocateRawForMigration(int size_in_bytes) {
324 HeapObject* object = AllocateRawHelper(size_in_bytes);
325 if (object != NULL) {
326 return object;
327 }
328 return Failure::RetryAfterGC(identity());
329 }
330
308 331
309 // ----------------------------------------------------------------------------- 332 // -----------------------------------------------------------------------------
310 // NewSpace 333 // NewSpace
311 334
312 335
313 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) { 336 MaybeObject* NewSpace::AllocateRaw(int size_in_bytes) {
314 Address old_top = allocation_info_.top; 337 Address old_top = allocation_info_.top;
315 #ifdef DEBUG 338 #ifdef DEBUG
316 // If we are stressing compaction we waste some memory in new space 339 // If we are stressing compaction we waste some memory in new space
317 // in order to get more frequent GCs. 340 // in order to get more frequent GCs.
318 if (FLAG_stress_compaction && !HEAP->linear_allocation()) { 341 if (FLAG_stress_compaction && !HEAP->linear_allocation()) {
319 if (allocation_info_.limit - old_top >= size_in_bytes * 4) { 342 if (allocation_info_.limit - old_top >= size_in_bytes * 4) {
320 int filler_size = size_in_bytes * 4; 343 int filler_size = size_in_bytes * 4;
321 for (int i = 0; i < filler_size; i += kPointerSize) { 344 for (int i = 0; i < filler_size; i += kPointerSize) {
322 *(reinterpret_cast<Object**>(old_top + i)) = 345 *(reinterpret_cast<Object**>(old_top + i)) =
323 HEAP->one_pointer_filler_map(); 346 HEAP->one_pointer_filler_map();
324 } 347 }
325 old_top += filler_size; 348 old_top += filler_size;
326 allocation_info_.top += filler_size; 349 allocation_info_.top += filler_size;
327 } 350 }
328 } 351 }
329 #endif 352 #endif
330 353
331 if (allocation_info_.limit - old_top < size_in_bytes) { 354 if (allocation_info_.limit - old_top < size_in_bytes) {
332 return SlowAllocateRaw(size_in_bytes); 355 return SlowAllocateRaw(size_in_bytes);
333 } 356 }
334 357
335 Object* obj = HeapObject::FromAddress(old_top); 358 HeapObject* obj = HeapObject::FromAddress(old_top);
loislo 2013/08/27 09:04:57 Could this line be moved down into the if ("tracki
Alexandra Mikhaylova 2013/09/19 16:03:38 I don't think so. Could you please explain why we
336 allocation_info_.top += size_in_bytes; 359 allocation_info_.top += size_in_bytes;
337 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_); 360 ASSERT_SEMISPACE_ALLOCATION_INFO(allocation_info_, to_space_);
338 361
362 HeapProfiler* profiler = heap()->isolate()->heap_profiler();
363 if (profiler->is_tracking_allocations()) {
364 profiler->NewObjectEvent(obj->address(), size_in_bytes);
365 }
366
339 return obj; 367 return obj;
340 } 368 }
341 369
342 370
343 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) { 371 LargePage* LargePage::Initialize(Heap* heap, MemoryChunk* chunk) {
344 heap->incremental_marking()->SetOldSpacePageFlags(chunk); 372 heap->incremental_marking()->SetOldSpacePageFlags(chunk);
345 return static_cast<LargePage*>(chunk); 373 return static_cast<LargePage*>(chunk);
346 } 374 }
347 375
348 376
349 intptr_t LargeObjectSpace::Available() { 377 intptr_t LargeObjectSpace::Available() {
350 return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available()); 378 return ObjectSizeFor(heap()->isolate()->memory_allocator()->Available());
351 } 379 }
352 380
353 381
354 bool FreeListNode::IsFreeListNode(HeapObject* object) { 382 bool FreeListNode::IsFreeListNode(HeapObject* object) {
355 Map* map = object->map(); 383 Map* map = object->map();
356 Heap* heap = object->GetHeap(); 384 Heap* heap = object->GetHeap();
357 return map == heap->raw_unchecked_free_space_map() 385 return map == heap->raw_unchecked_free_space_map()
358 || map == heap->raw_unchecked_one_pointer_filler_map() 386 || map == heap->raw_unchecked_one_pointer_filler_map()
359 || map == heap->raw_unchecked_two_pointer_filler_map(); 387 || map == heap->raw_unchecked_two_pointer_filler_map();
360 } 388 }
361 389
362 } } // namespace v8::internal 390 } } // namespace v8::internal
363 391
364 #endif // V8_SPACES_INL_H_ 392 #endif // V8_SPACES_INL_H_
OLDNEW

Powered by Google App Engine
This is Rietveld 408576698