Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 59373003: AllocationProfiler: introduce allocation_profiler flag in V8 api. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 7 years, 1 month ago
// Copyright 2012 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
(...skipping 4072 matching lines...)
      }
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

-  if (isolate()->heap_profiler()->is_tracking_allocations()) {
-    RecordObjectAllocation(isolate(), result, object_size);
+  if (isolate()->is_allocation_profiler_enabled()) {
+    // It's always safe to call the allocation profiler stub hook,
+    // as the hook itself is not allowed to call back to V8.
+    AllowStubCallsScope allow_scope(this, true);
+    push(Immediate(object_size));
+    push(result);
+    RecordObjectAllocationStub stub;
+    CallStub(&stub);
+    addq(rsp, Immediate(2 * kPointerSize));
  }

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
    testq(result, Immediate(kDoubleAlignmentMask));
    Check(zero, kAllocationIsNotDoubleAligned);
  }

  // Calculate new top and bail out if new space is exhausted.
(...skipping 61 matching lines...)
      // object_size is left unchanged by this function.
    }
    jmp(gc_required);
    return;
  }
  ASSERT(!result.is(result_end));

  // Load address of new object into result.
  LoadAllocationTopHelper(result, scratch, flags);

-  if (isolate()->heap_profiler()->is_tracking_allocations()) {
-    RecordObjectAllocation(isolate(), result, object_size);
+  if (isolate()->is_allocation_profiler_enabled()) {
+    // It's always safe to call the allocation profiler stub hook,
+    // as the hook itself is not allowed to call back to V8.
+    AllowStubCallsScope allow_scope(this, true);
+    push(object_size);
+    push(result);
+    RecordObjectAllocationStub stub;
+    CallStub(&stub);
+    addq(rsp, Immediate(2 * kRegisterSize));
alph 2013/11/05 13:55:20 Just curious why it's kRegisterSize here and kPointerSize above.
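A note on the question above (an editorial sketch, not part of the review thread): on x64 both kRegisterSize and kPointerSize are 8 bytes, so 2 * kRegisterSize and 2 * kPointerSize pop the same two 8-byte slots pushed before the stub call; the difference between the two hunks appears to be stylistic. The standalone snippet below only checks that arithmetic, assuming the x64 values of the two constants; the definitions are mirrored locally for illustration and are not the patch's code.

// Minimal sketch, assuming the x64 values of V8's kPointerSize and
// kRegisterSize (both 8 bytes). Local definitions for illustration only.
#include <cassert>

const int kPointerSize = 8;   // one stack slot / tagged pointer on x64
const int kRegisterSize = 8;  // one general-purpose register on x64

int main() {
  // push(object_size) and push(result) each grow the stack by one
  // 8-byte slot, so either constant restores rsp after the stub call.
  assert(2 * kPointerSize == 2 * kRegisterSize);  // both equal 16
  return 0;
}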
  }

  // Align the next allocation. Storing the filler map without checking top is
  // safe in new-space because the limit of the heap is aligned there.
  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
    testq(result, Immediate(kDoubleAlignmentMask));
    Check(zero, kAllocationIsNotDoubleAligned);
  }

  // Calculate new top and bail out if new space is exhausted.
(...skipping 742 matching lines...)
  movq(kScratchRegister, new_space_start);
  cmpq(scratch_reg, kScratchRegister);
  j(less, no_memento_found);
  cmpq(scratch_reg, ExternalOperand(new_space_allocation_top));
  j(greater, no_memento_found);
  CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize),
              Heap::kAllocationMementoMapRootIndex);
}


-void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
-                                            Register object,
-                                            Register object_size) {
-  FrameScope frame(this, StackFrame::EXIT);
-  PushSafepointRegisters();
-  PrepareCallCFunction(3);
-  // In case object is rdx
-  movq(kScratchRegister, object);
-  movq(arg_reg_3, object_size);
-  movq(arg_reg_2, kScratchRegister);
-  movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
-  CallCFunction(
-      ExternalReference::record_object_allocation_function(isolate), 3);
-  PopSafepointRegisters();
-}
-
-
-void MacroAssembler::RecordObjectAllocation(Isolate* isolate,
-                                            Register object,
-                                            int object_size) {
-  FrameScope frame(this, StackFrame::EXIT);
-  PushSafepointRegisters();
-  PrepareCallCFunction(3);
-  movq(arg_reg_2, object);
-  movq(arg_reg_3, Immediate(object_size));
-  movq(arg_reg_1, isolate, RelocInfo::EXTERNAL_REFERENCE);
-  CallCFunction(
-      ExternalReference::record_object_allocation_function(isolate), 3);
-  PopSafepointRegisters();
-}
-
-
} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_X64
