Chromium Code Reviews

Unified Diff: src/x64/macro-assembler-x64.cc

Issue 148503002: A64: Synchronize with r15545. (Closed)
Base URL: https://v8.googlecode.com/svn/branches/experimental/a64
Patch Set: Created 6 years, 10 months ago
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
 //       with the distribution.
 //     * Neither the name of Google Inc. nor the names of its
 //       contributors may be used to endorse or promote products derived
 //       from this software without specific prior written permission.
 //
 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 
 #include "v8.h"
 
-#if defined(V8_TARGET_ARCH_X64)
+#if V8_TARGET_ARCH_X64
 
 #include "bootstrapper.h"
 #include "codegen.h"
+#include "cpu-profiler.h"
 #include "assembler-x64.h"
 #include "macro-assembler-x64.h"
 #include "serialize.h"
 #include "debug.h"
 #include "heap.h"
 
 namespace v8 {
 namespace internal {
 
 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
(...skipping 594 matching lines...)
 
   CEntryStub stub(1);
   CallStub(&stub);
 }
 
 
 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
                                                int num_arguments,
                                                int result_size) {
   // ----------- S t a t e -------------
   //  -- rsp[0]                 : return address
   //  -- rsp[8]                 : argument num_arguments - 1
   //  ...
   //  -- rsp[8 * num_arguments] : argument 0 (receiver)
   // -----------------------------------
 
   // TODO(1236192): Most runtime routines don't need the number of
   // arguments passed in because it is constant. At some point we
   // should remove this need and make the runtime routine entry code
   // smarter.
   Set(rax, num_arguments);
   JumpToExternalReference(ext, result_size);
(...skipping 304 matching lines...)
     xorl(dst, dst);
   } else if (is_uint32(x)) {
     movl(dst, Immediate(static_cast<uint32_t>(x)));
   } else if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
     movq(dst, x, RelocInfo::NONE64);
   }
 }
 
+
 void MacroAssembler::Set(const Operand& dst, int64_t x) {
   if (is_int32(x)) {
     movq(dst, Immediate(static_cast<int32_t>(x)));
   } else {
     Set(kScratchRegister, x);
     movq(dst, kScratchRegister);
   }
 }
 
 
(...skipping 37 matching lines...)
     xorl(kScratchRegister, kScratchRegister);
     return kScratchRegister;
   }
   if (value == 1) {
     return kSmiConstantRegister;
   }
   LoadSmiConstant(kScratchRegister, source);
   return kScratchRegister;
 }
 
+
 void MacroAssembler::LoadSmiConstant(Register dst, Smi* source) {
   if (emit_debug_code()) {
     movq(dst,
          reinterpret_cast<uint64_t>(Smi::FromInt(kSmiConstantRegisterValue)),
          RelocInfo::NONE64);
     cmpq(dst, kSmiConstantRegister);
     if (allow_stub_calls()) {
       Assert(equal, "Uninitialized kSmiConstantRegister");
     } else {
       Label ok;
(...skipping 2848 matching lines...)
 
 void MacroAssembler::Allocate(int header_size,
                               ScaleFactor element_size,
                               Register element_count,
                               Register result,
                               Register result_end,
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
   ASSERT((flags & SIZE_IN_WORDS) == 0);
-  if (!FLAG_inline_new) {
-    if (emit_debug_code()) {
-      // Trash the registers to simulate an allocation failure.
-      movl(result, Immediate(0x7091));
-      movl(result_end, Immediate(0x7191));
-      if (scratch.is_valid()) {
-        movl(scratch, Immediate(0x7291));
-      }
-      // Register element_count is not modified by the function.
-    }
-    jmp(gc_required);
-    return;
-  }
-  ASSERT(!result.is(result_end));
-
-  // Load address of new object into result.
-  LoadAllocationTopHelper(result, scratch, flags);
-
-  // Align the next allocation. Storing the filler map without checking top is
-  // always safe because the limit of the heap is always aligned.
-  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
-    testq(result, Immediate(kDoubleAlignmentMask));
-    Check(zero, "Allocation is not double aligned");
-  }
-
-  // Calculate new top and bail out if new space is exhausted.
-  ExternalReference allocation_limit =
-      AllocationUtils::GetAllocationLimitReference(isolate(), flags);
-
-  // We assume that element_count*element_size + header_size does not
-  // overflow.
   lea(result_end, Operand(element_count, element_size, header_size));
-  addq(result_end, result);
-  j(carry, gc_required);
-  Operand limit_operand = ExternalOperand(allocation_limit);
-  cmpq(result_end, limit_operand);
-  j(above, gc_required);
-
-  // Update allocation top.
-  UpdateAllocationTopHelper(result_end, scratch, flags);
-
-  // Tag the result if requested.
-  if ((flags & TAG_OBJECT) != 0) {
-    ASSERT(kHeapObjectTag == 1);
-    incq(result);
-  }
+  Allocate(result_end, result, result_end, scratch, gc_required, flags);
 }
 
 
 void MacroAssembler::Allocate(Register object_size,
                               Register result,
                               Register result_end,
                               Register scratch,
                               Label* gc_required,
                               AllocationFlags flags) {
-  ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0);
+  ASSERT((flags & SIZE_IN_WORDS) == 0);
   if (!FLAG_inline_new) {
     if (emit_debug_code()) {
       // Trash the registers to simulate an allocation failure.
       movl(result, Immediate(0x7091));
       movl(result_end, Immediate(0x7191));
       if (scratch.is_valid()) {
         movl(scratch, Immediate(0x7291));
       }
       // object_size is left unchanged by this function.
     }
     jmp(gc_required);
     return;
   }
   ASSERT(!result.is(result_end));
 
   // Load address of new object into result.
   LoadAllocationTopHelper(result, scratch, flags);
 
+  // Align the next allocation. Storing the filler map without checking top is
+  // always safe because the limit of the heap is always aligned.
+  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
+    testq(result, Immediate(kDoubleAlignmentMask));
+    Check(zero, "Allocation is not double aligned");
+  }
+
   // Calculate new top and bail out if new space is exhausted.
   ExternalReference allocation_limit =
       AllocationUtils::GetAllocationLimitReference(isolate(), flags);
   if (!object_size.is(result_end)) {
     movq(result_end, object_size);
   }
   addq(result_end, result);
   j(carry, gc_required);
   Operand limit_operand = ExternalOperand(allocation_limit);
   cmpq(result_end, limit_operand);
   j(above, gc_required);
 
   // Update allocation top.
   UpdateAllocationTopHelper(result_end, scratch, flags);
 
-  // Align the next allocation. Storing the filler map without checking top is
-  // always safe because the limit of the heap is always aligned.
-  if (((flags & DOUBLE_ALIGNMENT) != 0) && FLAG_debug_code) {
-    testq(result, Immediate(kDoubleAlignmentMask));
-    Check(zero, "Allocation is not double aligned");
-  }
-
   // Tag the result if requested.
   if ((flags & TAG_OBJECT) != 0) {
     addq(result, Immediate(kHeapObjectTag));
   }
 }
 
 
 void MacroAssembler::UndoAllocationInNewSpace(Register object) {
   ExternalReference new_space_allocation_top =
       ExternalReference::new_space_allocation_top_address(isolate());
(...skipping 204 matching lines...)
   }
 
   ASSERT(source.is(rsi));
   ASSERT(destination.is(rdi));
   ASSERT(length.is(rcx));
 
   // Because source is 8-byte aligned in our uses of this function,
   // we keep source aligned for the rep movs operation by copying the odd bytes
   // at the end of the ranges.
   movq(scratch, length);
-  shrl(length, Immediate(3));
+  shrl(length, Immediate(kPointerSizeLog2));
   repmovsq();
   // Move remaining bytes of length.
-  andl(scratch, Immediate(0x7));
-  movq(length, Operand(source, scratch, times_1, -8));
-  movq(Operand(destination, scratch, times_1, -8), length);
+  andl(scratch, Immediate(kPointerSize - 1));
+  movq(length, Operand(source, scratch, times_1, -kPointerSize));
+  movq(Operand(destination, scratch, times_1, -kPointerSize), length);
   addq(destination, scratch);
 
   if (min_length <= kLongStringLimit) {
     jmp(&done);
 
     bind(&short_string);
     if (min_length == 0) {
       testl(length, length);
       j(zero, &done);
     }
(...skipping 492 matching lines...)
   j(greater, &no_info_available);
   CompareRoot(MemOperand(scratch_reg, -AllocationSiteInfo::kSize),
               Heap::kAllocationSiteInfoMapRootIndex);
   bind(&no_info_available);
 }
 
 
 } }  // namespace v8::internal
 
 #endif  // V8_TARGET_ARCH_X64