| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_X64 | 7 #if V8_TARGET_ARCH_X64 |
| 8 | 8 |
| 9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
| 10 #include "src/macro-assembler.h" | 10 #include "src/macro-assembler.h" |
| 11 | 11 |
| 12 namespace v8 { | 12 namespace v8 { |
| 13 namespace internal { | 13 namespace internal { |
| 14 | 14 |
| 15 // ------------------------------------------------------------------------- | 15 // ------------------------------------------------------------------------- |
| 16 // Platform-specific RuntimeCallHelper functions. | 16 // Platform-specific RuntimeCallHelper functions. |
| 17 | 17 |
| 18 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { | 18 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { |
| 19 masm->EnterFrame(StackFrame::INTERNAL); | 19 masm->EnterFrame(StackFrame::INTERNAL); |
| 20 ASSERT(!masm->has_frame()); | 20 DCHECK(!masm->has_frame()); |
| 21 masm->set_has_frame(true); | 21 masm->set_has_frame(true); |
| 22 } | 22 } |
| 23 | 23 |
| 24 | 24 |
| 25 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { | 25 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { |
| 26 masm->LeaveFrame(StackFrame::INTERNAL); | 26 masm->LeaveFrame(StackFrame::INTERNAL); |
| 27 ASSERT(masm->has_frame()); | 27 DCHECK(masm->has_frame()); |
| 28 masm->set_has_frame(false); | 28 masm->set_has_frame(false); |
| 29 } | 29 } |
| 30 | 30 |
| 31 | 31 |
| 32 #define __ masm. | 32 #define __ masm. |
| 33 | 33 |
| 34 | 34 |
| 35 UnaryMathFunction CreateExpFunction() { | 35 UnaryMathFunction CreateExpFunction() { |
| 36 if (!FLAG_fast_math) return &std::exp; | 36 if (!FLAG_fast_math) return &std::exp; |
| 37 size_t actual_size; | 37 size_t actual_size; |
| (...skipping 11 matching lines...) |
| 49 | 49 |
| 50 MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx); | 50 MathExpGenerator::EmitMathExp(&masm, input, result, xmm2, rax, rbx); |
| 51 | 51 |
| 52 __ popq(rbx); | 52 __ popq(rbx); |
| 53 __ popq(rax); | 53 __ popq(rax); |
| 54 __ movsd(xmm0, result); | 54 __ movsd(xmm0, result); |
| 55 __ Ret(); | 55 __ Ret(); |
| 56 | 56 |
| 57 CodeDesc desc; | 57 CodeDesc desc; |
| 58 masm.GetCode(&desc); | 58 masm.GetCode(&desc); |
| 59 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 59 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
| 60 | 60 |
| 61 CpuFeatures::FlushICache(buffer, actual_size); | 61 CpuFeatures::FlushICache(buffer, actual_size); |
| 62 base::OS::ProtectCode(buffer, actual_size); | 62 base::OS::ProtectCode(buffer, actual_size); |
| 63 return FUNCTION_CAST<UnaryMathFunction>(buffer); | 63 return FUNCTION_CAST<UnaryMathFunction>(buffer); |
| 64 } | 64 } |
| 65 | 65 |
| 66 | 66 |
| 67 UnaryMathFunction CreateSqrtFunction() { | 67 UnaryMathFunction CreateSqrtFunction() { |
| 68 size_t actual_size; | 68 size_t actual_size; |
| 69 // Allocate buffer in executable space. | 69 // Allocate buffer in executable space. |
| 70 byte* buffer = | 70 byte* buffer = |
| 71 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); | 71 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true)); |
| 72 if (buffer == NULL) return &std::sqrt; | 72 if (buffer == NULL) return &std::sqrt; |
| 73 | 73 |
| 74 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); | 74 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size)); |
| 75 // xmm0: raw double input. | 75 // xmm0: raw double input. |
| 76 // Compute the square root in place on the input in xmm0. | 76 // Compute the square root in place on the input in xmm0. |
| 77 __ sqrtsd(xmm0, xmm0); | 77 __ sqrtsd(xmm0, xmm0); |
| 78 __ Ret(); | 78 __ Ret(); |
| 79 | 79 |
| 80 CodeDesc desc; | 80 CodeDesc desc; |
| 81 masm.GetCode(&desc); | 81 masm.GetCode(&desc); |
| 82 ASSERT(!RelocInfo::RequiresRelocation(desc)); | 82 DCHECK(!RelocInfo::RequiresRelocation(desc)); |
| 83 | 83 |
| 84 CpuFeatures::FlushICache(buffer, actual_size); | 84 CpuFeatures::FlushICache(buffer, actual_size); |
| 85 base::OS::ProtectCode(buffer, actual_size); | 85 base::OS::ProtectCode(buffer, actual_size); |
| 86 return FUNCTION_CAST<UnaryMathFunction>(buffer); | 86 return FUNCTION_CAST<UnaryMathFunction>(buffer); |
| 87 } | 87 } |
| 88 | 88 |
| 89 | 89 |
| 90 #ifdef _WIN64 | 90 #ifdef _WIN64 |
| 91 typedef double (*ModuloFunction)(double, double); | 91 typedef double (*ModuloFunction)(double, double); |
| 92 // Define custom fmod implementation. | 92 // Define custom fmod implementation. |
| (...skipping 93 matching lines...) |
| 186 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | 186 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
| 187 MacroAssembler* masm, | 187 MacroAssembler* masm, |
| 188 Register receiver, | 188 Register receiver, |
| 189 Register key, | 189 Register key, |
| 190 Register value, | 190 Register value, |
| 191 Register target_map, | 191 Register target_map, |
| 192 AllocationSiteMode mode, | 192 AllocationSiteMode mode, |
| 193 Label* allocation_memento_found) { | 193 Label* allocation_memento_found) { |
| 194 // Return address is on the stack. | 194 // Return address is on the stack. |
| 195 Register scratch = rdi; | 195 Register scratch = rdi; |
| 196 ASSERT(!AreAliased(receiver, key, value, target_map, scratch)); | 196 DCHECK(!AreAliased(receiver, key, value, target_map, scratch)); |
| 197 | 197 |
| 198 if (mode == TRACK_ALLOCATION_SITE) { | 198 if (mode == TRACK_ALLOCATION_SITE) { |
| 199 ASSERT(allocation_memento_found != NULL); | 199 DCHECK(allocation_memento_found != NULL); |
| 200 __ JumpIfJSArrayHasAllocationMemento( | 200 __ JumpIfJSArrayHasAllocationMemento( |
| 201 receiver, scratch, allocation_memento_found); | 201 receiver, scratch, allocation_memento_found); |
| 202 } | 202 } |
| 203 | 203 |
| 204 // Set transitioned map. | 204 // Set transitioned map. |
| 205 __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map); | 205 __ movp(FieldOperand(receiver, HeapObject::kMapOffset), target_map); |
| 206 __ RecordWriteField(receiver, | 206 __ RecordWriteField(receiver, |
| 207 HeapObject::kMapOffset, | 207 HeapObject::kMapOffset, |
| 208 target_map, | 208 target_map, |
| 209 scratch, | 209 scratch, |
| 210 kDontSaveFPRegs, | 210 kDontSaveFPRegs, |
| 211 EMIT_REMEMBERED_SET, | 211 EMIT_REMEMBERED_SET, |
| 212 OMIT_SMI_CHECK); | 212 OMIT_SMI_CHECK); |
| 213 } | 213 } |
| 214 | 214 |
| 215 | 215 |
| 216 void ElementsTransitionGenerator::GenerateSmiToDouble( | 216 void ElementsTransitionGenerator::GenerateSmiToDouble( |
| 217 MacroAssembler* masm, | 217 MacroAssembler* masm, |
| 218 Register receiver, | 218 Register receiver, |
| 219 Register key, | 219 Register key, |
| 220 Register value, | 220 Register value, |
| 221 Register target_map, | 221 Register target_map, |
| 222 AllocationSiteMode mode, | 222 AllocationSiteMode mode, |
| 223 Label* fail) { | 223 Label* fail) { |
| 224 // Return address is on the stack. | 224 // Return address is on the stack. |
| 225 ASSERT(receiver.is(rdx)); | 225 DCHECK(receiver.is(rdx)); |
| 226 ASSERT(key.is(rcx)); | 226 DCHECK(key.is(rcx)); |
| 227 ASSERT(value.is(rax)); | 227 DCHECK(value.is(rax)); |
| 228 ASSERT(target_map.is(rbx)); | 228 DCHECK(target_map.is(rbx)); |
| 229 | 229 |
| 230 // The fail label is not actually used since we do not allocate. | 230 // The fail label is not actually used since we do not allocate. |
| 231 Label allocated, new_backing_store, only_change_map, done; | 231 Label allocated, new_backing_store, only_change_map, done; |
| 232 | 232 |
| 233 if (mode == TRACK_ALLOCATION_SITE) { | 233 if (mode == TRACK_ALLOCATION_SITE) { |
| 234 __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail); | 234 __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail); |
| 235 } | 235 } |
| 236 | 236 |
| 237 // Check for empty arrays, which only require a map transition and no changes | 237 // Check for empty arrays, which only require a map transition and no changes |
| 238 // to the backing store. | 238 // to the backing store. |
| 239 __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset)); | 239 __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset)); |
| 240 __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex); | 240 __ CompareRoot(r8, Heap::kEmptyFixedArrayRootIndex); |
| 241 __ j(equal, &only_change_map); | 241 __ j(equal, &only_change_map); |
| 242 | 242 |
| 243 __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset)); | 243 __ SmiToInteger32(r9, FieldOperand(r8, FixedDoubleArray::kLengthOffset)); |
| 244 if (kPointerSize == kDoubleSize) { | 244 if (kPointerSize == kDoubleSize) { |
| 245 // Check backing store for COW-ness. For COW arrays we have to | 245 // Check backing store for COW-ness. For COW arrays we have to |
| 246 // allocate a new backing store. | 246 // allocate a new backing store. |
| 247 __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset), | 247 __ CompareRoot(FieldOperand(r8, HeapObject::kMapOffset), |
| 248 Heap::kFixedCOWArrayMapRootIndex); | 248 Heap::kFixedCOWArrayMapRootIndex); |
| 249 __ j(equal, &new_backing_store); | 249 __ j(equal, &new_backing_store); |
| 250 } else { | 250 } else { |
| 251 // For the x32 port we have to allocate a new backing store, as SMI size | 251 // For the x32 port we have to allocate a new backing store, as SMI size |
| 252 // is not equal to double size. | 252 // is not equal to double size. |
| 253 ASSERT(kDoubleSize == 2 * kPointerSize); | 253 DCHECK(kDoubleSize == 2 * kPointerSize); |
| 254 __ jmp(&new_backing_store); | 254 __ jmp(&new_backing_store); |
| 255 } | 255 } |
| 256 | 256 |
| 257 // Check if the backing store is in new-space. If not, we need to allocate | 257 // Check if the backing store is in new-space. If not, we need to allocate |
| 258 // a new one since the old one is in pointer-space. | 258 // a new one since the old one is in pointer-space. |
| 259 // If in new space, we can reuse the old backing store because it is | 259 // If in new space, we can reuse the old backing store because it is |
| 260 // the same size. | 260 // the same size. |
| 261 __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store); | 261 __ JumpIfNotInNewSpace(r8, rdi, &new_backing_store); |
| 262 | 262 |
| 263 __ movp(r14, r8); // Destination array equals source array. | 263 __ movp(r14, r8); // Destination array equals source array. |
| (...skipping 90 matching lines...) |
| 354 | 354 |
| 355 void ElementsTransitionGenerator::GenerateDoubleToObject( | 355 void ElementsTransitionGenerator::GenerateDoubleToObject( |
| 356 MacroAssembler* masm, | 356 MacroAssembler* masm, |
| 357 Register receiver, | 357 Register receiver, |
| 358 Register key, | 358 Register key, |
| 359 Register value, | 359 Register value, |
| 360 Register target_map, | 360 Register target_map, |
| 361 AllocationSiteMode mode, | 361 AllocationSiteMode mode, |
| 362 Label* fail) { | 362 Label* fail) { |
| 363 // Return address is on the stack. | 363 // Return address is on the stack. |
| 364 ASSERT(receiver.is(rdx)); | 364 DCHECK(receiver.is(rdx)); |
| 365 ASSERT(key.is(rcx)); | 365 DCHECK(key.is(rcx)); |
| 366 ASSERT(value.is(rax)); | 366 DCHECK(value.is(rax)); |
| 367 ASSERT(target_map.is(rbx)); | 367 DCHECK(target_map.is(rbx)); |
| 368 | 368 |
| 369 Label loop, entry, convert_hole, gc_required, only_change_map; | 369 Label loop, entry, convert_hole, gc_required, only_change_map; |
| 370 | 370 |
| 371 if (mode == TRACK_ALLOCATION_SITE) { | 371 if (mode == TRACK_ALLOCATION_SITE) { |
| 372 __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail); | 372 __ JumpIfJSArrayHasAllocationMemento(rdx, rdi, fail); |
| 373 } | 373 } |
| 374 | 374 |
| 375 // Check for empty arrays, which only require a map transition and no changes | 375 // Check for empty arrays, which only require a map transition and no changes |
| 376 // to the backing store. | 376 // to the backing store. |
| 377 __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset)); | 377 __ movp(r8, FieldOperand(rdx, JSObject::kElementsOffset)); |
| (...skipping 195 matching lines...) |
| 573 __ bind(&done); | 573 __ bind(&done); |
| 574 } | 574 } |
| 575 | 575 |
| 576 | 576 |
| 577 void MathExpGenerator::EmitMathExp(MacroAssembler* masm, | 577 void MathExpGenerator::EmitMathExp(MacroAssembler* masm, |
| 578 XMMRegister input, | 578 XMMRegister input, |
| 579 XMMRegister result, | 579 XMMRegister result, |
| 580 XMMRegister double_scratch, | 580 XMMRegister double_scratch, |
| 581 Register temp1, | 581 Register temp1, |
| 582 Register temp2) { | 582 Register temp2) { |
| 583 ASSERT(!input.is(result)); | 583 DCHECK(!input.is(result)); |
| 584 ASSERT(!input.is(double_scratch)); | 584 DCHECK(!input.is(double_scratch)); |
| 585 ASSERT(!result.is(double_scratch)); | 585 DCHECK(!result.is(double_scratch)); |
| 586 ASSERT(!temp1.is(temp2)); | 586 DCHECK(!temp1.is(temp2)); |
| 587 ASSERT(ExternalReference::math_exp_constants(0).address() != NULL); | 587 DCHECK(ExternalReference::math_exp_constants(0).address() != NULL); |
| 588 | 588 |
| 589 Label done; | 589 Label done; |
| 590 | 590 |
| 591 __ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); | 591 __ Move(kScratchRegister, ExternalReference::math_exp_constants(0)); |
| 592 __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize)); | 592 __ movsd(double_scratch, Operand(kScratchRegister, 0 * kDoubleSize)); |
| 593 __ xorpd(result, result); | 593 __ xorpd(result, result); |
| 594 __ ucomisd(double_scratch, input); | 594 __ ucomisd(double_scratch, input); |
| 595 __ j(above_equal, &done); | 595 __ j(above_equal, &done); |
| 596 __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize)); | 596 __ ucomisd(input, Operand(kScratchRegister, 1 * kDoubleSize)); |
| 597 __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize)); | 597 __ movsd(result, Operand(kScratchRegister, 2 * kDoubleSize)); |
| (...skipping 24 matching lines...) |
| 622 __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize)); | 622 __ addsd(result, Operand(kScratchRegister, 8 * kDoubleSize)); |
| 623 __ mulsd(result, input); | 623 __ mulsd(result, input); |
| 624 | 624 |
| 625 __ bind(&done); | 625 __ bind(&done); |
| 626 } | 626 } |
| 627 | 627 |
| 628 #undef __ | 628 #undef __ |
| 629 | 629 |
| 630 | 630 |
| 631 CodeAgingHelper::CodeAgingHelper() { | 631 CodeAgingHelper::CodeAgingHelper() { |
| 632 ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength); | 632 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength); |
| 633 // The sequence of instructions that is patched out for aging code is the | 633 // The sequence of instructions that is patched out for aging code is the |
| 634 // following boilerplate stack-building prologue that is found both in | 634 // following boilerplate stack-building prologue that is found both in |
| 635 // FUNCTION and OPTIMIZED_FUNCTION code: | 635 // FUNCTION and OPTIMIZED_FUNCTION code: |
| 636 CodePatcher patcher(young_sequence_.start(), young_sequence_.length()); | 636 CodePatcher patcher(young_sequence_.start(), young_sequence_.length()); |
| 637 patcher.masm()->pushq(rbp); | 637 patcher.masm()->pushq(rbp); |
| 638 patcher.masm()->movp(rbp, rsp); | 638 patcher.masm()->movp(rbp, rsp); |
| 639 patcher.masm()->Push(rsi); | 639 patcher.masm()->Push(rsi); |
| 640 patcher.masm()->Push(rdi); | 640 patcher.masm()->Push(rdi); |
| 641 } | 641 } |
| 642 | 642 |
| 643 | 643 |
| 644 #ifdef DEBUG | 644 #ifdef DEBUG |
| 645 bool CodeAgingHelper::IsOld(byte* candidate) const { | 645 bool CodeAgingHelper::IsOld(byte* candidate) const { |
| 646 return *candidate == kCallOpcode; | 646 return *candidate == kCallOpcode; |
| 647 } | 647 } |
| 648 #endif | 648 #endif |
| 649 | 649 |
| 650 | 650 |
| 651 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { | 651 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) { |
| 652 bool result = isolate->code_aging_helper()->IsYoung(sequence); | 652 bool result = isolate->code_aging_helper()->IsYoung(sequence); |
| 653 ASSERT(result || isolate->code_aging_helper()->IsOld(sequence)); | 653 DCHECK(result || isolate->code_aging_helper()->IsOld(sequence)); |
| 654 return result; | 654 return result; |
| 655 } | 655 } |
| 656 | 656 |
| 657 | 657 |
| 658 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, | 658 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age, |
| 659 MarkingParity* parity) { | 659 MarkingParity* parity) { |
| 660 if (IsYoungSequence(isolate, sequence)) { | 660 if (IsYoungSequence(isolate, sequence)) { |
| 661 *age = kNoAgeCodeAge; | 661 *age = kNoAgeCodeAge; |
| 662 *parity = NO_MARKING_PARITY; | 662 *parity = NO_MARKING_PARITY; |
| 663 } else { | 663 } else { |
| (...skipping 18 matching lines...) Expand all Loading... |
| 682 Code* stub = GetCodeAgeStub(isolate, age, parity); | 682 Code* stub = GetCodeAgeStub(isolate, age, parity); |
| 683 CodePatcher patcher(sequence, young_length); | 683 CodePatcher patcher(sequence, young_length); |
| 684 patcher.masm()->call(stub->instruction_start()); | 684 patcher.masm()->call(stub->instruction_start()); |
| 685 patcher.masm()->Nop( | 685 patcher.masm()->Nop( |
| 686 kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength); | 686 kNoCodeAgeSequenceLength - Assembler::kShortCallInstructionLength); |
| 687 } | 687 } |
| 688 } | 688 } |
| 689 | 689 |
| 690 | 690 |
| 691 Operand StackArgumentsAccessor::GetArgumentOperand(int index) { | 691 Operand StackArgumentsAccessor::GetArgumentOperand(int index) { |
| 692 ASSERT(index >= 0); | 692 DCHECK(index >= 0); |
| 693 int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0; | 693 int receiver = (receiver_mode_ == ARGUMENTS_CONTAIN_RECEIVER) ? 1 : 0; |
| 694 int displacement_to_last_argument = base_reg_.is(rsp) ? | 694 int displacement_to_last_argument = base_reg_.is(rsp) ? |
| 695 kPCOnStackSize : kFPOnStackSize + kPCOnStackSize; | 695 kPCOnStackSize : kFPOnStackSize + kPCOnStackSize; |
| 696 displacement_to_last_argument += extra_displacement_to_last_argument_; | 696 displacement_to_last_argument += extra_displacement_to_last_argument_; |
| 697 if (argument_count_reg_.is(no_reg)) { | 697 if (argument_count_reg_.is(no_reg)) { |
| 698 // argument[0] is at base_reg_ + displacement_to_last_argument + | 698 // argument[0] is at base_reg_ + displacement_to_last_argument + |
| 699 // (argument_count_immediate_ + receiver - 1) * kPointerSize. | 699 // (argument_count_immediate_ + receiver - 1) * kPointerSize. |
| 700 ASSERT(argument_count_immediate_ + receiver > 0); | 700 DCHECK(argument_count_immediate_ + receiver > 0); |
| 701 return Operand(base_reg_, displacement_to_last_argument + | 701 return Operand(base_reg_, displacement_to_last_argument + |
| 702 (argument_count_immediate_ + receiver - 1 - index) * kPointerSize); | 702 (argument_count_immediate_ + receiver - 1 - index) * kPointerSize); |
| 703 } else { | 703 } else { |
| 704 // argument[0] is at base_reg_ + displacement_to_last_argument + | 704 // argument[0] is at base_reg_ + displacement_to_last_argument + |
| 705 // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize. | 705 // argument_count_reg_ * times_pointer_size + (receiver - 1) * kPointerSize. |
| 706 return Operand(base_reg_, argument_count_reg_, times_pointer_size, | 706 return Operand(base_reg_, argument_count_reg_, times_pointer_size, |
| 707 displacement_to_last_argument + (receiver - 1 - index) * kPointerSize); | 707 displacement_to_last_argument + (receiver - 1 - index) * kPointerSize); |
| 708 } | 708 } |
| 709 } | 709 } |
| 710 | 710 |
| 711 | 711 |
| 712 } } // namespace v8::internal | 712 } } // namespace v8::internal |
| 713 | 713 |
| 714 #endif // V8_TARGET_ARCH_X64 | 714 #endif // V8_TARGET_ARCH_X64 |
| OLD | NEW |
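
The change throughout this file is mechanical: every ASSERT becomes DCHECK, a debug-only check that compiles away in release builds. For context, below is a minimal, self-contained sketch of how such a macro is typically wired up. The names and the DEBUG gate here are illustrative assumptions for a standalone program, not V8's actual definitions (V8's real macro lives in its base library and is more elaborate):

  #include <cstdio>
  #include <cstdlib>

  // Debug-only check: active when DEBUG is defined, a no-op otherwise.
  #ifdef DEBUG
  #define DCHECK(condition)                                          \
    do {                                                             \
      if (!(condition)) {                                            \
        std::fprintf(stderr, "%s:%d: DCHECK failed: %s\n",           \
                     __FILE__, __LINE__, #condition);                \
        std::abort();                                                \
      }                                                              \
    } while (false)
  #else
  #define DCHECK(condition) ((void)0)
  #endif

  int main() {
    int has_frame = 0;
    DCHECK(has_frame == 0);  // Aborts in debug builds if false; no-op in release.
    return 0;
  }

Because the release expansion is ((void)0), any side effects in the condition vanish in release builds, which is why conditions passed to a DCHECK-style macro must be side-effect free.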