OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 209 matching lines...)
220 descriptor->register_param_count_ = 1; | 220 descriptor->register_param_count_ = 1; |
221 descriptor->register_params_ = registers; | 221 descriptor->register_params_ = registers; |
222 descriptor->deoptimization_handler_ = | 222 descriptor->deoptimization_handler_ = |
223 FUNCTION_ADDR(ToBooleanIC_Miss); | 223 FUNCTION_ADDR(ToBooleanIC_Miss); |
224 descriptor->SetMissHandler( | 224 descriptor->SetMissHandler( |
225 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); | 225 ExternalReference(IC_Utility(IC::kToBooleanIC_Miss), isolate)); |
226 } | 226 } |
227 | 227 |
228 | 228 |
229 #define __ ACCESS_MASM(masm) | 229 #define __ ACCESS_MASM(masm) |
| 230 #define __k __ |
| 231 #define __a __ |
| 232 #define __q __ |
| 233 #define __s __ |
| 234 #define __n __ |
230 | 235 |
231 | 236 |
232 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { | 237 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { |
233 // Update the static counter each time a new code stub is generated. | 238 // Update the static counter each time a new code stub is generated. |
234 Isolate* isolate = masm->isolate(); | 239 Isolate* isolate = masm->isolate(); |
235 isolate->counters()->code_stubs()->Increment(); | 240 isolate->counters()->code_stubs()->Increment(); |
236 | 241 |
237 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); | 242 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(isolate); |
238 int param_count = descriptor->register_param_count_; | 243 int param_count = descriptor->register_param_count_; |
239 { | 244 { |
(...skipping 20 matching lines...)
260 __ j(not_zero, &check_heap_number, Label::kNear); | 265 __ j(not_zero, &check_heap_number, Label::kNear); |
261 __ Ret(); | 266 __ Ret(); |
262 | 267 |
263 __ bind(&check_heap_number); | 268 __ bind(&check_heap_number); |
264 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), | 269 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), |
265 Heap::kHeapNumberMapRootIndex); | 270 Heap::kHeapNumberMapRootIndex); |
266 __ j(not_equal, &call_builtin, Label::kNear); | 271 __ j(not_equal, &call_builtin, Label::kNear); |
267 __ Ret(); | 272 __ Ret(); |
268 | 273 |
269 __ bind(&call_builtin); | 274 __ bind(&call_builtin); |
270 __ pop(rcx); // Pop return address. | 275 __k pop(rcx); // Pop return address. |
271 __ push(rax); | 276 __ push(rax); |
272 __ push(rcx); // Push return address. | 277 __k push(rcx); // Push return address. |
273 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); | 278 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_FUNCTION); |
274 } | 279 } |
275 | 280 |
276 | 281 |
277 void FastNewClosureStub::Generate(MacroAssembler* masm) { | 282 void FastNewClosureStub::Generate(MacroAssembler* masm) { |
278 // Create a new closure from the given function info in new | 283 // Create a new closure from the given function info in new |
279 // space. Set the context to the current context in rsi. | 284 // space. Set the context to the current context in rsi. |
280 Counters* counters = masm->isolate()->counters(); | 285 Counters* counters = masm->isolate()->counters(); |
281 | 286 |
282 Label gc; | 287 Label gc; |
283 __ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); | 288 __ Allocate(JSFunction::kSize, rax, rbx, rcx, &gc, TAG_OBJECT); |
284 | 289 |
285 __ IncrementCounter(counters->fast_new_closure_total(), 1); | 290 __ IncrementCounter(counters->fast_new_closure_total(), 1); |
286 | 291 |
287 // Get the function info from the stack. | 292 // Get the function info from the stack. |
288 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); | 293 __a movq(rdx, Operand(rsp, 1 * kPointerSize)); |
289 | 294 |
290 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); | 295 int map_index = Context::FunctionMapIndex(language_mode_, is_generator_); |
291 | 296 |
292 // Compute the function map in the current native context and set that | 297 // Compute the function map in the current native context and set that |
293 // as the map of the allocated object. | 298 // as the map of the allocated object. |
294 __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 299 __ movq(rcx, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
295 __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset)); | 300 __ movq(rcx, FieldOperand(rcx, GlobalObject::kNativeContextOffset)); |
296 __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index))); | 301 __ movq(rbx, Operand(rcx, Context::SlotOffset(map_index))); |
297 __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx); | 302 __ movq(FieldOperand(rax, JSObject::kMapOffset), rbx); |
298 | 303 |
(...skipping 87 matching lines...)
386 rcx, | 391 rcx, |
387 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST), | 392 Context::SlotOffset(Context::OPTIMIZED_FUNCTIONS_LIST), |
388 rdx, | 393 rdx, |
389 rbx, | 394 rbx, |
390 kDontSaveFPRegs); | 395 kDontSaveFPRegs); |
391 | 396 |
392 // Return and remove the on-stack parameter. | 397 // Return and remove the on-stack parameter. |
393 __ ret(1 * kPointerSize); | 398 __ ret(1 * kPointerSize); |
394 | 399 |
395 __ bind(&restore); | 400 __ bind(&restore); |
396 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); | 401 __a movq(rdx, Operand(rsp, 1 * kPointerSize)); |
397 __ jmp(&install_unoptimized); | 402 __ jmp(&install_unoptimized); |
398 | 403 |
399 // Create a new closure through the slower runtime call. | 404 // Create a new closure through the slower runtime call. |
400 __ bind(&gc); | 405 __ bind(&gc); |
401 __ pop(rcx); // Temporarily remove return address. | 406 __k pop(rcx); // Temporarily remove return address. |
402 __ pop(rdx); | 407 __ pop(rdx); |
403 __ push(rsi); | 408 __ push(rsi); |
404 __ push(rdx); | 409 __ push(rdx); |
405 __ PushRoot(Heap::kFalseValueRootIndex); | 410 __ PushRoot(Heap::kFalseValueRootIndex); |
406 __ push(rcx); // Restore return address. | 411 __k push(rcx); // Restore return address. |
407 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); | 412 __ TailCallRuntime(Runtime::kNewClosure, 3, 1); |
408 } | 413 } |
409 | 414 |
410 | 415 |
411 void FastNewContextStub::Generate(MacroAssembler* masm) { | 416 void FastNewContextStub::Generate(MacroAssembler* masm) { |
412 // Try to allocate the context in new space. | 417 // Try to allocate the context in new space. |
413 Label gc; | 418 Label gc; |
414 int length = slots_ + Context::MIN_CONTEXT_SLOTS; | 419 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
415 __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize, | 420 __ Allocate((length * kPointerSize) + FixedArray::kHeaderSize, |
416 rax, rbx, rcx, &gc, TAG_OBJECT); | 421 rax, rbx, rcx, &gc, TAG_OBJECT); |
417 | 422 |
418 // Get the function from the stack. | 423 // Get the function from the stack. |
419 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | 424 __a movq(rcx, Operand(rsp, 1 * kPointerSize)); |
420 | 425 |
421 // Set up the object header. | 426 // Set up the object header. |
422 __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex); | 427 __ LoadRoot(kScratchRegister, Heap::kFunctionContextMapRootIndex); |
423 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); | 428 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); |
424 __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); | 429 __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); |
425 | 430 |
426 // Set up the fixed slots. | 431 // Set up the fixed slots. |
427 __ Set(rbx, 0); // Set to NULL. | 432 __ Set(rbx, 0); // Set to NULL. |
428 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); | 433 __ movq(Operand(rax, Context::SlotOffset(Context::CLOSURE_INDEX)), rcx); |
429 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi); | 434 __ movq(Operand(rax, Context::SlotOffset(Context::PREVIOUS_INDEX)), rsi); |
(...skipping 25 matching lines...)
455 // [rsp + (1 * kPointerSize)] : function | 460 // [rsp + (1 * kPointerSize)] : function |
456 // [rsp + (2 * kPointerSize)] : serialized scope info | 461 // [rsp + (2 * kPointerSize)] : serialized scope info |
457 | 462 |
458 // Try to allocate the context in new space. | 463 // Try to allocate the context in new space. |
459 Label gc; | 464 Label gc; |
460 int length = slots_ + Context::MIN_CONTEXT_SLOTS; | 465 int length = slots_ + Context::MIN_CONTEXT_SLOTS; |
461 __ Allocate(FixedArray::SizeFor(length), | 466 __ Allocate(FixedArray::SizeFor(length), |
462 rax, rbx, rcx, &gc, TAG_OBJECT); | 467 rax, rbx, rcx, &gc, TAG_OBJECT); |
463 | 468 |
464 // Get the function from the stack. | 469 // Get the function from the stack. |
465 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | 470 __a movq(rcx, Operand(rsp, 1 * kPointerSize)); |
466 | 471 |
467 // Get the serialized scope info from the stack. | 472 // Get the serialized scope info from the stack. |
468 __ movq(rbx, Operand(rsp, 2 * kPointerSize)); | 473 __a movq(rbx, Operand(rsp, 2 * kPointerSize)); |
469 | 474 |
470 // Set up the object header. | 475 // Set up the object header. |
471 __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex); | 476 __ LoadRoot(kScratchRegister, Heap::kBlockContextMapRootIndex); |
472 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); | 477 __ movq(FieldOperand(rax, HeapObject::kMapOffset), kScratchRegister); |
473 __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); | 478 __ Move(FieldOperand(rax, FixedArray::kLengthOffset), Smi::FromInt(length)); |
474 | 479 |
475 // If this block context is nested in the native context we get a smi | 480 // If this block context is nested in the native context we get a smi |
476 // sentinel instead of a function. The block context should get the | 481 // sentinel instead of a function. The block context should get the |
477 // canonical empty function of the native context as its closure which | 482 // canonical empty function of the native context as its closure which |
478 // we still have to look up. | 483 // we still have to look up. |
(...skipping 103 matching lines...)
582 Register source) { | 587 Register source) { |
583 // Result may be rcx. If result and source are the same register, source will | 588 // Result may be rcx. If result and source are the same register, source will |
584 // be overwritten. | 589 // be overwritten. |
585 ASSERT(!result.is(rdi) && !result.is(rbx)); | 590 ASSERT(!result.is(rdi) && !result.is(rbx)); |
586 // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use | 591 // TODO(lrn): When type info reaches here, if value is a 32-bit integer, use |
587 // cvttsd2si (32-bit version) directly. | 592 // cvttsd2si (32-bit version) directly. |
588 Register double_exponent = rbx; | 593 Register double_exponent = rbx; |
589 Register double_value = rdi; | 594 Register double_value = rdi; |
590 Label done, exponent_63_plus; | 595 Label done, exponent_63_plus; |
591 // Get double and extract exponent. | 596 // Get double and extract exponent. |
592 __ movq(double_value, FieldOperand(source, HeapNumber::kValueOffset)); | 597 __k movq(double_value, FieldOperand(source, HeapNumber::kValueOffset)); |
593 // Clear result preemptively, in case we need to return zero. | 598 // Clear result preemptively, in case we need to return zero. |
594 __ xorl(result, result); | 599 __ xorl(result, result); |
595 __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there. | 600 __ movq(xmm0, double_value); // Save copy in xmm0 in case we need it there. |
596 // Double to remove sign bit, shift exponent down to least significant bits, | 601 // Double to remove sign bit, shift exponent down to least significant bits, |
597 // and subtract bias to get the unshifted, unbiased exponent. | 602 // and subtract bias to get the unshifted, unbiased exponent. |
598 __ lea(double_exponent, Operand(double_value, double_value, times_1, 0)); | 603 __k lea(double_exponent, Operand(double_value, double_value, times_1, 0)); |
599 __ shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits)); | 604 __k shr(double_exponent, Immediate(64 - HeapNumber::kExponentBits)); |
600 __ subl(double_exponent, Immediate(HeapNumber::kExponentBias)); | 605 __ subl(double_exponent, Immediate(HeapNumber::kExponentBias)); |
601 // Check whether the exponent is too big for a 63 bit unsigned integer. | 606 // Check whether the exponent is too big for a 63 bit unsigned integer. |
602 __ cmpl(double_exponent, Immediate(63)); | 607 __ cmpl(double_exponent, Immediate(63)); |
603 __ j(above_equal, &exponent_63_plus, Label::kNear); | 608 __ j(above_equal, &exponent_63_plus, Label::kNear); |
604 // Handle exponent range 0..62. | 609 // Handle exponent range 0..62. |
605 __ cvttsd2siq(result, xmm0); | 610 __ cvttsd2siq(result, xmm0); |
606 __ jmp(&done, Label::kNear); | 611 __ jmp(&done, Label::kNear); |
607 | 612 |
608 __ bind(&exponent_63_plus); | 613 __ bind(&exponent_63_plus); |
609 // Exponent negative or 63+. | 614 // Exponent negative or 63+. |
610 __ cmpl(double_exponent, Immediate(83)); | 615 __ cmpl(double_exponent, Immediate(83)); |
611 // If exponent negative or above 83, number contains no significant bits in | 616 // If exponent negative or above 83, number contains no significant bits in |
612 // the range 0..2^31, so result is zero, and rcx already holds zero. | 617 // the range 0..2^31, so result is zero, and rcx already holds zero. |
613 __ j(above, &done, Label::kNear); | 618 __ j(above, &done, Label::kNear); |
614 | 619 |
615 // Exponent in range 63..83. | 620 // Exponent in range 63..83. |
616 // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely | 621 // Mantissa * 2^exponent contains bits in the range 2^0..2^31, namely |
617 // the least significant exponent-52 bits. | 622 // the least significant exponent-52 bits. |
618 | 623 |
619 // Negate low bits of mantissa if value is negative. | 624 // Negate low bits of mantissa if value is negative. |
620 __ addq(double_value, double_value); // Move sign bit to carry. | 625 __k addq(double_value, double_value); // Move sign bit to carry. |
621 __ sbbl(result, result); // And convert carry to -1 in result register. | 626 __ sbbl(result, result); // And convert carry to -1 in result register. |
622 // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0. | 627 // if scratch2 is negative, do (scratch2-1)^-1, otherwise (scratch2-0)^0. |
623 __ addl(double_value, result); | 628 __ addl(double_value, result); |
624 // Do xor in opposite directions depending on where we want the result | 629 // Do xor in opposite directions depending on where we want the result |
625 // (depending on whether result is rcx or not). | 630 // (depending on whether result is rcx or not). |
626 | 631 |
627 if (result.is(rcx)) { | 632 if (result.is(rcx)) { |
628 __ xorl(double_value, result); | 633 __ xorl(double_value, result); |
629 // Left shift mantissa by (exponent - mantissabits - 1) to save the | 634 // Left shift mantissa by (exponent - mantissabits - 1) to save the |
630 // bits that have positional values below 2^32 (the extra -1 comes from the | 635 // bits that have positional values below 2^32 (the extra -1 comes from the |
(...skipping 24 matching lines...)
655 GenerateNumberStub(masm); | 660 GenerateNumberStub(masm); |
656 break; | 661 break; |
657 case UnaryOpIC::GENERIC: | 662 case UnaryOpIC::GENERIC: |
658 GenerateGenericStub(masm); | 663 GenerateGenericStub(masm); |
659 break; | 664 break; |
660 } | 665 } |
661 } | 666 } |
662 | 667 |
663 | 668 |
664 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 669 void UnaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
665 __ pop(rcx); // Save return address. | 670 __k pop(rcx); // Save return address. |
666 | 671 |
667 __ push(rax); // the operand | 672 __ push(rax); // the operand |
668 __ Push(Smi::FromInt(op_)); | 673 __ Push(Smi::FromInt(op_)); |
669 __ Push(Smi::FromInt(mode_)); | 674 __ Push(Smi::FromInt(mode_)); |
670 __ Push(Smi::FromInt(operand_type_)); | 675 __ Push(Smi::FromInt(operand_type_)); |
671 | 676 |
672 __ push(rcx); // Push return address. | 677 __k push(rcx); // Push return address. |
673 | 678 |
674 // Patch the caller to an appropriate specialized stub and return the | 679 // Patch the caller to an appropriate specialized stub and return the |
675 // operation result to the caller of the stub. | 680 // operation result to the caller of the stub. |
676 __ TailCallExternalReference( | 681 __ TailCallExternalReference( |
677 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1); | 682 ExternalReference(IC_Utility(IC::kUnaryOp_Patch), masm->isolate()), 4, 1); |
678 } | 683 } |
679 | 684 |
680 | 685 |
681 // TODO(svenpanne): Use virtual functions instead of switch. | 686 // TODO(svenpanne): Use virtual functions instead of switch. |
682 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { | 687 void UnaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
(...skipping 90 matching lines...)
773 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, | 778 void UnaryOpStub::GenerateHeapNumberCodeSub(MacroAssembler* masm, |
774 Label* slow) { | 779 Label* slow) { |
775 // Check if the operand is a heap number. | 780 // Check if the operand is a heap number. |
776 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), | 781 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), |
777 Heap::kHeapNumberMapRootIndex); | 782 Heap::kHeapNumberMapRootIndex); |
778 __ j(not_equal, slow); | 783 __ j(not_equal, slow); |
779 | 784 |
780 // Operand is a float, negate its value by flipping the sign bit. | 785 // Operand is a float, negate its value by flipping the sign bit. |
781 if (mode_ == UNARY_OVERWRITE) { | 786 if (mode_ == UNARY_OVERWRITE) { |
782 __ Set(kScratchRegister, 0x01); | 787 __ Set(kScratchRegister, 0x01); |
783 __ shl(kScratchRegister, Immediate(63)); | 788 __k shl(kScratchRegister, Immediate(63)); |
784 __ xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister); | 789 __k xor_(FieldOperand(rax, HeapNumber::kValueOffset), kScratchRegister); |
785 } else { | 790 } else { |
786 // Allocate a heap number before calculating the answer, | 791 // Allocate a heap number before calculating the answer, |
787 // so we don't have an untagged double around during GC. | 792 // so we don't have an untagged double around during GC. |
788 Label slow_allocate_heapnumber, heapnumber_allocated; | 793 Label slow_allocate_heapnumber, heapnumber_allocated; |
789 __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber); | 794 __ AllocateHeapNumber(rcx, rbx, &slow_allocate_heapnumber); |
790 __ jmp(&heapnumber_allocated); | 795 __ jmp(&heapnumber_allocated); |
791 | 796 |
792 __ bind(&slow_allocate_heapnumber); | 797 __ bind(&slow_allocate_heapnumber); |
793 { | 798 { |
794 FrameScope scope(masm, StackFrame::INTERNAL); | 799 FrameScope scope(masm, StackFrame::INTERNAL); |
795 __ push(rax); | 800 __ push(rax); |
796 __ CallRuntime(Runtime::kNumberAlloc, 0); | 801 __ CallRuntime(Runtime::kNumberAlloc, 0); |
797 __ movq(rcx, rax); | 802 __ movq(rcx, rax); |
798 __ pop(rax); | 803 __ pop(rax); |
799 } | 804 } |
800 __ bind(&heapnumber_allocated); | 805 __ bind(&heapnumber_allocated); |
801 // rcx: allocated 'empty' number | 806 // rcx: allocated 'empty' number |
802 | 807 |
803 // Copy the double value to the new heap number, flipping the sign. | 808 // Copy the double value to the new heap number, flipping the sign. |
804 __ movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset)); | 809 __k movq(rdx, FieldOperand(rax, HeapNumber::kValueOffset)); |
805 __ Set(kScratchRegister, 0x01); | 810 __ Set(kScratchRegister, 0x01); |
806 __ shl(kScratchRegister, Immediate(63)); | 811 __k shl(kScratchRegister, Immediate(63)); |
807 __ xor_(rdx, kScratchRegister); // Flip sign. | 812 __k xor_(rdx, kScratchRegister); // Flip sign. |
808 __ movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); | 813 __k movq(FieldOperand(rcx, HeapNumber::kValueOffset), rdx); |
809 __ movq(rax, rcx); | 814 __ movq(rax, rcx); |
810 } | 815 } |
811 __ ret(0); | 816 __ ret(0); |
812 } | 817 } |
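Aside (a sketch, not V8 code): the UNARY_OVERWRITE branch above negates the heap number in place by XOR-ing bit 63 of its raw IEEE-754 payload (Set 0x01, shl 63, xor). A minimal stand-alone C++ sketch of the same operation, with illustrative names:

    #include <cstdint>
    #include <cstring>

    // Flip only the sign bit of a double, mirroring what the stub does to the
    // value at HeapNumber::kValueOffset. Also correct for +-0 and NaN payloads.
    static double NegateBySignBit(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      bits ^= static_cast<uint64_t>(1) << 63;
      std::memcpy(&value, &bits, sizeof(value));
      return value;
    }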
813 | 818 |
814 | 819 |
815 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, | 820 void UnaryOpStub::GenerateHeapNumberCodeBitNot(MacroAssembler* masm, |
816 Label* slow) { | 821 Label* slow) { |
817 // Check if the operand is a heap number. | 822 // Check if the operand is a heap number. |
818 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), | 823 __ CompareRoot(FieldOperand(rax, HeapObject::kMapOffset), |
819 Heap::kHeapNumberMapRootIndex); | 824 Heap::kHeapNumberMapRootIndex); |
820 __ j(not_equal, slow); | 825 __ j(not_equal, slow); |
821 | 826 |
| 827 #ifndef V8_TARGET_ARCH_X32 |
822 // Convert the heap number in rax to an untagged integer in rcx. | 828 // Convert the heap number in rax to an untagged integer in rcx. |
823 IntegerConvert(masm, rax, rax); | 829 IntegerConvert(masm, rax, rax); |
824 | 830 |
825 // Do the bitwise operation and smi tag the result. | 831 // Do the bitwise operation and smi tag the result. |
826 __ notl(rax); | 832 __ notl(rax); |
827 __ Integer32ToSmi(rax, rax); | 833 __ Integer32ToSmi(rax, rax); |
828 __ ret(0); | 834 __ ret(0); |
| 835 #else |
| 836 // Convert the heap number in rax to an untagged integer in rcx. |
| 837 IntegerConvert(masm, rcx, rax); |
| 838 |
| 839 // Do the bitwise operation and smi tag the result. |
| 840 Label try_float; |
| 841 __ notl(rcx); |
| 842 __ cmpl(rcx, Immediate(0xc0000000)); |
| 843 __ j(sign, &try_float, Label::kNear); |
| 844 __ Integer32ToSmi(rax, rcx); |
| 845 __ ret(0); |
| 846 |
| 847 // Try to store the result in a heap number. |
| 848 __ bind(&try_float); |
| 849 if (mode_ == UNARY_NO_OVERWRITE) { |
| 850 Label slow_allocate_heapnumber, heapnumber_allocated; |
| 851 __ movl(rbx, rax); |
| 852 __ AllocateHeapNumber(rax, kScratchRegister, &slow_allocate_heapnumber); |
| 853 __ jmp(&heapnumber_allocated); |
| 854 |
| 855 __ bind(&slow_allocate_heapnumber); |
| 856 { |
| 857 FrameScope scope(masm, StackFrame::INTERNAL); |
| 858 // Push the original HeapNumber on the stack. The integer value can't |
| 859 // be stored since it's untagged and not in the smi range (so we can't |
| 860 // smi-tag it). We'll recalculate the value after the GC instead. |
| 861 __ Push(rbx); |
| 862 __ CallRuntime(Runtime::kNumberAlloc, 0); |
| 863 // New HeapNumber is in rax. |
| 864 __ Pop(rbx); |
| 865 } |
| 866 // Recalculate bit-not value. |
| 867 IntegerConvert(masm, rcx, rbx); |
| 868 __ notl(rcx); |
| 869 |
| 870 __ bind(&heapnumber_allocated); |
| 871 } |
| 872 __ cvtlsi2sd(xmm0, rcx); |
| 873 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 874 __ ret(0); |
| 875 #endif |
829 } | 876 } |
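Aside on the new V8_TARGET_ARCH_X32 branch above (a sketch, not V8 code): because x32 smis are only 31 bits wide, the bit-not result may not be smi-taggable and has to be boxed back into a heap number. Roughly, with IntegerConvert approximated by a plain truncation and the assumed 31-bit smi bounds spelled out:

    #include <cstdint>

    struct BitNotResult {
      bool is_smi;     // result fits the 31-bit smi range
      int32_t smi;     // valid when is_smi is true
      double boxed;    // value for a fresh HeapNumber otherwise
    };

    static BitNotResult BitNotHeapNumber(double value) {
      const int32_t kSmiMin = -(1 << 30);         // assumed x32 smi range
      const int32_t kSmiMax = (1 << 30) - 1;
      int32_t i = static_cast<int32_t>(value);    // stands in for IntegerConvert
      int32_t r = ~i;
      if (r >= kSmiMin && r <= kSmiMax) return {true, r, 0.0};
      return {false, 0, static_cast<double>(r)};  // cvtlsi2sd + movsd path
    }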
830 | 877 |
831 | 878 |
832 // TODO(svenpanne): Use virtual functions instead of switch. | 879 // TODO(svenpanne): Use virtual functions instead of switch. |
833 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { | 880 void UnaryOpStub::GenerateGenericStub(MacroAssembler* masm) { |
834 switch (op_) { | 881 switch (op_) { |
835 case Token::SUB: | 882 case Token::SUB: |
836 GenerateGenericStubSub(masm); | 883 GenerateGenericStubSub(masm); |
837 break; | 884 break; |
838 case Token::BIT_NOT: | 885 case Token::BIT_NOT: |
(...skipping 20 matching lines...)
859 GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear); | 906 GenerateSmiCodeBitNot(masm, &non_smi, Label::kNear); |
860 __ bind(&non_smi); | 907 __ bind(&non_smi); |
861 GenerateHeapNumberCodeBitNot(masm, &slow); | 908 GenerateHeapNumberCodeBitNot(masm, &slow); |
862 __ bind(&slow); | 909 __ bind(&slow); |
863 GenerateGenericCodeFallback(masm); | 910 GenerateGenericCodeFallback(masm); |
864 } | 911 } |
865 | 912 |
866 | 913 |
867 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { | 914 void UnaryOpStub::GenerateGenericCodeFallback(MacroAssembler* masm) { |
868 // Handle the slow case by jumping to the JavaScript builtin. | 915 // Handle the slow case by jumping to the JavaScript builtin. |
869 __ pop(rcx); // pop return address | 916 __k pop(rcx); // pop return address |
870 __ push(rax); | 917 __ push(rax); |
871 __ push(rcx); // push return address | 918 __k push(rcx); // push return address |
872 switch (op_) { | 919 switch (op_) { |
873 case Token::SUB: | 920 case Token::SUB: |
874 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); | 921 __ InvokeBuiltin(Builtins::UNARY_MINUS, JUMP_FUNCTION); |
875 break; | 922 break; |
876 case Token::BIT_NOT: | 923 case Token::BIT_NOT: |
877 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); | 924 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_FUNCTION); |
878 break; | 925 break; |
879 default: | 926 default: |
880 UNREACHABLE(); | 927 UNREACHABLE(); |
881 } | 928 } |
(...skipping 11 matching lines...)
893 op_name, | 940 op_name, |
894 overwrite_name, | 941 overwrite_name, |
895 UnaryOpIC::GetName(operand_type_)); | 942 UnaryOpIC::GetName(operand_type_)); |
896 } | 943 } |
897 | 944 |
898 | 945 |
899 void BinaryOpStub::Initialize() {} | 946 void BinaryOpStub::Initialize() {} |
900 | 947 |
901 | 948 |
902 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 949 void BinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
903 __ pop(rcx); // Save return address. | 950 __k pop(rcx); // Save return address. |
904 __ push(rdx); | 951 __ push(rdx); |
905 __ push(rax); | 952 __ push(rax); |
906 // Left and right arguments are now on top. | 953 // Left and right arguments are now on top. |
907 __ Push(Smi::FromInt(MinorKey())); | 954 __ Push(Smi::FromInt(MinorKey())); |
908 | 955 |
909 __ push(rcx); // Push return address. | 956 __k push(rcx); // Push return address. |
910 | 957 |
911 // Patch the caller to an appropriate specialized stub and return the | 958 // Patch the caller to an appropriate specialized stub and return the |
912 // operation result to the caller of the stub. | 959 // operation result to the caller of the stub. |
913 __ TailCallExternalReference( | 960 __ TailCallExternalReference( |
914 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), | 961 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), |
915 masm->isolate()), | 962 masm->isolate()), |
916 3, | 963 3, |
917 1); | 964 1); |
918 } | 965 } |
919 | 966 |
920 | 967 |
921 static void BinaryOpStub_GenerateSmiCode( | 968 static void BinaryOpStub_GenerateSmiCode( |
922 MacroAssembler* masm, | 969 MacroAssembler* masm, |
923 Label* slow, | 970 Label* slow, |
924 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, | 971 BinaryOpStub::SmiCodeGenerateHeapNumberResults allow_heapnumber_results, |
925 Token::Value op) { | 972 Token::Value op) { |
926 | 973 |
927 // Arguments to BinaryOpStub are in rdx and rax. | 974 // Arguments to BinaryOpStub are in rdx and rax. |
928 const Register left = rdx; | 975 const Register left = rdx; |
929 const Register right = rax; | 976 const Register right = rax; |
930 | 977 |
931 // We only generate heapnumber answers for overflowing calculations | 978 // We only generate heapnumber answers for overflowing calculations |
932 // for the four basic arithmetic operations and logical right shift by 0. | 979 // for the four basic arithmetic operations and logical right shift by 0. |
| 980 #ifndef V8_TARGET_ARCH_X32 |
933 bool generate_inline_heapnumber_results = | 981 bool generate_inline_heapnumber_results = |
934 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && | 982 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
935 (op == Token::ADD || op == Token::SUB || | 983 (op == Token::ADD || op == Token::SUB || |
936 op == Token::MUL || op == Token::DIV || op == Token::SHR); | 984 op == Token::MUL || op == Token::DIV || op == Token::SHR); |
| 985 #else |
| 986 bool generate_inline_heapnumber_results = |
| 987 (allow_heapnumber_results == BinaryOpStub::ALLOW_HEAPNUMBER_RESULTS) && |
| 988 (op == Token::ADD || op == Token::SUB || op == Token::SHL || |
| 989 op == Token::MUL || op == Token::DIV || op == Token::SHR); |
| 990 #endif |
937 | 991 |
938 // Smi check of both operands. If op is BIT_OR, the check is delayed | 992 // Smi check of both operands. If op is BIT_OR, the check is delayed |
939 // until after the OR operation. | 993 // until after the OR operation. |
940 Label not_smis; | 994 Label not_smis; |
941 Label use_fp_on_smis; | 995 Label use_fp_on_smis; |
942 Label fail; | 996 Label fail; |
943 | 997 |
944 if (op != Token::BIT_OR) { | 998 if (op != Token::BIT_OR) { |
945 Comment smi_check_comment(masm, "-- Smi check arguments"); | 999 Comment smi_check_comment(masm, "-- Smi check arguments"); |
946 __ JumpIfNotBothSmi(left, right, ¬_smis); | 1000 __ JumpIfNotBothSmi(left, right, ¬_smis); |
(...skipping 42 matching lines...)
989 ASSERT(right.is(rax)); | 1043 ASSERT(right.is(rax)); |
990 __ SmiXor(right, right, left); // BIT_XOR is commutative. | 1044 __ SmiXor(right, right, left); // BIT_XOR is commutative. |
991 break; | 1045 break; |
992 | 1046 |
993 case Token::BIT_AND: | 1047 case Token::BIT_AND: |
994 ASSERT(right.is(rax)); | 1048 ASSERT(right.is(rax)); |
995 __ SmiAnd(right, right, left); // BIT_AND is commutative. | 1049 __ SmiAnd(right, right, left); // BIT_AND is commutative. |
996 break; | 1050 break; |
997 | 1051 |
998 case Token::SHL: | 1052 case Token::SHL: |
| 1053 #ifndef V8_TARGET_ARCH_X32 |
999 __ SmiShiftLeft(left, left, right); | 1054 __ SmiShiftLeft(left, left, right); |
| 1055 #else |
| 1056 __ movl(kScratchRegister, left); |
| 1057 __ SmiShiftLeft(left, left, right, &use_fp_on_smis); |
| 1058 #endif |
1000 __ movq(rax, left); | 1059 __ movq(rax, left); |
1001 break; | 1060 break; |
1002 | 1061 |
1003 case Token::SAR: | 1062 case Token::SAR: |
1004 __ SmiShiftArithmeticRight(left, left, right); | 1063 __ SmiShiftArithmeticRight(left, left, right); |
1005 __ movq(rax, left); | 1064 __ movq(rax, left); |
1006 break; | 1065 break; |
1007 | 1066 |
1008 case Token::SHR: | 1067 case Token::SHR: |
| 1068 #ifdef V8_TARGET_ARCH_X32 |
| 1069 __ movl(kScratchRegister, left); |
| 1070 #endif |
1009 __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); | 1071 __ SmiShiftLogicalRight(left, left, right, &use_fp_on_smis); |
1010 __ movq(rax, left); | 1072 __ movq(rax, left); |
1011 break; | 1073 break; |
1012 | 1074 |
1013 default: | 1075 default: |
1014 UNREACHABLE(); | 1076 UNREACHABLE(); |
1015 } | 1077 } |
1016 | 1078 |
1017 // 5. Emit return of result in rax. Some operations have registers pushed. | 1079 // 5. Emit return of result in rax. Some operations have registers pushed. |
1018 __ ret(0); | 1080 __ ret(0); |
1019 | 1081 |
1020 if (use_fp_on_smis.is_linked()) { | 1082 if (use_fp_on_smis.is_linked()) { |
1021 // 6. For some operations emit inline code to perform floating point | 1083 // 6. For some operations emit inline code to perform floating point |
1022 // operations on known smis (e.g., if the result of the operation | 1084 // operations on known smis (e.g., if the result of the operation |
1023 // overflowed the smi range). | 1085 // overflowed the smi range). |
1024 __ bind(&use_fp_on_smis); | 1086 __ bind(&use_fp_on_smis); |
1025 if (op == Token::DIV || op == Token::MOD) { | 1087 if (op == Token::DIV || op == Token::MOD) { |
1026 // Restore left and right to rdx and rax. | 1088 // Restore left and right to rdx and rax. |
1027 __ movq(rdx, rcx); | 1089 __ movq(rdx, rcx); |
1028 __ movq(rax, rbx); | 1090 __ movq(rax, rbx); |
1029 } | 1091 } |
1030 | 1092 |
1031 if (generate_inline_heapnumber_results) { | 1093 if (generate_inline_heapnumber_results) { |
1032 __ AllocateHeapNumber(rcx, rbx, slow); | 1094 __ AllocateHeapNumber(rcx, rbx, slow); |
1033 Comment perform_float(masm, "-- Perform float operation on smis"); | 1095 Comment perform_float(masm, "-- Perform float operation on smis"); |
| 1096 #ifndef V8_TARGET_ARCH_X32 |
1034 if (op == Token::SHR) { | 1097 if (op == Token::SHR) { |
1035 __ SmiToInteger32(left, left); | 1098 __ SmiToInteger32(left, left); |
1036 __ cvtqsi2sd(xmm0, left); | 1099 __ cvtqsi2sd(xmm0, left); |
1037 } else { | 1100 } else { |
| 1101 #else |
| 1102 if (op == Token::SHL) { |
| 1103 __ cvtlsi2sd(xmm0, left); |
| 1104 } else if (op == Token::SHR) { |
| 1105 // The value of left comes from MacroAssembler::SmiShiftLogicalRight. |
| 1106 // We allow a logical shift amount of: |
| 1107 // 0 : it might turn a signed integer into an unsigned integer |
| 1108 // 1 : the result might be above 2^30 - 1 |
| 1109 __ cvtqsi2sd(xmm0, left); |
| 1110 } else { |
| 1111 #endif |
1038 FloatingPointHelper::LoadSSE2SmiOperands(masm); | 1112 FloatingPointHelper::LoadSSE2SmiOperands(masm); |
1039 switch (op) { | 1113 switch (op) { |
1040 case Token::ADD: __ addsd(xmm0, xmm1); break; | 1114 case Token::ADD: __ addsd(xmm0, xmm1); break; |
1041 case Token::SUB: __ subsd(xmm0, xmm1); break; | 1115 case Token::SUB: __ subsd(xmm0, xmm1); break; |
1042 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 1116 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
1043 case Token::DIV: __ divsd(xmm0, xmm1); break; | 1117 case Token::DIV: __ divsd(xmm0, xmm1); break; |
1044 default: UNREACHABLE(); | 1118 default: UNREACHABLE(); |
1045 } | 1119 } |
1046 } | 1120 } |
1047 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); | 1121 __ movsd(FieldOperand(rcx, HeapNumber::kValueOffset), xmm0); |
1048 __ movq(rax, rcx); | 1122 __ movq(rax, rcx); |
1049 __ ret(0); | 1123 __ ret(0); |
1050 } else { | 1124 } else { |
| 1125 #ifdef V8_TARGET_ARCH_X32 |
| 1126 // Restore the original left value from kScratchRegister for the stub call. |
| 1127 // kScratchRegister is not killed by MacroAssembler::SmiShiftLogicalRight |
| 1128 // and is not killed by MacroAssembler::SmiShiftLeft either. |
| 1129 if (op == Token::SHL || op == Token::SHR) { |
| 1130 __ movl(left, kScratchRegister); |
| 1131 } |
| 1132 #endif |
1051 __ jmp(&fail); | 1133 __ jmp(&fail); |
1052 } | 1134 } |
1053 } | 1135 } |
1054 | 1136 |
1055 // 7. Non-smi operands reach the end of the code generated by | 1137 // 7. Non-smi operands reach the end of the code generated by |
1056 // GenerateSmiCode, and fall through to subsequent code, | 1138 // GenerateSmiCode, and fall through to subsequent code, |
1057 // with the operands in rdx and rax. | 1139 // with the operands in rdx and rax. |
1058 // But first we check if non-smi values are HeapNumbers holding | 1140 // But first we check if non-smi values are HeapNumbers holding |
1059 // values that could be smi. | 1141 // values that could be smi. |
1060 __ bind(¬_smis); | 1142 __ bind(¬_smis); |
(...skipping 18 matching lines...)
1079 | 1161 |
1080 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, | 1162 static void BinaryOpStub_GenerateHeapResultAllocation(MacroAssembler* masm, |
1081 Label* alloc_failure, | 1163 Label* alloc_failure, |
1082 OverwriteMode mode); | 1164 OverwriteMode mode); |
1083 | 1165 |
1084 | 1166 |
1085 static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, | 1167 static void BinaryOpStub_GenerateFloatingPointCode(MacroAssembler* masm, |
1086 Label* allocation_failure, | 1168 Label* allocation_failure, |
1087 Label* non_numeric_failure, | 1169 Label* non_numeric_failure, |
1088 Token::Value op, | 1170 Token::Value op, |
| 1171 #ifdef V8_TARGET_ARCH_X32 |
| 1172 BinaryOpIC::TypeInfo |
| 1173 result_type, |
| 1174 Label* non_int32_failure, |
| 1175 #endif |
1089 OverwriteMode mode) { | 1176 OverwriteMode mode) { |
1090 switch (op) { | 1177 switch (op) { |
1091 case Token::ADD: | 1178 case Token::ADD: |
1092 case Token::SUB: | 1179 case Token::SUB: |
1093 case Token::MUL: | 1180 case Token::MUL: |
1094 case Token::DIV: { | 1181 case Token::DIV: { |
1095 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); | 1182 FloatingPointHelper::LoadSSE2UnknownOperands(masm, non_numeric_failure); |
1096 | 1183 |
1097 switch (op) { | 1184 switch (op) { |
1098 case Token::ADD: __ addsd(xmm0, xmm1); break; | 1185 case Token::ADD: __ addsd(xmm0, xmm1); break; |
1099 case Token::SUB: __ subsd(xmm0, xmm1); break; | 1186 case Token::SUB: __ subsd(xmm0, xmm1); break; |
1100 case Token::MUL: __ mulsd(xmm0, xmm1); break; | 1187 case Token::MUL: __ mulsd(xmm0, xmm1); break; |
1101 case Token::DIV: __ divsd(xmm0, xmm1); break; | 1188 case Token::DIV: __ divsd(xmm0, xmm1); break; |
1102 default: UNREACHABLE(); | 1189 default: UNREACHABLE(); |
1103 } | 1190 } |
| 1191 #ifdef V8_TARGET_ARCH_X32 |
| 1192 if (non_int32_failure != NULL) { |
| 1193 if (result_type <= BinaryOpIC::INT32) { |
| 1194 __ cvttsd2si(kScratchRegister, xmm0); |
| 1195 __ cvtlsi2sd(xmm2, kScratchRegister); |
| 1196 __ pcmpeqd(xmm2, xmm0); |
| 1197 __ movmskpd(rcx, xmm2); |
| 1198 __ testl(rcx, Immediate(1)); |
| 1199 __ j(zero, non_int32_failure); |
| 1200 } |
| 1201 } |
| 1202 #endif |
1104 BinaryOpStub_GenerateHeapResultAllocation( | 1203 BinaryOpStub_GenerateHeapResultAllocation( |
1105 masm, allocation_failure, mode); | 1204 masm, allocation_failure, mode); |
1106 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | 1205 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
1107 __ ret(0); | 1206 __ ret(0); |
1108 break; | 1207 break; |
1109 } | 1208 } |
1110 case Token::MOD: { | 1209 case Token::MOD: { |
1111 // For MOD we jump to the allocation_failure label, to call runtime. | 1210 // For MOD we jump to the allocation_failure label, to call runtime. |
1112 __ jmp(allocation_failure); | 1211 __ jmp(allocation_failure); |
1113 break; | 1212 break; |
1114 } | 1213 } |
1115 case Token::BIT_OR: | 1214 case Token::BIT_OR: |
1116 case Token::BIT_AND: | 1215 case Token::BIT_AND: |
1117 case Token::BIT_XOR: | 1216 case Token::BIT_XOR: |
1118 case Token::SAR: | 1217 case Token::SAR: |
1119 case Token::SHL: | 1218 case Token::SHL: |
1120 case Token::SHR: { | 1219 case Token::SHR: { |
1121 Label non_smi_shr_result; | 1220 Label non_smi_shr_result; |
1122 Register heap_number_map = r9; | 1221 Register heap_number_map = r9; |
| 1222 #ifdef V8_TARGET_ARCH_X32 |
| 1223 __ movl(kScratchRegister, rax); |
| 1224 #endif |
1123 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1225 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
1124 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, | 1226 FloatingPointHelper::LoadAsIntegers(masm, non_numeric_failure, |
1125 heap_number_map); | 1227 heap_number_map); |
1126 switch (op) { | 1228 switch (op) { |
1127 case Token::BIT_OR: __ orl(rax, rcx); break; | 1229 case Token::BIT_OR: __ orl(rax, rcx); break; |
1128 case Token::BIT_AND: __ andl(rax, rcx); break; | 1230 case Token::BIT_AND: __ andl(rax, rcx); break; |
1129 case Token::BIT_XOR: __ xorl(rax, rcx); break; | 1231 case Token::BIT_XOR: __ xorl(rax, rcx); break; |
1130 case Token::SAR: __ sarl_cl(rax); break; | 1232 case Token::SAR: __ sarl_cl(rax); break; |
1131 case Token::SHL: __ shll_cl(rax); break; | 1233 case Token::SHL: __ shll_cl(rax); break; |
1132 case Token::SHR: { | 1234 case Token::SHR: { |
1133 __ shrl_cl(rax); | 1235 __ shrl_cl(rax); |
| 1236 #ifndef V8_TARGET_ARCH_X32 |
1134 // Check if result is negative. This can only happen for a shift | 1237 // Check if result is negative. This can only happen for a shift |
1135 // by zero. | 1238 // by zero. |
1136 __ testl(rax, rax); | 1239 __ testl(rax, rax); |
1137 __ j(negative, &non_smi_shr_result); | 1240 __ j(negative, &non_smi_shr_result); |
| 1241 #endif |
1138 break; | 1242 break; |
1139 } | 1243 } |
1140 default: UNREACHABLE(); | 1244 default: UNREACHABLE(); |
1141 } | 1245 } |
| 1246 #ifndef V8_TARGET_ARCH_X32 |
1142 STATIC_ASSERT(kSmiValueSize == 32); | 1247 STATIC_ASSERT(kSmiValueSize == 32); |
| 1248 #else |
| 1249 STATIC_ASSERT(kSmiValueSize == 31); |
| 1250 if (op == Token::SHR) { |
| 1251 __ testl(rax, Immediate(0xc0000000)); |
| 1252 __ j(not_zero, &non_smi_shr_result); |
| 1253 } else { |
| 1254 __ cmpl(rax, Immediate(0xc0000000)); |
| 1255 __ j(negative, &non_smi_shr_result, Label::kNear); |
| 1256 } |
| 1257 #endif |
1143 // Tag smi result and return. | 1258 // Tag smi result and return. |
1144 __ Integer32ToSmi(rax, rax); | 1259 __ Integer32ToSmi(rax, rax); |
1145 __ Ret(); | 1260 __ Ret(); |
1146 | 1261 |
| 1262 #ifndef V8_TARGET_ARCH_X32 |
1147 // Logical shift right can produce an unsigned int32 that is not | 1263 // Logical shift right can produce an unsigned int32 that is not |
1148 // an int32, and so is not in the smi range. Allocate a heap number | 1264 // an int32, and so is not in the smi range. Allocate a heap number |
1149 // in that case. | 1265 // in that case. |
1150 if (op == Token::SHR) { | 1266 if (op == Token::SHR) { |
1151 __ bind(&non_smi_shr_result); | 1267 __ bind(&non_smi_shr_result); |
1152 Label allocation_failed; | 1268 Label allocation_failed; |
1153 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). | 1269 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
1154 // Allocate heap number in new space. | 1270 // Allocate heap number in new space. |
1155 // Not using AllocateHeapNumber macro in order to reuse | 1271 // Not using AllocateHeapNumber macro in order to reuse |
1156 // already loaded heap_number_map. | 1272 // already loaded heap_number_map. |
1157 __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, | 1273 __ Allocate(HeapNumber::kSize, rax, rdx, no_reg, &allocation_failed, |
1158 TAG_OBJECT); | 1274 TAG_OBJECT); |
1159 // Set the map. | 1275 // Set the map. |
1160 __ AssertRootValue(heap_number_map, | 1276 __ AssertRootValue(heap_number_map, |
1161 Heap::kHeapNumberMapRootIndex, | 1277 Heap::kHeapNumberMapRootIndex, |
1162 "HeapNumberMap register clobbered."); | 1278 "HeapNumberMap register clobbered."); |
1163 __ movq(FieldOperand(rax, HeapObject::kMapOffset), | 1279 __ movq(FieldOperand(rax, HeapObject::kMapOffset), |
1164 heap_number_map); | 1280 heap_number_map); |
1165 __ cvtqsi2sd(xmm0, rbx); | 1281 __ cvtqsi2sd(xmm0, rbx); |
1166 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); | 1282 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
1167 __ Ret(); | 1283 __ Ret(); |
1168 | 1284 |
1169 __ bind(&allocation_failed); | 1285 __ bind(&allocation_failed); |
1170 // We need tagged values in rdx and rax for the following code, | 1286 // We need tagged values in rdx and rax for the following code, |
1171 // not int32 in rax and rcx. | 1287 // not int32 in rax and rcx. |
1172 __ Integer32ToSmi(rax, rcx); | 1288 __ Integer32ToSmi(rax, rcx); |
1173 __ Integer32ToSmi(rdx, rbx); | 1289 __ Integer32ToSmi(rdx, rbx); |
1174 __ jmp(allocation_failure); | 1290 __ jmp(allocation_failure); |
1175 } | 1291 } |
| 1292 #else |
| 1293 __ bind(&non_smi_shr_result); |
| 1294 Label allocation_failed; |
| 1295 __ movl(rbx, rax); // rbx holds result value (uint32 value as int64). |
| 1296 // Allocate heap number in new space. |
| 1297 // Not using AllocateHeapNumber macro in order to reuse |
| 1298 // already loaded heap_number_map. |
| 1299 __ Allocate(HeapNumber::kSize, rax, r8, no_reg, &allocation_failed, |
| 1300 TAG_OBJECT); |
| 1301 // Set the map. |
| 1302 __ AssertRootValue(heap_number_map, |
| 1303 Heap::kHeapNumberMapRootIndex, |
| 1304 "HeapNumberMap register clobbered."); |
| 1305 __ movl(FieldOperand(rax, HeapObject::kMapOffset), |
| 1306 heap_number_map); |
| 1307 if (op == Token::SHR) { |
| 1308 __ cvtqsi2sd(xmm0, rbx); |
| 1309 } else { |
| 1310 // All other operations return a signed int32, so we |
| 1311 // use cvtlsi2sd here to retain the sign bit. |
| 1312 __ cvtlsi2sd(xmm0, rbx); |
| 1313 } |
| 1314 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |
| 1315 __ Ret(); |
| 1316 |
| 1317 __ bind(&allocation_failed); |
| 1318 // Restore the right operand from kScratchRegister. |
| 1319 // Left operand is in rdx, not changed in this function. |
| 1320 __ movl(rax, kScratchRegister); |
| 1321 __ jmp(allocation_failure); |
| 1322 #endif |
1176 break; | 1323 break; |
1177 } | 1324 } |
1178 default: UNREACHABLE(); break; | 1325 default: UNREACHABLE(); break; |
1179 } | 1326 } |
1180 // No fall-through from this generated code. | 1327 // No fall-through from this generated code. |
1181 if (FLAG_debug_code) { | 1328 if (FLAG_debug_code) { |
1182 __ Abort("Unexpected fall-through in " | 1329 __ Abort("Unexpected fall-through in " |
1183 "BinaryStub_GenerateFloatingPointCode."); | 1330 "BinaryStub_GenerateFloatingPointCode."); |
1184 } | 1331 } |
1185 } | 1332 } |
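A note on the cvttsd2si / cvtlsi2sd / pcmpeqd sequence added in the x32 branch of this function: it checks whether the double result is exactly representable as an int32 by truncating, converting back, and comparing bit patterns. A hedged C++ sketch of that test (ignoring that cvttsd2si maps out-of-range inputs to the integer-indefinite value, which the plain C++ cast does not):

    #include <cstdint>
    #include <cstring>

    static bool FitsInt32Exactly(double value) {
      int32_t truncated = static_cast<int32_t>(value);     // cvttsd2si
      double round_trip = static_cast<double>(truncated);  // cvtlsi2sd
      uint64_t a, b;
      std::memcpy(&a, &value, sizeof(a));
      std::memcpy(&b, &round_trip, sizeof(b));
      return a == b;  // pcmpeqd + movmskpd in the generated code
    }

The same round-trip idea appears again in BinaryOpStub_CheckSmiInput further down.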
1186 | 1333 |
1187 | 1334 |
1188 static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn( | 1335 static void BinaryOpStub_GenerateRegisterArgsPushUnderReturn( |
1189 MacroAssembler* masm) { | 1336 MacroAssembler* masm) { |
1190 // Push arguments, but ensure they are under the return address | 1337 // Push arguments, but ensure they are under the return address |
1191 // for a tail call. | 1338 // for a tail call. |
1192 __ pop(rcx); | 1339 __k pop(rcx); |
1193 __ push(rdx); | 1340 __ push(rdx); |
1194 __ push(rax); | 1341 __ push(rax); |
1195 __ push(rcx); | 1342 __k push(rcx); |
1196 } | 1343 } |
1197 | 1344 |
1198 | 1345 |
1199 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { | 1346 void BinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
1200 ASSERT(op_ == Token::ADD); | 1347 ASSERT(op_ == Token::ADD); |
1201 Label left_not_string, call_runtime; | 1348 Label left_not_string, call_runtime; |
1202 | 1349 |
1203 // Registers containing left and right operands respectively. | 1350 // Registers containing left and right operands respectively. |
1204 Register left = rdx; | 1351 Register left = rdx; |
1205 Register right = rax; | 1352 Register right = rax; |
(...skipping 55 matching lines...)
1261 FrameScope scope(masm, StackFrame::INTERNAL); | 1408 FrameScope scope(masm, StackFrame::INTERNAL); |
1262 GenerateRegisterArgsPush(masm); | 1409 GenerateRegisterArgsPush(masm); |
1263 GenerateCallRuntime(masm); | 1410 GenerateCallRuntime(masm); |
1264 } | 1411 } |
1265 __ Ret(); | 1412 __ Ret(); |
1266 } | 1413 } |
1267 } | 1414 } |
1268 | 1415 |
1269 | 1416 |
1270 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { | 1417 void BinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 1418 #ifndef V8_TARGET_ARCH_X32 |
1271 // The int32 case is identical to the Smi case. We avoid creating this | 1419 // The int32 case is identical to the Smi case. We avoid creating this |
1272 // ic state on x64. | 1420 // ic state on x64. |
1273 UNREACHABLE(); | 1421 UNREACHABLE(); |
| 1422 #else |
| 1423 ASSERT(Max(left_type_, right_type_) == BinaryOpIC::INT32); |
| 1424 |
| 1425 Label gc_required, not_number, not_int32; |
| 1426 BinaryOpStub_GenerateFloatingPointCode(masm, &gc_required, ¬_number, |
| 1427 op_, result_type_, ¬_int32, mode_); |
| 1428 |
| 1429 __ bind(¬_number); |
| 1430 __ bind(¬_int32); |
| 1431 GenerateTypeTransition(masm); |
| 1432 |
| 1433 __ bind(&gc_required); |
| 1434 { |
| 1435 FrameScope scope(masm, StackFrame::INTERNAL); |
| 1436 GenerateRegisterArgsPush(masm); |
| 1437 GenerateCallRuntime(masm); |
| 1438 } |
| 1439 __ Ret(); |
| 1440 #endif |
1274 } | 1441 } |
1275 | 1442 |
1276 | 1443 |
1277 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { | 1444 void BinaryOpStub::GenerateBothStringStub(MacroAssembler* masm) { |
1278 Label call_runtime; | 1445 Label call_runtime; |
1279 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); | 1446 ASSERT(left_type_ == BinaryOpIC::STRING && right_type_ == BinaryOpIC::STRING); |
1280 ASSERT(op_ == Token::ADD); | 1447 ASSERT(op_ == Token::ADD); |
1281 // If both arguments are strings, call the string add stub. | 1448 // If both arguments are strings, call the string add stub. |
1282 // Otherwise, do a transition. | 1449 // Otherwise, do a transition. |
1283 | 1450 |
(...skipping 65 matching lines...)
1349 // HeapNumbers containing 32bit integer values are also allowed. | 1516 // HeapNumbers containing 32bit integer values are also allowed. |
1350 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1517 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
1351 __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map); | 1518 __ cmpq(FieldOperand(input, HeapObject::kMapOffset), heap_number_map); |
1352 __ j(not_equal, fail); | 1519 __ j(not_equal, fail); |
1353 __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset)); | 1520 __ movsd(xmm0, FieldOperand(input, HeapNumber::kValueOffset)); |
1354 // Convert, convert back, and compare the two doubles' bits. | 1521 // Convert, convert back, and compare the two doubles' bits. |
1355 __ cvttsd2siq(scratch2, xmm0); | 1522 __ cvttsd2siq(scratch2, xmm0); |
1356 __ cvtlsi2sd(xmm1, scratch2); | 1523 __ cvtlsi2sd(xmm1, scratch2); |
1357 __ movq(scratch1, xmm0); | 1524 __ movq(scratch1, xmm0); |
1358 __ movq(scratch2, xmm1); | 1525 __ movq(scratch2, xmm1); |
1359 __ cmpq(scratch1, scratch2); | 1526 __k cmpq(scratch1, scratch2); |
1360 __ j(not_equal, fail); | 1527 __ j(not_equal, fail); |
1361 __ bind(&ok); | 1528 __ bind(&ok); |
1362 } | 1529 } |
1363 | 1530 |
1364 | 1531 |
1365 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { | 1532 void BinaryOpStub::GenerateNumberStub(MacroAssembler* masm) { |
1366 Label gc_required, not_number; | 1533 Label gc_required, not_number; |
1367 | 1534 |
1368 // It could be that only SMIs have been seen at either the left | 1535 // It could be that only SMIs have been seen at either the left |
1369 // or the right operand. For precise type feedback, patch the IC | 1536 // or the right operand. For precise type feedback, patch the IC |
1370 // again if this changes. | 1537 // again if this changes. |
1371 if (left_type_ == BinaryOpIC::SMI) { | 1538 if (left_type_ == BinaryOpIC::SMI) { |
1372 BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number); | 1539 BinaryOpStub_CheckSmiInput(masm, rdx, ¬_number); |
1373 } | 1540 } |
1374 if (right_type_ == BinaryOpIC::SMI) { | 1541 if (right_type_ == BinaryOpIC::SMI) { |
1375 BinaryOpStub_CheckSmiInput(masm, rax, ¬_number); | 1542 BinaryOpStub_CheckSmiInput(masm, rax, ¬_number); |
1376 } | 1543 } |
1377 | 1544 |
| 1545 #ifndef V8_TARGET_ARCH_X32 |
1378 BinaryOpStub_GenerateFloatingPointCode( | 1546 BinaryOpStub_GenerateFloatingPointCode( |
1379 masm, &gc_required, ¬_number, op_, mode_); | 1547 masm, &gc_required, ¬_number, op_, mode_); |
| 1548 #else |
| 1549 BinaryOpStub_GenerateFloatingPointCode( |
| 1550 masm, &gc_required, ¬_number, op_, result_type_, NULL, mode_); |
| 1551 #endif |
1380 | 1552 |
1381 __ bind(¬_number); | 1553 __ bind(¬_number); |
1382 GenerateTypeTransition(masm); | 1554 GenerateTypeTransition(masm); |
1383 | 1555 |
1384 __ bind(&gc_required); | 1556 __ bind(&gc_required); |
1385 { | 1557 { |
1386 FrameScope scope(masm, StackFrame::INTERNAL); | 1558 FrameScope scope(masm, StackFrame::INTERNAL); |
1387 GenerateRegisterArgsPush(masm); | 1559 GenerateRegisterArgsPush(masm); |
1388 GenerateCallRuntime(masm); | 1560 GenerateCallRuntime(masm); |
1389 } | 1561 } |
1390 __ Ret(); | 1562 __ Ret(); |
1391 } | 1563 } |
1392 | 1564 |
1393 | 1565 |
1394 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { | 1566 void BinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
1395 Label call_runtime, call_string_add_or_runtime; | 1567 Label call_runtime, call_string_add_or_runtime; |
1396 | 1568 |
1397 BinaryOpStub_GenerateSmiCode( | 1569 BinaryOpStub_GenerateSmiCode( |
1398 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); | 1570 masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS, op_); |
1399 | 1571 |
| 1572 #ifndef V8_TARGET_ARCH_X32 |
1400 BinaryOpStub_GenerateFloatingPointCode( | 1573 BinaryOpStub_GenerateFloatingPointCode( |
1401 masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); | 1574 masm, &call_runtime, &call_string_add_or_runtime, op_, mode_); |
| 1575 #else |
| 1576 BinaryOpStub_GenerateFloatingPointCode( |
| 1577 masm, &call_runtime, &call_string_add_or_runtime, op_, |
| 1578 result_type_, NULL, mode_); |
| 1579 #endif |
1402 | 1580 |
1403 __ bind(&call_string_add_or_runtime); | 1581 __ bind(&call_string_add_or_runtime); |
1404 if (op_ == Token::ADD) { | 1582 if (op_ == Token::ADD) { |
1405 GenerateAddStrings(masm); | 1583 GenerateAddStrings(masm); |
1406 } | 1584 } |
1407 | 1585 |
1408 __ bind(&call_runtime); | 1586 __ bind(&call_runtime); |
1409 { | 1587 { |
1410 FrameScope scope(masm, StackFrame::INTERNAL); | 1588 FrameScope scope(masm, StackFrame::INTERNAL); |
1411 GenerateRegisterArgsPush(masm); | 1589 GenerateRegisterArgsPush(masm); |
(...skipping 62 matching lines...)
1474 // Output: | 1652 // Output: |
1475 // xmm1 : untagged double result. | 1653 // xmm1 : untagged double result. |
1476 | 1654 |
1477 Label runtime_call; | 1655 Label runtime_call; |
1478 Label runtime_call_clear_stack; | 1656 Label runtime_call_clear_stack; |
1479 Label skip_cache; | 1657 Label skip_cache; |
1480 const bool tagged = (argument_type_ == TAGGED); | 1658 const bool tagged = (argument_type_ == TAGGED); |
1481 if (tagged) { | 1659 if (tagged) { |
1482 Label input_not_smi, loaded; | 1660 Label input_not_smi, loaded; |
1483 // Test that rax is a number. | 1661 // Test that rax is a number. |
1484 __ movq(rax, Operand(rsp, kPointerSize)); | 1662 __a movq(rax, Operand(rsp, 1 * kPointerSize)); |
1485 __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear); | 1663 __ JumpIfNotSmi(rax, &input_not_smi, Label::kNear); |
1486 // Input is a smi. Untag and load it onto the FPU stack. | 1664 // Input is a smi. Untag and load it onto the FPU stack. |
1487 // Then load the bits of the double into rbx. | 1665 // Then load the bits of the double into rbx. |
1488 __ SmiToInteger32(rax, rax); | 1666 __ SmiToInteger32(rax, rax); |
1489 __ subq(rsp, Immediate(kDoubleSize)); | 1667 __ subq(rsp, Immediate(kDoubleSize)); |
1490 __ cvtlsi2sd(xmm1, rax); | 1668 __ cvtlsi2sd(xmm1, rax); |
1491 __ movsd(Operand(rsp, 0), xmm1); | 1669 __ movsd(Operand(rsp, 0), xmm1); |
1492 __ movq(rbx, xmm1); | 1670 __ movq(rbx, xmm1); |
1493 __ movq(rdx, xmm1); | 1671 __ movq(rdx, xmm1); |
1494 __ fld_d(Operand(rsp, 0)); | 1672 __ fld_d(Operand(rsp, 0)); |
1495 __ addq(rsp, Immediate(kDoubleSize)); | 1673 __ addq(rsp, Immediate(kDoubleSize)); |
1496 __ jmp(&loaded, Label::kNear); | 1674 __ jmp(&loaded, Label::kNear); |
1497 | 1675 |
1498 __ bind(&input_not_smi); | 1676 __ bind(&input_not_smi); |
1499 // Check if input is a HeapNumber. | 1677 // Check if input is a HeapNumber. |
1500 __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex); | 1678 __ LoadRoot(rbx, Heap::kHeapNumberMapRootIndex); |
1501 __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); | 1679 __ cmpq(rbx, FieldOperand(rax, HeapObject::kMapOffset)); |
1502 __ j(not_equal, &runtime_call); | 1680 __ j(not_equal, &runtime_call); |
1503 // Input is a HeapNumber. Push it on the FPU stack and load its | 1681 // Input is a HeapNumber. Push it on the FPU stack and load its |
1504 // bits into rbx. | 1682 // bits into rbx. |
1505 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); | 1683 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); |
1506 __ movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset)); | 1684 __k movq(rbx, FieldOperand(rax, HeapNumber::kValueOffset)); |
1507 __ movq(rdx, rbx); | 1685 __k movq(rdx, rbx); |
1508 | 1686 |
1509 __ bind(&loaded); | 1687 __ bind(&loaded); |
1510 } else { // UNTAGGED. | 1688 } else { // UNTAGGED. |
1511 __ movq(rbx, xmm1); | 1689 __ movq(rbx, xmm1); |
1512 __ movq(rdx, xmm1); | 1690 __ movq(rdx, xmm1); |
1513 } | 1691 } |
1514 | 1692 |
1515 // ST[0] == double value, if TAGGED. | 1693 // ST[0] == double value, if TAGGED. |
1516 // rbx = bits of double value. | 1694 // rbx = bits of double value. |
1517 // rdx = also bits of double value. | 1695 // rdx = also bits of double value. |
1518 // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic): | 1696 // Compute hash (h is 32 bits, bits are 64 and the shifts are arithmetic): |
1519 // h = h0 = bits ^ (bits >> 32); | 1697 // h = h0 = bits ^ (bits >> 32); |
1520 // h ^= h >> 16; | 1698 // h ^= h >> 16; |
1521 // h ^= h >> 8; | 1699 // h ^= h >> 8; |
1522 // h = h & (cacheSize - 1); | 1700 // h = h & (cacheSize - 1); |
1523 // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1) | 1701 // or h = (h0 ^ (h0 >> 8) ^ (h0 >> 16) ^ (h0 >> 24)) & (cacheSize - 1) |
1524 __ sar(rdx, Immediate(32)); | 1702 __k sar(rdx, Immediate(32)); |
1525 __ xorl(rdx, rbx); | 1703 __ xorl(rdx, rbx); |
1526 __ movl(rcx, rdx); | 1704 __ movl(rcx, rdx); |
1527 __ movl(rax, rdx); | 1705 __ movl(rax, rdx); |
1528 __ movl(rdi, rdx); | 1706 __ movl(rdi, rdx); |
1529 __ sarl(rdx, Immediate(8)); | 1707 __ sarl(rdx, Immediate(8)); |
1530 __ sarl(rcx, Immediate(16)); | 1708 __ sarl(rcx, Immediate(16)); |
1531 __ sarl(rax, Immediate(24)); | 1709 __ sarl(rax, Immediate(24)); |
1532 __ xorl(rcx, rdx); | 1710 __ xorl(rcx, rdx); |
1533 __ xorl(rax, rdi); | 1711 __ xorl(rax, rdi); |
1534 __ xorl(rcx, rax); | 1712 __ xorl(rcx, rax); |
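A minimal C++ sketch of the hash described in the comment above, assuming a power-of-two cache size (kCacheSize = 512 here is an assumption, not taken from this CL): the 64 bits of the input double are folded down to a cache index exactly as the pseudo-code states.

    #include <cstdint>
    #include <cstring>

    static const uint32_t kCacheSize = 512;  // assumed power of two

    uint32_t TranscendentalCacheHash(double input) {
      uint64_t bits;
      std::memcpy(&bits, &input, sizeof(bits));            // rbx/rdx above
      uint32_t h = static_cast<uint32_t>(bits) ^
                   static_cast<uint32_t>(bits >> 32);      // bits ^ (bits >> 32)
      h = h ^ (h >> 8) ^ (h >> 16) ^ (h >> 24);            // the three xorl's
      return h & (kCacheSize - 1);                         // mask to an index
    }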
(...skipping 23 matching lines...)
1558 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); | 1736 char* elem_in1 = reinterpret_cast<char*>(&(test_elem[0].in[1])); |
1559 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); | 1737 char* elem_out = reinterpret_cast<char*>(&(test_elem[0].output)); |
1560 // Two uint32_t's and a pointer per element. | 1738 // Two uint32_t's and a pointer per element. |
1561 CHECK_EQ(2 * kIntSize + 1 * kPointerSize, | 1739 CHECK_EQ(2 * kIntSize + 1 * kPointerSize, |
1562 static_cast<int>(elem2_start - elem_start)); | 1740 static_cast<int>(elem2_start - elem_start)); |
1563 CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start)); | 1741 CHECK_EQ(0, static_cast<int>(elem_in0 - elem_start)); |
1564 CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start)); | 1742 CHECK_EQ(kIntSize, static_cast<int>(elem_in1 - elem_start)); |
1565 CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start)); | 1743 CHECK_EQ(2 * kIntSize, static_cast<int>(elem_out - elem_start)); |
1566 } | 1744 } |
1567 #endif | 1745 #endif |
| 1746 #ifndef V8_TARGET_ARCH_X32 |
1568 // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16]. | 1747 // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*16]. |
1569 __ addl(rcx, rcx); | 1748 __ addl(rcx, rcx); |
1570 __ lea(rcx, Operand(rax, rcx, times_8, 0)); | 1749 __ lea(rcx, Operand(rax, rcx, times_8, 0)); |
| 1750 #else |
| 1751 // Find the address of the rcx'th entry in the cache, i.e., &rax[rcx*12]. |
| 1752 __ leal(rcx, Operand(rcx, rcx, times_2, 0)); |
| 1753 __ leal(rcx, Operand(rax, rcx, times_4, 0)); |
| 1754 #endif |
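The two addressing sequences above assume a cache element of two 32-bit inputs plus one tagged pointer: 16 bytes with 8-byte pointers, 12 bytes with 4-byte pointers. A sketch (struct and function names are illustrative) of how each lea sequence reaches element idx:

    #include <cstdint>

    struct CacheElement {        // mirrors the CHECK_EQ self-test above
      uint32_t in[2];            // bits of the input double
      void*    output;           // tagged pointer to the cached HeapNumber
    };

    uintptr_t ElementAddress64(uintptr_t base, uintptr_t idx) {
      idx += idx;                // addl rcx, rcx            -> idx * 2
      return base + idx * 8;     // lea rcx, [rax + rcx*8]   -> base + idx * 16
    }

    uintptr_t ElementAddress32(uintptr_t base, uintptr_t idx) {
      idx = idx + idx * 2;       // leal rcx, [rcx + rcx*2]  -> idx * 3
      return base + idx * 4;     // leal rcx, [rax + rcx*4]  -> base + idx * 12
    }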
1571 // Check if cache matches: Double value is stored in uint32_t[2] array. | 1755 // Check if cache matches: Double value is stored in uint32_t[2] array. |
1572 Label cache_miss; | 1756 Label cache_miss; |
1573 __ cmpq(rbx, Operand(rcx, 0)); | 1757 __k cmpq(rbx, Operand(rcx, 0)); |
1574 __ j(not_equal, &cache_miss, Label::kNear); | 1758 __ j(not_equal, &cache_miss, Label::kNear); |
1575 // Cache hit! | 1759 // Cache hit! |
1576 Counters* counters = masm->isolate()->counters(); | 1760 Counters* counters = masm->isolate()->counters(); |
1577 __ IncrementCounter(counters->transcendental_cache_hit(), 1); | 1761 __ IncrementCounter(counters->transcendental_cache_hit(), 1); |
1578 __ movq(rax, Operand(rcx, 2 * kIntSize)); | 1762 __ movq(rax, Operand(rcx, 2 * kIntSize)); |
1579 if (tagged) { | 1763 if (tagged) { |
1580 __ fstp(0); // Clear FPU stack. | 1764 __ fstp(0); // Clear FPU stack. |
1581 __ ret(kPointerSize); | 1765 __ ret(kPointerSize); |
1582 } else { // UNTAGGED. | 1766 } else { // UNTAGGED. |
1583 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); | 1767 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); |
1584 __ Ret(); | 1768 __ Ret(); |
1585 } | 1769 } |
1586 | 1770 |
1587 __ bind(&cache_miss); | 1771 __ bind(&cache_miss); |
1588 __ IncrementCounter(counters->transcendental_cache_miss(), 1); | 1772 __ IncrementCounter(counters->transcendental_cache_miss(), 1); |
1589 // Update cache with new value. | 1773 // Update cache with new value. |
1590 if (tagged) { | 1774 if (tagged) { |
1591 __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack); | 1775 __ AllocateHeapNumber(rax, rdi, &runtime_call_clear_stack); |
1592 } else { // UNTAGGED. | 1776 } else { // UNTAGGED. |
1593 __ AllocateHeapNumber(rax, rdi, &skip_cache); | 1777 __ AllocateHeapNumber(rax, rdi, &skip_cache); |
1594 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1); | 1778 __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm1); |
1595 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); | 1779 __ fld_d(FieldOperand(rax, HeapNumber::kValueOffset)); |
1596 } | 1780 } |
1597 GenerateOperation(masm, type_); | 1781 GenerateOperation(masm, type_); |
1598 __ movq(Operand(rcx, 0), rbx); | 1782 __k movq(Operand(rcx, 0), rbx); |
1599 __ movq(Operand(rcx, 2 * kIntSize), rax); | 1783 __ movq(Operand(rcx, 2 * kIntSize), rax); |
1600 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); | 1784 __ fstp_d(FieldOperand(rax, HeapNumber::kValueOffset)); |
1601 if (tagged) { | 1785 if (tagged) { |
1602 __ ret(kPointerSize); | 1786 __ ret(kPointerSize); |
1603 } else { // UNTAGGED. | 1787 } else { // UNTAGGED. |
1604 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); | 1788 __ movsd(xmm1, FieldOperand(rax, HeapNumber::kValueOffset)); |
1605 __ Ret(); | 1789 __ Ret(); |
1606 | 1790 |
1607 // Skip cache and return answer directly, only in untagged case. | 1791 // Skip cache and return answer directly, only in untagged case. |
1608 __ bind(&skip_cache); | 1792 __ bind(&skip_cache); |
(...skipping 62 matching lines...)
1671 Label done; | 1855 Label done; |
1672 if (type == TranscendentalCache::SIN || | 1856 if (type == TranscendentalCache::SIN || |
1673 type == TranscendentalCache::COS || | 1857 type == TranscendentalCache::COS || |
1674 type == TranscendentalCache::TAN) { | 1858 type == TranscendentalCache::TAN) { |
1675 // Both fsin and fcos require arguments in the range +/-2^63 and | 1859 // Both fsin and fcos require arguments in the range +/-2^63 and |
1676 // return NaN for infinities and NaN. They can share all code except | 1860 // return NaN for infinities and NaN. They can share all code except |
1677 // the actual fsin/fcos operation. | 1861 // the actual fsin/fcos operation. |
1678 Label in_range; | 1862 Label in_range; |
1679 // If argument is outside the range -2^63..2^63, fsin/cos doesn't | 1863 // If argument is outside the range -2^63..2^63, fsin/cos doesn't |
1680 // work. We must reduce it to the appropriate range. | 1864 // work. We must reduce it to the appropriate range. |
1681 __ movq(rdi, rbx); | 1865 __k movq(rdi, rbx); |
1682 // Move exponent and sign bits to low bits. | 1866 // Move exponent and sign bits to low bits. |
1683 __ shr(rdi, Immediate(HeapNumber::kMantissaBits)); | 1867 __k shr(rdi, Immediate(HeapNumber::kMantissaBits)); |
1684 // Remove sign bit. | 1868 // Remove sign bit. |
1685 __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1)); | 1869 __ andl(rdi, Immediate((1 << HeapNumber::kExponentBits) - 1)); |
1686 int supported_exponent_limit = (63 + HeapNumber::kExponentBias); | 1870 int supported_exponent_limit = (63 + HeapNumber::kExponentBias); |
1687 __ cmpl(rdi, Immediate(supported_exponent_limit)); | 1871 __ cmpl(rdi, Immediate(supported_exponent_limit)); |
1688 __ j(below, &in_range); | 1872 __ j(below, &in_range); |
1689 // Check for infinity and NaN. Both return NaN for sin. | 1873 // Check for infinity and NaN. Both return NaN for sin. |
1690 __ cmpl(rdi, Immediate(0x7ff)); | 1874 __ cmpl(rdi, Immediate(0x7ff)); |
1691 Label non_nan_result; | 1875 Label non_nan_result; |
1692 __ j(not_equal, &non_nan_result, Label::kNear); | 1876 __ j(not_equal, &non_nan_result, Label::kNear); |
1693 // Input is +/-Infinity or NaN. Result is NaN. | 1877 // Input is +/-Infinity or NaN. Result is NaN. |
1694 __ fstp(0); | 1878 __ fstp(0); |
1695 // NaN is represented by 0x7ff8000000000000. | 1879 // NaN is represented by 0x7ff8000000000000. |
1696 __ subq(rsp, Immediate(kPointerSize)); | 1880 __ subq(rsp, Immediate(kDoubleSize)); |
1697 __ movl(Operand(rsp, 4), Immediate(0x7ff80000)); | 1881 __ movl(Operand(rsp, 4), Immediate(0x7ff80000)); |
1698 __ movl(Operand(rsp, 0), Immediate(0x00000000)); | 1882 __ movl(Operand(rsp, 0), Immediate(0x00000000)); |
1699 __ fld_d(Operand(rsp, 0)); | 1883 __ fld_d(Operand(rsp, 0)); |
1700 __ addq(rsp, Immediate(kPointerSize)); | 1884 __ addq(rsp, Immediate(kDoubleSize)); |
1701 __ jmp(&done); | 1885 __ jmp(&done); |
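A hedged sketch of the classification performed above: the biased exponent is extracted from the high bits, an all-ones exponent (0x7ff) means Infinity/NaN and yields the quiet NaN 0x7ff8000000000000, anything below 63 + bias means |x| < 2^63 and is in range, and everything else needs reduction modulo 2*pi. Names are illustrative.

    #include <cstdint>
    #include <cstring>

    enum class FsinClass { kInRange, kNeedsRangeReduction, kNaNResult };

    FsinClass ClassifyFsinArgument(double x) {
      uint64_t bits;
      std::memcpy(&bits, &x, sizeof(bits));
      uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;  // drop sign
      const uint32_t kBias = 1023;
      if (exponent == 0x7ff) return FsinClass::kNaNResult;        // Inf or NaN
      if (exponent < 63 + kBias) return FsinClass::kInRange;      // |x| < 2^63
      return FsinClass::kNeedsRangeReduction;
    }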
1702 | 1886 |
1703 __ bind(&non_nan_result); | 1887 __ bind(&non_nan_result); |
1704 | 1888 |
1705 // Use fpmod to restrict argument to the range +/-2*PI. | 1889 // Use fpmod to restrict argument to the range +/-2*PI. |
1706 __ movq(rdi, rax); // Save rax before using fnstsw_ax. | 1890 __k movq(rdi, rax); // Save rax before using fnstsw_ax. |
1707 __ fldpi(); | 1891 __ fldpi(); |
1708 __ fadd(0); | 1892 __ fadd(0); |
1709 __ fld(1); | 1893 __ fld(1); |
1710 // FPU Stack: input, 2*pi, input. | 1894 // FPU Stack: input, 2*pi, input. |
1711 { | 1895 { |
1712 Label no_exceptions; | 1896 Label no_exceptions; |
1713 __ fwait(); | 1897 __ fwait(); |
1714 __ fnstsw_ax(); | 1898 __ fnstsw_ax(); |
1715 // Clear if Illegal Operand or Zero Division exceptions are set. | 1899 // Clear if Illegal Operand or Zero Division exceptions are set. |
1716 __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word. | 1900 __ testl(rax, Immediate(5)); // #IO and #ZD flags of FPU status word. |
(...skipping 212 matching lines...)
1929 ? &maybe_undefined_first | 2113 ? &maybe_undefined_first |
1930 : on_not_smis); | 2114 : on_not_smis); |
1931 // Convert HeapNumber to smi if possible. | 2115 // Convert HeapNumber to smi if possible. |
1932 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); | 2116 __ movsd(xmm0, FieldOperand(first, HeapNumber::kValueOffset)); |
1933 __ movq(scratch2, xmm0); | 2117 __ movq(scratch2, xmm0); |
1934 __ cvttsd2siq(smi_result, xmm0); | 2118 __ cvttsd2siq(smi_result, xmm0); |
1935 // Check if conversion was successful by converting back and | 2119 // Check if conversion was successful by converting back and |
1936 // comparing to the original double's bits. | 2120 // comparing to the original double's bits. |
1937 __ cvtlsi2sd(xmm1, smi_result); | 2121 __ cvtlsi2sd(xmm1, smi_result); |
1938 __ movq(kScratchRegister, xmm1); | 2122 __ movq(kScratchRegister, xmm1); |
1939 __ cmpq(scratch2, kScratchRegister); | 2123 __k cmpq(scratch2, kScratchRegister); |
1940 __ j(not_equal, on_not_smis); | 2124 __ j(not_equal, on_not_smis); |
| 2125 #ifdef V8_TARGET_ARCH_X32 |
| 2126 __ cmpl(smi_result, Immediate(0xc0000000)); |
| 2127 __ j(negative, on_not_smis); |
| 2128 #endif |
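A sketch (not the V8 helper) of the conversion test above: truncate the double, convert back, and compare raw bits to reject fractional values, out-of-range values and -0.0; on the x32 target the result must additionally fit the 31-bit smi payload, which is what the extra cmpl against 0xc0000000 / j(negative, ...) enforces.

    #include <cstdint>
    #include <cstring>

    bool DoubleToSmiValue(double value, int32_t* out, bool target_is_x32) {
      if (!(value >= INT32_MIN && value <= INT32_MAX)) return false;  // keep the cast defined
      int32_t as_int = static_cast<int32_t>(value);      // cvttsd2siq (truncating)
      double round_trip = static_cast<double>(as_int);   // cvtlsi2sd
      uint64_t before, after;
      std::memcpy(&before, &value, sizeof(before));
      std::memcpy(&after, &round_trip, sizeof(after));
      if (before != after) return false;                 // fractional value or -0.0
      if (target_is_x32 && (as_int < -(1 << 30) || as_int >= (1 << 30))) {
        return false;                                    // outside the 31-bit smi range
      }
      *out = as_int;
      return true;
    }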
1941 __ Integer32ToSmi(first, smi_result); | 2129 __ Integer32ToSmi(first, smi_result); |
1942 | 2130 |
1943 __ bind(&first_done); | 2131 __ bind(&first_done); |
1944 __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); | 2132 __ JumpIfSmi(second, (on_success != NULL) ? on_success : &done); |
1945 __ bind(&first_smi); | 2133 __ bind(&first_smi); |
1946 __ AssertNotSmi(second); | 2134 __ AssertNotSmi(second); |
1947 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); | 2135 __ cmpq(FieldOperand(second, HeapObject::kMapOffset), heap_number_map); |
1948 __ j(not_equal, | 2136 __ j(not_equal, |
1949 (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) | 2137 (convert_undefined == CONVERT_UNDEFINED_TO_ZERO) |
1950 ? &maybe_undefined_second | 2138 ? &maybe_undefined_second |
1951 : on_not_smis); | 2139 : on_not_smis); |
1952 // Convert second to smi, if possible. | 2140 // Convert second to smi, if possible. |
1953 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); | 2141 __ movsd(xmm0, FieldOperand(second, HeapNumber::kValueOffset)); |
1954 __ movq(scratch2, xmm0); | 2142 __ movq(scratch2, xmm0); |
1955 __ cvttsd2siq(smi_result, xmm0); | 2143 __ cvttsd2siq(smi_result, xmm0); |
1956 __ cvtlsi2sd(xmm1, smi_result); | 2144 __ cvtlsi2sd(xmm1, smi_result); |
1957 __ movq(kScratchRegister, xmm1); | 2145 __ movq(kScratchRegister, xmm1); |
1958 __ cmpq(scratch2, kScratchRegister); | 2146 __k cmpq(scratch2, kScratchRegister); |
1959 __ j(not_equal, on_not_smis); | 2147 __ j(not_equal, on_not_smis); |
| 2148 #ifdef V8_TARGET_ARCH_X32 |
| 2149 __ cmpl(smi_result, Immediate(0xc0000000)); |
| 2150 __ j(negative, on_not_smis); |
| 2151 #endif |
1960 __ Integer32ToSmi(second, smi_result); | 2152 __ Integer32ToSmi(second, smi_result); |
1961 if (on_success != NULL) { | 2153 if (on_success != NULL) { |
1962 __ jmp(on_success); | 2154 __ jmp(on_success); |
1963 } else { | 2155 } else { |
1964 __ jmp(&done); | 2156 __ jmp(&done); |
1965 } | 2157 } |
1966 | 2158 |
1967 __ bind(&maybe_undefined_first); | 2159 __ bind(&maybe_undefined_first); |
1968 __ CompareRoot(first, Heap::kUndefinedValueRootIndex); | 2160 __ CompareRoot(first, Heap::kUndefinedValueRootIndex); |
1969 __ j(not_equal, on_not_smis); | 2161 __ j(not_equal, on_not_smis); |
(...skipping 26 matching lines...)
1996 | 2188 |
1997 // Save 1 in double_result - we need this several times later on. | 2189 // Save 1 in double_result - we need this several times later on. |
1998 __ movq(scratch, Immediate(1)); | 2190 __ movq(scratch, Immediate(1)); |
1999 __ cvtlsi2sd(double_result, scratch); | 2191 __ cvtlsi2sd(double_result, scratch); |
2000 | 2192 |
2001 if (exponent_type_ == ON_STACK) { | 2193 if (exponent_type_ == ON_STACK) { |
2002 Label base_is_smi, unpack_exponent; | 2194 Label base_is_smi, unpack_exponent; |
2003 // The exponent and base are supplied as arguments on the stack. | 2195 // The exponent and base are supplied as arguments on the stack. |
2004 // This can only happen if the stub is called from non-optimized code. | 2196 // This can only happen if the stub is called from non-optimized code. |
2005 // Load input parameters from stack. | 2197 // Load input parameters from stack. |
2006 __ movq(base, Operand(rsp, 2 * kPointerSize)); | 2198 __a movq(base, Operand(rsp, 2 * kPointerSize)); |
2007 __ movq(exponent, Operand(rsp, 1 * kPointerSize)); | 2199 __a movq(exponent, Operand(rsp, 1 * kPointerSize)); |
2008 __ JumpIfSmi(base, &base_is_smi, Label::kNear); | 2200 __ JumpIfSmi(base, &base_is_smi, Label::kNear); |
2009 __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset), | 2201 __ CompareRoot(FieldOperand(base, HeapObject::kMapOffset), |
2010 Heap::kHeapNumberMapRootIndex); | 2202 Heap::kHeapNumberMapRootIndex); |
2011 __ j(not_equal, &call_runtime); | 2203 __ j(not_equal, &call_runtime); |
2012 | 2204 |
2013 __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset)); | 2205 __ movsd(double_base, FieldOperand(base, HeapNumber::kValueOffset)); |
2014 __ jmp(&unpack_exponent, Label::kNear); | 2206 __ jmp(&unpack_exponent, Label::kNear); |
2015 | 2207 |
2016 __ bind(&base_is_smi); | 2208 __ bind(&base_is_smi); |
2017 __ SmiToInteger32(base, base); | 2209 __ SmiToInteger32(base, base); |
(...skipping 322 matching lines...)
2340 // property might have been redefined. | 2532 // property might have been redefined. |
2341 __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset)); | 2533 __ movq(scratch, FieldOperand(receiver, JSArray::kPropertiesOffset)); |
2342 __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset), | 2534 __ CompareRoot(FieldOperand(scratch, FixedArray::kMapOffset), |
2343 Heap::kHashTableMapRootIndex); | 2535 Heap::kHashTableMapRootIndex); |
2344 __ j(equal, &miss); | 2536 __ j(equal, &miss); |
2345 | 2537 |
2346 // Check that value is a smi. | 2538 // Check that value is a smi. |
2347 __ JumpIfNotSmi(value, &miss); | 2539 __ JumpIfNotSmi(value, &miss); |
2348 | 2540 |
2349 // Prepare tail call to StoreIC_ArrayLength. | 2541 // Prepare tail call to StoreIC_ArrayLength. |
2350 __ pop(scratch); | 2542 __k pop(scratch); |
2351 __ push(receiver); | 2543 __ push(receiver); |
2352 __ push(value); | 2544 __ push(value); |
2353 __ push(scratch); // return address | 2545 __k push(scratch); // return address |
2354 | 2546 |
2355 ExternalReference ref = | 2547 ExternalReference ref = |
2356 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); | 2548 ExternalReference(IC_Utility(IC::kStoreIC_ArrayLength), masm->isolate()); |
2357 __ TailCallExternalReference(ref, 2, 1); | 2549 __ TailCallExternalReference(ref, 2, 1); |
2358 | 2550 |
2359 __ bind(&miss); | 2551 __ bind(&miss); |
2360 | 2552 |
2361 StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); | 2553 StubCompiler::TailCallBuiltin(masm, StubCompiler::MissBuiltin(kind())); |
2362 } | 2554 } |
2363 | 2555 |
2364 | 2556 |
2365 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 2557 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
2366 // The key is in rdx and the parameter count is in rax. | 2558 // The key is in rdx and the parameter count is in rax. |
2367 | 2559 |
2368 // The displacement is used for skipping the frame pointer on the | 2560 // The displacement is used for skipping the frame pointer on the |
2369 // stack. It is the offset of the last parameter (if any) relative | 2561 // stack. It is the offset of the last parameter (if any) relative |
2370 // to the frame pointer. | 2562 // to the frame pointer. |
| 2563 #ifndef V8_TARGET_ARCH_X32 |
2371 static const int kDisplacement = 1 * kPointerSize; | 2564 static const int kDisplacement = 1 * kPointerSize; |
| 2565 #else |
| 2566 static const int kDisplacement = 2 * kHWRegSize - 1 * kPointerSize; |
| 2567 #endif |
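The x32 constant above is the same formula with the hardware-sized slots written out: presumably the return address and saved frame pointer each occupy a full 8-byte slot (kHWRegSize, assumed to be 8) regardless of pointer size, while a tagged parameter is only kPointerSize bytes, so the displacement becomes 12 on x32 and stays 8 on x64. A compile-time sketch, with the concrete sizes as assumptions:

    constexpr int Displacement(int hw_reg_size, int pointer_size) {
      return 2 * hw_reg_size - 1 * pointer_size;
    }
    static_assert(Displacement(8, 8) == 8,  "x64: 1 * kPointerSize");
    static_assert(Displacement(8, 4) == 12, "x32: 2 * kHWRegSize - 1 * kPointerSize");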
2372 | 2568 |
2373 // Check that the key is a smi. | 2569 // Check that the key is a smi. |
2374 Label slow; | 2570 Label slow; |
2375 __ JumpIfNotSmi(rdx, &slow); | 2571 __ JumpIfNotSmi(rdx, &slow); |
2376 | 2572 |
2377 // Check if the calling frame is an arguments adaptor frame. We look at the | 2573 // Check if the calling frame is an arguments adaptor frame. We look at the |
2378 // context offset, and if the frame is not a regular one, then we find a | 2574 // context offset, and if the frame is not a regular one, then we find a |
2379 // Smi instead of the context. We can't use SmiCompare here, because that | 2575 // Smi instead of the context. We can't use SmiCompare here, because that |
2380 // only works for comparing two smis. | 2576 // only works for comparing two smis. |
2381 Label adaptor; | 2577 Label adaptor; |
(...skipping 26 matching lines...)
2408 // Read the argument from the stack and return it. | 2604 // Read the argument from the stack and return it. |
2409 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); | 2605 index = masm->SmiToIndex(rax, rcx, kPointerSizeLog2); |
2410 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); | 2606 __ lea(rbx, Operand(rbx, index.reg, index.scale, 0)); |
2411 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); | 2607 index = masm->SmiToNegativeIndex(rdx, rdx, kPointerSizeLog2); |
2412 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); | 2608 __ movq(rax, Operand(rbx, index.reg, index.scale, kDisplacement)); |
2413 __ Ret(); | 2609 __ Ret(); |
2414 | 2610 |
2415 // Slow-case: Handle non-smi or out-of-bounds access to arguments | 2611 // Slow-case: Handle non-smi or out-of-bounds access to arguments |
2416 // by calling the runtime system. | 2612 // by calling the runtime system. |
2417 __ bind(&slow); | 2613 __ bind(&slow); |
2418 __ pop(rbx); // Return address. | 2614 __k pop(rbx); // Return address. |
2419 __ push(rdx); | 2615 __ push(rdx); |
2420 __ push(rbx); | 2616 __k push(rbx); |
2421 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | 2617 __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); |
2422 } | 2618 } |
2423 | 2619 |
2424 | 2620 |
2425 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { | 2621 void ArgumentsAccessStub::GenerateNewNonStrictFast(MacroAssembler* masm) { |
2426 // Stack layout: | 2622 // Stack layout: |
2427 // rsp[0] : return address | 2623 // rsp[0] : return address |
2428 // rsp[8] : number of parameters (tagged) | 2624 // rsp[8] : number of parameters (tagged) |
2429 // rsp[16] : receiver displacement | 2625 // rsp[16] : receiver displacement |
2430 // rsp[24] : function | 2626 // rsp[24] : function |
2431 // Registers used over the whole function: | 2627 // Registers used over the whole function: |
2432 // rbx: the mapped parameter count (untagged) | 2628 // rbx: the mapped parameter count (untagged) |
2433 // rax: the allocated object (tagged). | 2629 // rax: the allocated object (tagged). |
2434 | 2630 |
2435 Factory* factory = masm->isolate()->factory(); | 2631 Factory* factory = masm->isolate()->factory(); |
2436 | 2632 |
2437 __ SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize)); | 2633 __a SmiToInteger64(rbx, Operand(rsp, 1 * kPointerSize)); |
2438 // rbx = parameter count (untagged) | 2634 // rbx = parameter count (untagged) |
2439 | 2635 |
2440 // Check if the calling frame is an arguments adaptor frame. | 2636 // Check if the calling frame is an arguments adaptor frame. |
2441 Label runtime; | 2637 Label runtime; |
2442 Label adaptor_frame, try_allocate; | 2638 Label adaptor_frame, try_allocate; |
2443 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | 2639 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
2444 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset)); | 2640 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset)); |
2445 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | 2641 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
2446 __ j(equal, &adaptor_frame); | 2642 __ j(equal, &adaptor_frame); |
2447 | 2643 |
2448 // No adaptor, parameter count = argument count. | 2644 // No adaptor, parameter count = argument count. |
2449 __ movq(rcx, rbx); | 2645 __ movq(rcx, rbx); |
2450 __ jmp(&try_allocate, Label::kNear); | 2646 __ jmp(&try_allocate, Label::kNear); |
2451 | 2647 |
2452 // We have an adaptor frame. Patch the parameters pointer. | 2648 // We have an adaptor frame. Patch the parameters pointer. |
2453 __ bind(&adaptor_frame); | 2649 __ bind(&adaptor_frame); |
2454 __ SmiToInteger64(rcx, | 2650 __ SmiToInteger64(rcx, |
2455 Operand(rdx, | 2651 Operand(rdx, |
2456 ArgumentsAdaptorFrameConstants::kLengthOffset)); | 2652 ArgumentsAdaptorFrameConstants::kLengthOffset)); |
2457 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, | 2653 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, |
2458 StandardFrameConstants::kCallerSPOffset)); | 2654 StandardFrameConstants::kCallerSPOffset)); |
2459 __ movq(Operand(rsp, 2 * kPointerSize), rdx); | 2655 __a movq(Operand(rsp, 2 * kPointerSize), rdx); |
2460 | 2656 |
2461 // rbx = parameter count (untagged) | 2657 // rbx = parameter count (untagged) |
2462 // rcx = argument count (untagged) | 2658 // rcx = argument count (untagged) |
2463 // Compute the mapped parameter count = min(rbx, rcx) in rbx. | 2659 // Compute the mapped parameter count = min(rbx, rcx) in rbx. |
2464 __ cmpq(rbx, rcx); | 2660 __ cmpq(rbx, rcx); |
2465 __ j(less_equal, &try_allocate, Label::kNear); | 2661 __ j(less_equal, &try_allocate, Label::kNear); |
2466 __ movq(rbx, rcx); | 2662 __ movq(rbx, rcx); |
2467 | 2663 |
2468 __ bind(&try_allocate); | 2664 __ bind(&try_allocate); |
2469 | 2665 |
(...skipping 40 matching lines...)
2510 // rcx = argument count (untagged) | 2706 // rcx = argument count (untagged) |
2511 // rdi = address of boilerplate object (tagged) | 2707 // rdi = address of boilerplate object (tagged) |
2512 // Copy the JS object part. | 2708 // Copy the JS object part. |
2513 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { | 2709 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { |
2514 __ movq(rdx, FieldOperand(rdi, i)); | 2710 __ movq(rdx, FieldOperand(rdi, i)); |
2515 __ movq(FieldOperand(rax, i), rdx); | 2711 __ movq(FieldOperand(rax, i), rdx); |
2516 } | 2712 } |
2517 | 2713 |
2518 // Set up the callee in-object property. | 2714 // Set up the callee in-object property. |
2519 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); | 2715 STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); |
2520 __ movq(rdx, Operand(rsp, 3 * kPointerSize)); | 2716 __a movq(rdx, Operand(rsp, 3 * kPointerSize)); |
2521 __ movq(FieldOperand(rax, JSObject::kHeaderSize + | 2717 __ movq(FieldOperand(rax, JSObject::kHeaderSize + |
2522 Heap::kArgumentsCalleeIndex * kPointerSize), | 2718 Heap::kArgumentsCalleeIndex * kPointerSize), |
2523 rdx); | 2719 rdx); |
2524 | 2720 |
2525 // Use the length (smi tagged) and set that as an in-object property too. | 2721 // Use the length (smi tagged) and set that as an in-object property too. |
2526 // Note: rcx is tagged from here on. | 2722 // Note: rcx is tagged from here on. |
2527 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 2723 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
2528 __ Integer32ToSmi(rcx, rcx); | 2724 __ Integer32ToSmi(rcx, rcx); |
2529 __ movq(FieldOperand(rax, JSObject::kHeaderSize + | 2725 __ movq(FieldOperand(rax, JSObject::kHeaderSize + |
2530 Heap::kArgumentsLengthIndex * kPointerSize), | 2726 Heap::kArgumentsLengthIndex * kPointerSize), |
(...skipping 30 matching lines...)
2561 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 | 2757 // MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 |
2562 // The mapped parameters thus need to get indices | 2758 // The mapped parameters thus need to get indices |
2563 // MIN_CONTEXT_SLOTS+parameter_count-1 .. | 2759 // MIN_CONTEXT_SLOTS+parameter_count-1 .. |
2564 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 2760 // MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count |
2565 // We loop from right to left. | 2761 // We loop from right to left. |
2566 Label parameters_loop, parameters_test; | 2762 Label parameters_loop, parameters_test; |
2567 | 2763 |
2568 // Load tagged parameter count into r9. | 2764 // Load tagged parameter count into r9. |
2569 __ Integer32ToSmi(r9, rbx); | 2765 __ Integer32ToSmi(r9, rbx); |
2570 __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS)); | 2766 __ Move(r8, Smi::FromInt(Context::MIN_CONTEXT_SLOTS)); |
2571 __ addq(r8, Operand(rsp, 1 * kPointerSize)); | 2767 __a addq(r8, Operand(rsp, 1 * kPointerSize)); |
2572 __ subq(r8, r9); | 2768 __ subq(r8, r9); |
2573 __ Move(r11, factory->the_hole_value()); | 2769 __ Move(r11, factory->the_hole_value()); |
2574 __ movq(rdx, rdi); | 2770 __ movq(rdx, rdi); |
2575 __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize)); | 2771 __ lea(rdi, Operand(rdi, rbx, times_pointer_size, kParameterMapHeaderSize)); |
2576 // r9 = loop variable (tagged) | 2772 // r9 = loop variable (tagged) |
2577 // r8 = mapping index (tagged) | 2773 // r8 = mapping index (tagged) |
2578 // r11 = the hole value | 2774 // r11 = the hole value |
2579 // rdx = address of parameter map (tagged) | 2775 // rdx = address of parameter map (tagged) |
2580 // rdi = address of backing store (tagged) | 2776 // rdi = address of backing store (tagged) |
2581 __ jmp(&parameters_test, Label::kNear); | 2777 __ jmp(&parameters_test, Label::kNear); |
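As a sketch of the bookkeeping in the comment a few lines up (names are illustrative, not V8 identifiers): with mapped_count = min(parameter_count, argument_count), the mapped parameters receive the top mapped_count context slot indices, and the loop above walks them from right to left.

    #include <algorithm>

    struct SlotRange { int lowest; int highest; };

    // Range of context slot indices handed out to the mapped parameters.
    SlotRange MappedParameterSlots(int min_context_slots,
                                   int parameter_count,
                                   int argument_count) {
      int mapped_count = std::min(parameter_count, argument_count);
      return { min_context_slots + parameter_count - mapped_count,   // lowest
               min_context_slots + parameter_count - 1 };            // highest
    }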
(...skipping 18 matching lines...)
2600 | 2796 |
2601 // rcx = argument count (tagged) | 2797 // rcx = argument count (tagged) |
2602 // rdi = address of backing store (tagged) | 2798 // rdi = address of backing store (tagged) |
2603 // Copy arguments header and remaining slots (if there are any). | 2799 // Copy arguments header and remaining slots (if there are any). |
2604 __ Move(FieldOperand(rdi, FixedArray::kMapOffset), | 2800 __ Move(FieldOperand(rdi, FixedArray::kMapOffset), |
2605 factory->fixed_array_map()); | 2801 factory->fixed_array_map()); |
2606 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); | 2802 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); |
2607 | 2803 |
2608 Label arguments_loop, arguments_test; | 2804 Label arguments_loop, arguments_test; |
2609 __ movq(r8, rbx); | 2805 __ movq(r8, rbx); |
2610 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | 2806 __a movq(rdx, Operand(rsp, 2 * kPointerSize)); |
2611 // Untag rcx for the loop below. | 2807 // Untag rcx for the loop below. |
2612 __ SmiToInteger64(rcx, rcx); | 2808 __ SmiToInteger64(rcx, rcx); |
2613 __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0)); | 2809 __ lea(kScratchRegister, Operand(r8, times_pointer_size, 0)); |
2614 __ subq(rdx, kScratchRegister); | 2810 __ subq(rdx, kScratchRegister); |
2615 __ jmp(&arguments_test, Label::kNear); | 2811 __ jmp(&arguments_test, Label::kNear); |
2616 | 2812 |
2617 __ bind(&arguments_loop); | 2813 __ bind(&arguments_loop); |
2618 __ subq(rdx, Immediate(kPointerSize)); | 2814 __ subq(rdx, Immediate(kPointerSize)); |
2619 __ movq(r9, Operand(rdx, 0)); | 2815 __ movq(r9, Operand(rdx, 0)); |
2620 __ movq(FieldOperand(rdi, r8, | 2816 __ movq(FieldOperand(rdi, r8, |
2621 times_pointer_size, | 2817 times_pointer_size, |
2622 FixedArray::kHeaderSize), | 2818 FixedArray::kHeaderSize), |
2623 r9); | 2819 r9); |
2624 __ addq(r8, Immediate(1)); | 2820 __ addq(r8, Immediate(1)); |
2625 | 2821 |
2626 __ bind(&arguments_test); | 2822 __ bind(&arguments_test); |
2627 __ cmpq(r8, rcx); | 2823 __ cmpq(r8, rcx); |
2628 __ j(less, &arguments_loop, Label::kNear); | 2824 __ j(less, &arguments_loop, Label::kNear); |
2629 | 2825 |
2630 // Return and remove the on-stack parameters. | 2826 // Return and remove the on-stack parameters. |
2631 __ ret(3 * kPointerSize); | 2827 __ ret(3 * kPointerSize); |
2632 | 2828 |
2633 // Do the runtime call to allocate the arguments object. | 2829 // Do the runtime call to allocate the arguments object. |
2634 // rcx = argument count (untagged) | 2830 // rcx = argument count (untagged) |
2635 __ bind(&runtime); | 2831 __ bind(&runtime); |
2636 __ Integer32ToSmi(rcx, rcx); | 2832 __ Integer32ToSmi(rcx, rcx); |
2637 __ movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count. | 2833 __a movq(Operand(rsp, 1 * kPointerSize), rcx); // Patch argument count. |
2638 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 2834 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
2639 } | 2835 } |
2640 | 2836 |
2641 | 2837 |
2642 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { | 2838 void ArgumentsAccessStub::GenerateNewNonStrictSlow(MacroAssembler* masm) { |
2643 // rsp[0] : return address | 2839 // rsp[0] : return address |
2644 // rsp[8] : number of parameters | 2840 // rsp[8] : number of parameters |
2645 // rsp[16] : receiver displacement | 2841 // rsp[16] : receiver displacement |
2646 // rsp[24] : function | 2842 // rsp[24] : function |
2647 | 2843 |
2648 // Check if the calling frame is an arguments adaptor frame. | 2844 // Check if the calling frame is an arguments adaptor frame. |
2649 Label runtime; | 2845 Label runtime; |
2650 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | 2846 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
2651 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset)); | 2847 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset)); |
2652 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | 2848 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
2653 __ j(not_equal, &runtime); | 2849 __ j(not_equal, &runtime); |
2654 | 2850 |
2655 // Patch the arguments.length and the parameters pointer. | 2851 // Patch the arguments.length and the parameters pointer. |
2656 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 2852 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
2657 __ movq(Operand(rsp, 1 * kPointerSize), rcx); | 2853 __a movq(Operand(rsp, 1 * kPointerSize), rcx); |
2658 __ SmiToInteger64(rcx, rcx); | 2854 __ SmiToInteger64(rcx, rcx); |
2659 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, | 2855 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, |
2660 StandardFrameConstants::kCallerSPOffset)); | 2856 StandardFrameConstants::kCallerSPOffset)); |
2661 __ movq(Operand(rsp, 2 * kPointerSize), rdx); | 2857 __a movq(Operand(rsp, 2 * kPointerSize), rdx); |
2662 | 2858 |
2663 __ bind(&runtime); | 2859 __ bind(&runtime); |
2664 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); | 2860 __ TailCallRuntime(Runtime::kNewArgumentsFast, 3, 1); |
2665 } | 2861 } |
2666 | 2862 |
2667 | 2863 |
2668 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | 2864 void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { |
2669 // rsp[0] : return address | 2865 // rsp[0] : return address |
2670 // rsp[8] : number of parameters | 2866 // rsp[8] : number of parameters |
2671 // rsp[16] : receiver displacement | 2867 // rsp[16] : receiver displacement |
2672 // rsp[24] : function | 2868 // rsp[24] : function |
2673 | 2869 |
2674 // Check if the calling frame is an arguments adaptor frame. | 2870 // Check if the calling frame is an arguments adaptor frame. |
2675 Label adaptor_frame, try_allocate, runtime; | 2871 Label adaptor_frame, try_allocate, runtime; |
2676 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); | 2872 __ movq(rdx, Operand(rbp, StandardFrameConstants::kCallerFPOffset)); |
2677 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset)); | 2873 __ movq(rcx, Operand(rdx, StandardFrameConstants::kContextOffset)); |
2678 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); | 2874 __ Cmp(rcx, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)); |
2679 __ j(equal, &adaptor_frame); | 2875 __ j(equal, &adaptor_frame); |
2680 | 2876 |
2681 // Get the length from the frame. | 2877 // Get the length from the frame. |
2682 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | 2878 __a movq(rcx, Operand(rsp, 1 * kPointerSize)); |
2683 __ SmiToInteger64(rcx, rcx); | 2879 __ SmiToInteger64(rcx, rcx); |
2684 __ jmp(&try_allocate); | 2880 __ jmp(&try_allocate); |
2685 | 2881 |
2686 // Patch the arguments.length and the parameters pointer. | 2882 // Patch the arguments.length and the parameters pointer. |
2687 __ bind(&adaptor_frame); | 2883 __ bind(&adaptor_frame); |
2688 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 2884 __ movq(rcx, Operand(rdx, ArgumentsAdaptorFrameConstants::kLengthOffset)); |
2689 __ movq(Operand(rsp, 1 * kPointerSize), rcx); | 2885 __a movq(Operand(rsp, 1 * kPointerSize), rcx); |
2690 __ SmiToInteger64(rcx, rcx); | 2886 __ SmiToInteger64(rcx, rcx); |
2691 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, | 2887 __ lea(rdx, Operand(rdx, rcx, times_pointer_size, |
2692 StandardFrameConstants::kCallerSPOffset)); | 2888 StandardFrameConstants::kCallerSPOffset)); |
2693 __ movq(Operand(rsp, 2 * kPointerSize), rdx); | 2889 __a movq(Operand(rsp, 2 * kPointerSize), rdx); |
2694 | 2890 |
2695 // Try the new space allocation. Start out with computing the size of | 2891 // Try the new space allocation. Start out with computing the size of |
2696 // the arguments object and the elements array. | 2892 // the arguments object and the elements array. |
2697 Label add_arguments_object; | 2893 Label add_arguments_object; |
2698 __ bind(&try_allocate); | 2894 __ bind(&try_allocate); |
2699 __ testq(rcx, rcx); | 2895 __ testq(rcx, rcx); |
2700 __ j(zero, &add_arguments_object, Label::kNear); | 2896 __ j(zero, &add_arguments_object, Label::kNear); |
2701 __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); | 2897 __ lea(rcx, Operand(rcx, times_pointer_size, FixedArray::kHeaderSize)); |
2702 __ bind(&add_arguments_object); | 2898 __ bind(&add_arguments_object); |
2703 __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict)); | 2899 __ addq(rcx, Immediate(Heap::kArgumentsObjectSizeStrict)); |
2704 | 2900 |
2705 // Do the allocation of both objects in one go. | 2901 // Do the allocation of both objects in one go. |
2706 __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); | 2902 __ Allocate(rcx, rax, rdx, rbx, &runtime, TAG_OBJECT); |
2707 | 2903 |
2708 // Get the arguments boilerplate from the current native context. | 2904 // Get the arguments boilerplate from the current native context. |
2709 __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 2905 __ movq(rdi, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
2710 __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset)); | 2906 __ movq(rdi, FieldOperand(rdi, GlobalObject::kNativeContextOffset)); |
2711 const int offset = | 2907 const int offset = |
2712 Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX); | 2908 Context::SlotOffset(Context::STRICT_MODE_ARGUMENTS_BOILERPLATE_INDEX); |
2713 __ movq(rdi, Operand(rdi, offset)); | 2909 __ movq(rdi, Operand(rdi, offset)); |
2714 | 2910 |
2715 // Copy the JS object part. | 2911 // Copy the JS object part. |
2716 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { | 2912 for (int i = 0; i < JSObject::kHeaderSize; i += kPointerSize) { |
2717 __ movq(rbx, FieldOperand(rdi, i)); | 2913 __ movq(rbx, FieldOperand(rdi, i)); |
2718 __ movq(FieldOperand(rax, i), rbx); | 2914 __ movq(FieldOperand(rax, i), rbx); |
2719 } | 2915 } |
2720 | 2916 |
2721 // Get the length (smi tagged) and set that as an in-object property too. | 2917 // Get the length (smi tagged) and set that as an in-object property too. |
2722 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 2918 STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); |
2723 __ movq(rcx, Operand(rsp, 1 * kPointerSize)); | 2919 __a movq(rcx, Operand(rsp, 1 * kPointerSize)); |
2724 __ movq(FieldOperand(rax, JSObject::kHeaderSize + | 2920 __ movq(FieldOperand(rax, JSObject::kHeaderSize + |
2725 Heap::kArgumentsLengthIndex * kPointerSize), | 2921 Heap::kArgumentsLengthIndex * kPointerSize), |
2726 rcx); | 2922 rcx); |
2727 | 2923 |
2728 // If there are no actual arguments, we're done. | 2924 // If there are no actual arguments, we're done. |
2729 Label done; | 2925 Label done; |
2730 __ testq(rcx, rcx); | 2926 __ testq(rcx, rcx); |
2731 __ j(zero, &done); | 2927 __ j(zero, &done); |
2732 | 2928 |
2733 // Get the parameters pointer from the stack. | 2929 // Get the parameters pointer from the stack. |
2734 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); | 2930 __a movq(rdx, Operand(rsp, 2 * kPointerSize)); |
2735 | 2931 |
2736 // Set up the elements pointer in the allocated arguments object and | 2932 // Set up the elements pointer in the allocated arguments object and |
2737 // initialize the header in the elements fixed array. | 2933 // initialize the header in the elements fixed array. |
2738 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict)); | 2934 __ lea(rdi, Operand(rax, Heap::kArgumentsObjectSizeStrict)); |
2739 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); | 2935 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rdi); |
2740 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); | 2936 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); |
2741 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); | 2937 __ movq(FieldOperand(rdi, FixedArray::kMapOffset), kScratchRegister); |
2742 | 2938 |
2743 | 2939 |
2744 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); | 2940 __ movq(FieldOperand(rdi, FixedArray::kLengthOffset), rcx); |
(...skipping 28 matching lines...)
2773 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); | 2969 __ TailCallRuntime(Runtime::kRegExpExec, 4, 1); |
2774 #else // V8_INTERPRETED_REGEXP | 2970 #else // V8_INTERPRETED_REGEXP |
2775 | 2971 |
2776 // Stack frame on entry. | 2972 // Stack frame on entry. |
2777 // rsp[0] : return address | 2973 // rsp[0] : return address |
2778 // rsp[8] : last_match_info (expected JSArray) | 2974 // rsp[8] : last_match_info (expected JSArray) |
2779 // rsp[16] : previous index | 2975 // rsp[16] : previous index |
2780 // rsp[24] : subject string | 2976 // rsp[24] : subject string |
2781 // rsp[32] : JSRegExp object | 2977 // rsp[32] : JSRegExp object |
2782 | 2978 |
| 2979 #ifndef V8_TARGET_ARCH_X32 |
2783 static const int kLastMatchInfoOffset = 1 * kPointerSize; | 2980 static const int kLastMatchInfoOffset = 1 * kPointerSize; |
2784 static const int kPreviousIndexOffset = 2 * kPointerSize; | 2981 static const int kPreviousIndexOffset = 2 * kPointerSize; |
2785 static const int kSubjectOffset = 3 * kPointerSize; | 2982 static const int kSubjectOffset = 3 * kPointerSize; |
2786 static const int kJSRegExpOffset = 4 * kPointerSize; | 2983 static const int kJSRegExpOffset = 4 * kPointerSize; |
| 2984 #else |
| 2985 static const int kLastMatchInfoOffset = 1 * kHWRegSize; |
| 2986 static const int kPreviousIndexOffset = 1 * kHWRegSize + 1 * kPointerSize; |
| 2987 static const int kSubjectOffset = 1 * kHWRegSize + 2 * kPointerSize; |
| 2988 static const int kJSRegExpOffset = 1 * kHWRegSize + 3 * kPointerSize; |
| 2989 #endif |
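A sketch of how the two offset sets follow from the stack layout comment above: on x64 every slot, including the return address, is 8 bytes; on x32 the return address still occupies a full hardware slot (kHWRegSize, assumed 8) while each tagged argument is 4 bytes. The values below are assumptions for illustration.

    constexpr int RegExpArgOffsetX64(int n) { return n * 8; }
    constexpr int RegExpArgOffsetX32(int n) { return 8 + (n - 1) * 4; }

    static_assert(RegExpArgOffsetX64(4) == 32, "rsp[32] : JSRegExp object");
    static_assert(RegExpArgOffsetX32(1) == 8,  "x32: 1 * kHWRegSize");
    static_assert(RegExpArgOffsetX32(4) == 20, "x32: 1 * kHWRegSize + 3 * kPointerSize");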
2787 | 2990 |
2788 Label runtime; | 2991 Label runtime; |
2789 // Ensure that a RegExp stack is allocated. | 2992 // Ensure that a RegExp stack is allocated. |
2790 Isolate* isolate = masm->isolate(); | 2993 Isolate* isolate = masm->isolate(); |
2791 ExternalReference address_of_regexp_stack_memory_address = | 2994 ExternalReference address_of_regexp_stack_memory_address = |
2792 ExternalReference::address_of_regexp_stack_memory_address(isolate); | 2995 ExternalReference::address_of_regexp_stack_memory_address(isolate); |
2793 ExternalReference address_of_regexp_stack_memory_size = | 2996 ExternalReference address_of_regexp_stack_memory_size = |
2794 ExternalReference::address_of_regexp_stack_memory_size(isolate); | 2997 ExternalReference::address_of_regexp_stack_memory_size(isolate); |
2795 __ Load(kScratchRegister, address_of_regexp_stack_memory_size); | 2998 __ Load(kScratchRegister, address_of_regexp_stack_memory_size); |
2796 __ testq(kScratchRegister, kScratchRegister); | 2999 __ testq(kScratchRegister, kScratchRegister); |
(...skipping 151 matching lines...)
2948 | 3151 |
2949 // Isolates: note we add an additional parameter here (isolate pointer). | 3152 // Isolates: note we add an additional parameter here (isolate pointer). |
2950 static const int kRegExpExecuteArguments = 9; | 3153 static const int kRegExpExecuteArguments = 9; |
2951 int argument_slots_on_stack = | 3154 int argument_slots_on_stack = |
2952 masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments); | 3155 masm->ArgumentStackSlotsForCFunctionCall(kRegExpExecuteArguments); |
2953 __ EnterApiExitFrame(argument_slots_on_stack); | 3156 __ EnterApiExitFrame(argument_slots_on_stack); |
2954 | 3157 |
2955 // Argument 9: Pass current isolate address. | 3158 // Argument 9: Pass current isolate address. |
2956 __ LoadAddress(kScratchRegister, | 3159 __ LoadAddress(kScratchRegister, |
2957 ExternalReference::isolate_address(masm->isolate())); | 3160 ExternalReference::isolate_address(masm->isolate())); |
2958 __ movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize), | 3161 __s movq(Operand(rsp, (argument_slots_on_stack - 1) * kPointerSize), |
2959 kScratchRegister); | 3162 kScratchRegister); |
2960 | 3163 |
2961 // Argument 8: Indicate that this is a direct call from JavaScript. | 3164 // Argument 8: Indicate that this is a direct call from JavaScript. |
2962 __ movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), | 3165 __s movq(Operand(rsp, (argument_slots_on_stack - 2) * kPointerSize), |
2963 Immediate(1)); | 3166 Immediate(1)); |
2964 | 3167 |
2965 // Argument 7: Start (high end) of backtracking stack memory area. | 3168 // Argument 7: Start (high end) of backtracking stack memory area. |
2966 __ movq(kScratchRegister, address_of_regexp_stack_memory_address); | 3169 __ movq(kScratchRegister, address_of_regexp_stack_memory_address); |
2967 __ movq(r9, Operand(kScratchRegister, 0)); | 3170 __ movq(r9, Operand(kScratchRegister, 0)); |
2968 __ movq(kScratchRegister, address_of_regexp_stack_memory_size); | 3171 __ movq(kScratchRegister, address_of_regexp_stack_memory_size); |
2969 __ addq(r9, Operand(kScratchRegister, 0)); | 3172 __ addq(r9, Operand(kScratchRegister, 0)); |
2970 __ movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9); | 3173 __s movq(Operand(rsp, (argument_slots_on_stack - 3) * kPointerSize), r9); |
2971 | 3174 |
2972 // Argument 6: Set the number of capture registers to zero to force global | 3175 // Argument 6: Set the number of capture registers to zero to force global |
2973 // regexps to behave as non-global. This does not affect non-global regexps. | 3176 // regexps to behave as non-global. This does not affect non-global regexps. |
2974 // Argument 6 is passed in r9 on Linux and on the stack on Windows. | 3177 // Argument 6 is passed in r9 on Linux and on the stack on Windows. |
2975 #ifdef _WIN64 | 3178 #ifdef _WIN64 |
2976 __ movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), | 3179 __s movq(Operand(rsp, (argument_slots_on_stack - 4) * kPointerSize), |
2977 Immediate(0)); | 3180 Immediate(0)); |
2978 #else | 3181 #else |
2979 __ Set(r9, 0); | 3182 __ Set(r9, 0); |
2980 #endif | 3183 #endif |
2981 | 3184 |
2982 // Argument 5: static offsets vector buffer. | 3185 // Argument 5: static offsets vector buffer. |
2983 __ LoadAddress(r8, | 3186 __ LoadAddress(r8, |
2984 ExternalReference::address_of_static_offsets_vector(isolate)); | 3187 ExternalReference::address_of_static_offsets_vector(isolate)); |
2985 // Argument 5 passed in r8 on Linux and on the stack on Windows. | 3188 // Argument 5 passed in r8 on Linux and on the stack on Windows. |
2986 #ifdef _WIN64 | 3189 #ifdef _WIN64 |
2987 __ movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8); | 3190 __s movq(Operand(rsp, (argument_slots_on_stack - 5) * kPointerSize), r8); |
2988 #endif | 3191 #endif |
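For context on the #ifdef _WIN64 branches above (general ABI knowledge, not taken from this CL): the Windows x64 convention passes only four integer arguments in registers, so arguments 5 and 6 spill to the stack, while the System V convention used on Linux passes six in registers. A tiny sketch:

    // Where the n-th (1-based) integer argument of a C call lives.
    const char* ArgLocationWin64(int n) {
      static const char* const kRegs[] = { "rcx", "rdx", "r8", "r9" };
      return n <= 4 ? kRegs[n - 1] : "stack";
    }
    const char* ArgLocationSysV(int n) {
      static const char* const kRegs[] = { "rdi", "rsi", "rdx", "rcx", "r8", "r9" };
      return n <= 6 ? kRegs[n - 1] : "stack";
    }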
2989 | 3192 |
2990 // rdi: subject string | 3193 // rdi: subject string |
2991 // rbx: previous index | 3194 // rbx: previous index |
2992 // rcx: encoding of subject string (1 if ASCII, 0 if two_byte); | 3195 // rcx: encoding of subject string (1 if ASCII, 0 if two_byte); |
2993 // r11: code | 3196 // r11: code |
2994 // r14: slice offset | 3197 // r14: slice offset |
2995 // r15: original subject string | 3198 // r15: original subject string |
2996 | 3199 |
2997 // Argument 2: Previous index. | 3200 // Argument 2: Previous index. |
(...skipping 209 matching lines...)
3207 __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset)); | 3410 __ movq(rdi, FieldOperand(rdi, SlicedString::kParentOffset)); |
3208 __ jmp(&check_underlying); | 3411 __ jmp(&check_underlying); |
3209 #endif // V8_INTERPRETED_REGEXP | 3412 #endif // V8_INTERPRETED_REGEXP |
3210 } | 3413 } |
3211 | 3414 |
3212 | 3415 |
3213 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { | 3416 void RegExpConstructResultStub::Generate(MacroAssembler* masm) { |
3214 const int kMaxInlineLength = 100; | 3417 const int kMaxInlineLength = 100; |
3215 Label slowcase; | 3418 Label slowcase; |
3216 Label done; | 3419 Label done; |
3217 __ movq(r8, Operand(rsp, kPointerSize * 3)); | 3420 __a movq(r8, Operand(rsp, 3 * kPointerSize)); |
3218 __ JumpIfNotSmi(r8, &slowcase); | 3421 __ JumpIfNotSmi(r8, &slowcase); |
3219 __ SmiToInteger32(rbx, r8); | 3422 __ SmiToInteger32(rbx, r8); |
3220 __ cmpl(rbx, Immediate(kMaxInlineLength)); | 3423 __ cmpl(rbx, Immediate(kMaxInlineLength)); |
3221 __ j(above, &slowcase); | 3424 __ j(above, &slowcase); |
3222 // Smi-tagging is equivalent to multiplying by 2. | 3425 // Smi-tagging is equivalent to multiplying by 2. |
3223 STATIC_ASSERT(kSmiTag == 0); | 3426 STATIC_ASSERT(kSmiTag == 0); |
3224 STATIC_ASSERT(kSmiTagSize == 1); | 3427 STATIC_ASSERT(kSmiTagSize == 1); |
3225 // Allocate RegExpResult followed by FixedArray with size in rbx. | 3428 // Allocate RegExpResult followed by FixedArray with size in rbx. |
3226 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] | 3429 // JSArray: [Map][empty properties][Elements][Length-smi][index][input] |
3227 // Elements: [Map][Length][..elements..] | 3430 // Elements: [Map][Length][..elements..] |
(...skipping 17 matching lines...)
3245 | 3448 |
3246 // Set empty properties FixedArray. | 3449 // Set empty properties FixedArray. |
3247 __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex); | 3450 __ LoadRoot(kScratchRegister, Heap::kEmptyFixedArrayRootIndex); |
3248 __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister); | 3451 __ movq(FieldOperand(rax, JSObject::kPropertiesOffset), kScratchRegister); |
3249 | 3452 |
3250 // Set elements to point to FixedArray allocated right after the JSArray. | 3453 // Set elements to point to FixedArray allocated right after the JSArray. |
3251 __ lea(rcx, Operand(rax, JSRegExpResult::kSize)); | 3454 __ lea(rcx, Operand(rax, JSRegExpResult::kSize)); |
3252 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx); | 3455 __ movq(FieldOperand(rax, JSObject::kElementsOffset), rcx); |
3253 | 3456 |
3254 // Set input, index and length fields from arguments. | 3457 // Set input, index and length fields from arguments. |
3255 __ movq(r8, Operand(rsp, kPointerSize * 1)); | 3458 __a movq(r8, Operand(rsp, 1 * kPointerSize)); |
3256 __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8); | 3459 __ movq(FieldOperand(rax, JSRegExpResult::kInputOffset), r8); |
3257 __ movq(r8, Operand(rsp, kPointerSize * 2)); | 3460 __a movq(r8, Operand(rsp, 2 * kPointerSize)); |
3258 __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8); | 3461 __ movq(FieldOperand(rax, JSRegExpResult::kIndexOffset), r8); |
3259 __ movq(r8, Operand(rsp, kPointerSize * 3)); | 3462 __a movq(r8, Operand(rsp, 3 * kPointerSize)); |
3260 __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8); | 3463 __ movq(FieldOperand(rax, JSArray::kLengthOffset), r8); |
3261 | 3464 |
3262 // Fill out the elements FixedArray. | 3465 // Fill out the elements FixedArray. |
3263 // rax: JSArray. | 3466 // rax: JSArray. |
3264 // rcx: FixedArray. | 3467 // rcx: FixedArray. |
3265 // rbx: Number of elements in array as int32. | 3468 // rbx: Number of elements in array as int32. |
3266 | 3469 |
3267 // Set map. | 3470 // Set map. |
3268 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); | 3471 __ LoadRoot(kScratchRegister, Heap::kFixedArrayMapRootIndex); |
3269 __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister); | 3472 __ movq(FieldOperand(rcx, HeapObject::kMapOffset), kScratchRegister); |
(...skipping 114 matching lines...)
3384 // but times_twice_pointer_size (multiplication by 16) scale factor | 3587 // but times_twice_pointer_size (multiplication by 16) scale factor |
3385 // is not supported by addrmode on x64 platform. | 3588 // is not supported by addrmode on x64 platform. |
3386 // So we have to premultiply entry index before lookup. | 3589 // So we have to premultiply entry index before lookup. |
3387 __ shl(hash, Immediate(kPointerSizeLog2 + 1)); | 3590 __ shl(hash, Immediate(kPointerSizeLog2 + 1)); |
3388 } | 3591 } |
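A short sketch of the constraint mentioned above: x86-64 addressing modes compute base + index*scale + displacement with scale limited to 1, 2, 4 or 8, so a 16-byte stride cannot come from the scale alone and the hash is shifted left by kPointerSizeLog2 + 1 (i.e. multiplied by 16) beforehand.

    #include <cstdint>

    uintptr_t CacheEntryAddress(uintptr_t cache_base, uint32_t hash) {
      uint32_t premultiplied = hash << 4;   // shl(hash, kPointerSizeLog2 + 1): hash * 16
      return cache_base + premultiplied;    // a plain base + index access now suffices
    }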
3389 | 3592 |
3390 | 3593 |
3391 void NumberToStringStub::Generate(MacroAssembler* masm) { | 3594 void NumberToStringStub::Generate(MacroAssembler* masm) { |
3392 Label runtime; | 3595 Label runtime; |
3393 | 3596 |
3394 __ movq(rbx, Operand(rsp, kPointerSize)); | 3597 __a movq(rbx, Operand(rsp, 1 * kPointerSize)); |
3395 | 3598 |
3396 // Generate code to lookup number in the number string cache. | 3599 // Generate code to lookup number in the number string cache. |
3397 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime); | 3600 GenerateLookupNumberStringCache(masm, rbx, rax, r8, r9, false, &runtime); |
3398 __ ret(1 * kPointerSize); | 3601 __ ret(1 * kPointerSize); |
3399 | 3602 |
3400 __ bind(&runtime); | 3603 __ bind(&runtime); |
3401 // Handle number to string in the runtime system if not found in the cache. | 3604 // Handle number to string in the runtime system if not found in the cache. |
3402 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); | 3605 __ TailCallRuntime(Runtime::kNumberToStringSkipCache, 1, 1); |
3403 } | 3606 } |
3404 | 3607 |
(...skipping 270 matching lines...)
3675 // undefined, and are equal. | 3878 // undefined, and are equal. |
3676 __ Set(rax, EQUAL); | 3879 __ Set(rax, EQUAL); |
3677 __ bind(&return_unequal); | 3880 __ bind(&return_unequal); |
3678 // Return non-equal by returning the non-zero object pointer in rax, | 3881 // Return non-equal by returning the non-zero object pointer in rax, |
3679 // or return equal if we fell through to here. | 3882 // or return equal if we fell through to here. |
3680 __ ret(0); | 3883 __ ret(0); |
3681 __ bind(&not_both_objects); | 3884 __ bind(&not_both_objects); |
3682 } | 3885 } |
3683 | 3886 |
3684 // Push arguments below the return address to prepare jump to builtin. | 3887 // Push arguments below the return address to prepare jump to builtin. |
3685 __ pop(rcx); | 3888 __k pop(rcx); |
3686 __ push(rdx); | 3889 __ push(rdx); |
3687 __ push(rax); | 3890 __ push(rax); |
3688 | 3891 |
3689 // Figure out which native to call and setup the arguments. | 3892 // Figure out which native to call and setup the arguments. |
3690 Builtins::JavaScript builtin; | 3893 Builtins::JavaScript builtin; |
3691 if (cc == equal) { | 3894 if (cc == equal) { |
3692 builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | 3895 builtin = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
3693 } else { | 3896 } else { |
3694 builtin = Builtins::COMPARE; | 3897 builtin = Builtins::COMPARE; |
3695 __ Push(Smi::FromInt(NegativeComparisonResult(cc))); | 3898 __ Push(Smi::FromInt(NegativeComparisonResult(cc))); |
3696 } | 3899 } |
3697 | 3900 |
3698 // Restore return address on the stack. | 3901 // Restore return address on the stack. |
3699 __ push(rcx); | 3902 __k push(rcx); |
3700 | 3903 |
3701 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 3904 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
3702 // tagged as a small integer. | 3905 // tagged as a small integer. |
3703 __ InvokeBuiltin(builtin, JUMP_FUNCTION); | 3906 __ InvokeBuiltin(builtin, JUMP_FUNCTION); |
3704 | 3907 |
3705 __ bind(&miss); | 3908 __ bind(&miss); |
3706 GenerateMiss(masm); | 3909 GenerateMiss(masm); |
3707 } | 3910 } |
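The pop/push sequence above is the standard way of slipping arguments underneath a return address that is already on top of the stack. A rough model (illustrative only, with the stack as a vector whose back() is rsp[0]):

    #include <cstdint>
    #include <vector>

    // Before: [..., ret]   After: [..., rdx, rax, ret]
    void PushArgsBelowReturnAddress(std::vector<uint64_t>* stack,
                                    uint64_t rdx, uint64_t rax) {
      uint64_t ret = stack->back();  // pop rcx   (return address)
      stack->pop_back();
      stack->push_back(rdx);         // push rdx  (first argument)
      stack->push_back(rax);         // push rax  (second argument)
      stack->push_back(ret);         // push rcx  (return address restored)
    }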
3708 | 3911 |
3709 | 3912 |
(...skipping 89 matching lines...)
3799 Isolate* isolate = masm->isolate(); | 4002 Isolate* isolate = masm->isolate(); |
3800 Label slow, non_function; | 4003 Label slow, non_function; |
3801 | 4004 |
3802 // The receiver might implicitly be the global object. This is | 4005 // The receiver might implicitly be the global object. This is |
3803 // indicated by passing the hole as the receiver to the call | 4006 // indicated by passing the hole as the receiver to the call |
3804 // function stub. | 4007 // function stub. |
3805 if (ReceiverMightBeImplicit()) { | 4008 if (ReceiverMightBeImplicit()) { |
3806 Label call; | 4009 Label call; |
3807 // Get the receiver from the stack. | 4010 // Get the receiver from the stack. |
3808 // +1 ~ return address | 4011 // +1 ~ return address |
3809 __ movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize)); | 4012 __a movq(rax, Operand(rsp, (argc_ + 1) * kPointerSize)); |
3810 // Call as function is indicated with the hole. | 4013 // Call as function is indicated with the hole. |
3811 __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); | 4014 __ CompareRoot(rax, Heap::kTheHoleValueRootIndex); |
3812 __ j(not_equal, &call, Label::kNear); | 4015 __ j(not_equal, &call, Label::kNear); |
3813 // Patch the receiver on the stack with the global receiver object. | 4016 // Patch the receiver on the stack with the global receiver object. |
3814 __ movq(rcx, GlobalObjectOperand()); | 4017 __ movq(rcx, GlobalObjectOperand()); |
3815 __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset)); | 4018 __ movq(rcx, FieldOperand(rcx, GlobalObject::kGlobalReceiverOffset)); |
3816 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx); | 4019 __a movq(Operand(rsp, (argc_ + 1) * kPointerSize), rcx); |
3817 __ bind(&call); | 4020 __ bind(&call); |
3818 } | 4021 } |
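The two __a operands above read and patch the receiver slot. A small sketch of the stack layout the stub assumes on entry, for the plain x64 case (the __a form presumably adjusts the offset for the x32 port, where the return-address slot is wider than a tagged pointer; that is an assumption, not shown here):

    #include <cassert>
    #include <cstddef>

    // rsp[0]                     : return address
    // rsp[1 * kPointerSize]      : last argument
    // ...
    // rsp[argc * kPointerSize]   : first argument
    // rsp[(argc+1)*kPointerSize] : receiver
    constexpr std::size_t kPointerSize = 8;  // assumption: plain x64 build

    constexpr std::size_t ReceiverOffset(int argc) {
      return (argc + 1) * kPointerSize;
    }

    int main() {
      assert(ReceiverOffset(0) == 8);   // no args: receiver sits right above the return address
      assert(ReceiverOffset(2) == 24);  // two arguments in between
    }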
3819 | 4022 |
3820 // Check that the function really is a JavaScript function. | 4023 // Check that the function really is a JavaScript function. |
3821 __ JumpIfSmi(rdi, &non_function); | 4024 __ JumpIfSmi(rdi, &non_function); |
3822 // Goto slow case if we do not have a function. | 4025 // Goto slow case if we do not have a function. |
3823 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); | 4026 __ CmpObjectType(rdi, JS_FUNCTION_TYPE, rcx); |
3824 __ j(not_equal, &slow); | 4027 __ j(not_equal, &slow); |
3825 | 4028 |
3826 if (RecordCallTarget()) { | 4029 if (RecordCallTarget()) { |
(...skipping 25 matching lines...)
3852 if (RecordCallTarget()) { | 4055 if (RecordCallTarget()) { |
3853 // If there is a call target cache, mark it megamorphic in the | 4056 // If there is a call target cache, mark it megamorphic in the |
3854 // non-function case. MegamorphicSentinel is an immortal immovable | 4057 // non-function case. MegamorphicSentinel is an immortal immovable |
3855 // object (undefined) so no write barrier is needed. | 4058 // object (undefined) so no write barrier is needed. |
3856 __ Move(FieldOperand(rbx, Cell::kValueOffset), | 4059 __ Move(FieldOperand(rbx, Cell::kValueOffset), |
3857 TypeFeedbackCells::MegamorphicSentinel(isolate)); | 4060 TypeFeedbackCells::MegamorphicSentinel(isolate)); |
3858 } | 4061 } |
3859 // Check for function proxy. | 4062 // Check for function proxy. |
3860 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE); | 4063 __ CmpInstanceType(rcx, JS_FUNCTION_PROXY_TYPE); |
3861 __ j(not_equal, &non_function); | 4064 __ j(not_equal, &non_function); |
3862 __ pop(rcx); | 4065 __k pop(rcx); |
3863 __ push(rdi); // put proxy as additional argument under return address | 4066 __ push(rdi); // put proxy as additional argument under return address |
3864 __ push(rcx); | 4067 __k push(rcx); |
3865 __ Set(rax, argc_ + 1); | 4068 __ Set(rax, argc_ + 1); |
3866 __ Set(rbx, 0); | 4069 __ Set(rbx, 0); |
3867 __ SetCallKind(rcx, CALL_AS_METHOD); | 4070 __ SetCallKind(rcx, CALL_AS_METHOD); |
3868 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY); | 4071 __ GetBuiltinEntry(rdx, Builtins::CALL_FUNCTION_PROXY); |
3869 { | 4072 { |
3870 Handle<Code> adaptor = | 4073 Handle<Code> adaptor = |
3871 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 4074 masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
3872 __ jmp(adaptor, RelocInfo::CODE_TARGET); | 4075 __ jmp(adaptor, RelocInfo::CODE_TARGET); |
3873 } | 4076 } |
3874 | 4077 |
3875 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | 4078 // CALL_NON_FUNCTION expects the non-function callee as receiver (instead |
3876 // of the original receiver from the call site). | 4079 // of the original receiver from the call site). |
3877 __ bind(&non_function); | 4080 __ bind(&non_function); |
3878 __ movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi); | 4081 __a movq(Operand(rsp, (argc_ + 1) * kPointerSize), rdi); |
3879 __ Set(rax, argc_); | 4082 __ Set(rax, argc_); |
3880 __ Set(rbx, 0); | 4083 __ Set(rbx, 0); |
3881 __ SetCallKind(rcx, CALL_AS_METHOD); | 4084 __ SetCallKind(rcx, CALL_AS_METHOD); |
3882 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); | 4085 __ GetBuiltinEntry(rdx, Builtins::CALL_NON_FUNCTION); |
3883 Handle<Code> adaptor = | 4086 Handle<Code> adaptor = |
3884 Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline(); | 4087 Isolate::Current()->builtins()->ArgumentsAdaptorTrampoline(); |
3885 __ Jump(adaptor, RelocInfo::CODE_TARGET); | 4088 __ Jump(adaptor, RelocInfo::CODE_TARGET); |
3886 } | 4089 } |
3887 | 4090 |
3888 | 4091 |
(...skipping 171 matching lines...)
4060 // Check for failure result. | 4263 // Check for failure result. |
4061 Label failure_returned; | 4264 Label failure_returned; |
4062 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); | 4265 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); |
4063 #ifdef _WIN64 | 4266 #ifdef _WIN64 |
4064 // If return value is on the stack, pop it to registers. | 4267 // If return value is on the stack, pop it to registers. |
4065 if (result_size_ > 1) { | 4268 if (result_size_ > 1) { |
4066 ASSERT_EQ(2, result_size_); | 4269 ASSERT_EQ(2, result_size_); |
4067 // Read result values stored on stack. Result is stored | 4270 // Read result values stored on stack. Result is stored |
4068 // above the four argument mirror slots and the two | 4271 // above the four argument mirror slots and the two |
4069 // Arguments object slots. | 4272 // Arguments object slots. |
4070 __ movq(rax, Operand(rsp, 6 * kPointerSize)); | 4273 __s movq(rax, Operand(rsp, 6 * kPointerSize)); |
4071 __ movq(rdx, Operand(rsp, 7 * kPointerSize)); | 4274 __s movq(rdx, Operand(rsp, 7 * kPointerSize)); |
4072 } | 4275 } |
4073 #endif | 4276 #endif |
4074 __ lea(rcx, Operand(rax, 1)); | 4277 __ lea(rcx, Operand(rax, 1)); |
4075 // Lower 2 bits of rcx are 0 iff rax has failure tag. | 4278 // Lower 2 bits of rcx are 0 iff rax has failure tag. |
4076 __ testl(rcx, Immediate(kFailureTagMask)); | 4279 __ testl(rcx, Immediate(kFailureTagMask)); |
4077 __ j(zero, &failure_returned); | 4280 __ j(zero, &failure_returned); |
4078 | 4281 |
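The lea/test pair above is the usual failure-tag check: a Failure value has both low tag bits set, so adding one clears them. A hedged C++ analogue; the constants mirror the classic scheme (kFailureTag == 3, two tag bits) and should be read as assumptions:

    #include <cassert>
    #include <cstdint>

    constexpr uintptr_t kFailureTag = 3;
    constexpr uintptr_t kFailureTagMask = (1 << 2) - 1;

    bool IsFailure(uintptr_t raw) {
      // Equivalent of: lea rcx, [rax + 1]; test rcx, kFailureTagMask; jz failure_returned
      return ((raw + 1) & kFailureTagMask) == 0;
    }

    int main() {
      static_assert(((kFailureTag + 1) & kFailureTagMask) == 0, "tag check");
      assert(IsFailure(0x1000 | kFailureTag));  // tagged failure value
      assert(!IsFailure(0x1000));               // aligned heap object pointer
      assert(!IsFailure(42 << 1));              // smi-like value
    }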
4079 // Exit the JavaScript to C++ exit frame. | 4282 // Exit the JavaScript to C++ exit frame. |
4080 __ LeaveExitFrame(save_doubles_); | 4283 __ LeaveExitFrame(save_doubles_); |
4081 __ ret(0); | 4284 __ ret(0); |
(...skipping 88 matching lines...)
4170 // Do space-specific GC and retry runtime call. | 4373 // Do space-specific GC and retry runtime call. |
4171 GenerateCore(masm, | 4374 GenerateCore(masm, |
4172 &throw_normal_exception, | 4375 &throw_normal_exception, |
4173 &throw_termination_exception, | 4376 &throw_termination_exception, |
4174 &throw_out_of_memory_exception, | 4377 &throw_out_of_memory_exception, |
4175 true, | 4378 true, |
4176 false); | 4379 false); |
4177 | 4380 |
4178 // Do full GC and retry runtime call one final time. | 4381 // Do full GC and retry runtime call one final time. |
4179 Failure* failure = Failure::InternalError(); | 4382 Failure* failure = Failure::InternalError(); |
4180 __ movq(rax, failure, RelocInfo::NONE64); | 4383 __n movq(rax, failure, RelocInfo::NONE64); |
4181 GenerateCore(masm, | 4384 GenerateCore(masm, |
4182 &throw_normal_exception, | 4385 &throw_normal_exception, |
4183 &throw_termination_exception, | 4386 &throw_termination_exception, |
4184 &throw_out_of_memory_exception, | 4387 &throw_out_of_memory_exception, |
4185 true, | 4388 true, |
4186 true); | 4389 true); |
4187 | 4390 |
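The repeated GenerateCore expansions form a retry ladder: a plain call, a retry after a space-specific GC, and a final attempt after a full GC. A compact C++ sketch of that control flow; the names are illustrative, and the real stub emits three copies of the call sequence rather than looping:

    #include <functional>
    #include <optional>

    std::optional<int> RunWithGcRetries(const std::function<std::optional<int>()>& call,
                                        const std::function<void()>& scavenge,
                                        const std::function<void()>& full_gc) {
      if (auto r = call()) return r;  // fast path: no GC
      scavenge();
      if (auto r = call()) return r;  // retry after space-specific GC
      full_gc();
      return call();                  // last attempt after full GC
    }

    int main() {
      int attempts = 0;
      auto result = RunWithGcRetries(
          [&]() -> std::optional<int> { return ++attempts < 3 ? std::nullopt
                                                              : std::optional<int>(7); },
          [] { /* space-specific GC */ },
          [] { /* full GC */ });
      return (result && *result == 7 && attempts == 3) ? 0 : 1;
    }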
4188 __ bind(&throw_out_of_memory_exception); | 4391 __ bind(&throw_out_of_memory_exception); |
4189 // Set external caught exception to false. | 4392 // Set external caught exception to false. |
4190 Isolate* isolate = masm->isolate(); | 4393 Isolate* isolate = masm->isolate(); |
4191 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, | 4394 ExternalReference external_caught(Isolate::kExternalCaughtExceptionAddress, |
4192 isolate); | 4395 isolate); |
4193 __ Set(rax, static_cast<int64_t>(false)); | 4396 __ Set(rax, static_cast<int64_t>(false)); |
4194 __ Store(external_caught, rax); | 4397 __ Store(external_caught, rax); |
4195 | 4398 |
4196 // Set pending exception and rax to out of memory exception. | 4399 // Set pending exception and rax to out of memory exception. |
4197 ExternalReference pending_exception(Isolate::kPendingExceptionAddress, | 4400 ExternalReference pending_exception(Isolate::kPendingExceptionAddress, |
4198 isolate); | 4401 isolate); |
4199 Label already_have_failure; | 4402 Label already_have_failure; |
4200 JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure); | 4403 JumpIfOOM(masm, rax, kScratchRegister, &already_have_failure); |
4201 __ movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64); | 4404 __n movq(rax, Failure::OutOfMemoryException(0x1), RelocInfo::NONE64); |
4202 __ bind(&already_have_failure); | 4405 __ bind(&already_have_failure); |
4203 __ Store(pending_exception, rax); | 4406 __ Store(pending_exception, rax); |
4204 // Fall through to the next label. | 4407 // Fall through to the next label. |
4205 | 4408 |
4206 __ bind(&throw_termination_exception); | 4409 __ bind(&throw_termination_exception); |
4207 __ ThrowUncatchable(rax); | 4410 __ ThrowUncatchable(rax); |
4208 | 4411 |
4209 __ bind(&throw_normal_exception); | 4412 __ bind(&throw_normal_exception); |
4210 __ Throw(rax); | 4413 __ Throw(rax); |
4211 } | 4414 } |
4212 | 4415 |
4213 | 4416 |
4214 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 4417 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
4215 Label invoke, handler_entry, exit; | 4418 Label invoke, handler_entry, exit; |
4216 Label not_outermost_js, not_outermost_js_2; | 4419 Label not_outermost_js, not_outermost_js_2; |
4217 | 4420 |
4218 ProfileEntryHookStub::MaybeCallEntryHook(masm); | 4421 ProfileEntryHookStub::MaybeCallEntryHook(masm); |
4219 | 4422 |
4220 { // NOLINT. Scope block confuses linter. | 4423 { // NOLINT. Scope block confuses linter. |
4221 MacroAssembler::NoRootArrayScope uninitialized_root_register(masm); | 4424 MacroAssembler::NoRootArrayScope uninitialized_root_register(masm); |
4222 // Set up frame. | 4425 // Set up frame. |
4223 __ push(rbp); | 4426 __ push(rbp); |
4224 __ movq(rbp, rsp); | 4427 __ movq(rbp, rsp); |
4225 | 4428 |
4226 // Push the stack frame type marker twice. | 4429 // Push the stack frame type marker twice. |
4227 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 4430 int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; |
4228 // Scratch register is neither callee-save, nor an argument register on any | 4431 // Scratch register is neither callee-save, nor an argument register on any |
4229 // platform. It's free to use at this point. | 4432 // platform. It's free to use at this point. |
4230 // Cannot use smi-register for loading yet. | 4433 // Cannot use smi-register for loading yet. |
| 4434 #ifndef V8_TARGET_ARCH_X32 |
4231 __ movq(kScratchRegister, | 4435 __ movq(kScratchRegister, |
4232 reinterpret_cast<uint64_t>(Smi::FromInt(marker)), | 4436 reinterpret_cast<uint64_t>(Smi::FromInt(marker)), |
4233 RelocInfo::NONE64); | 4437 RelocInfo::NONE64); |
| 4438 #else |
| 4439 __ movl(kScratchRegister, |
| 4440 reinterpret_cast<uint32_t>(Smi::FromInt(marker)), |
| 4441 RelocInfo::NONE32); |
| 4442 #endif |
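The #ifdef above exists because the marker smi does not fit a 32-bit immediate on plain x64 but does on the x32 port. A sketch of the two tagging schemes as I understand them; the exact encodings are assumptions taken from that era's V8 and are not shown in this diff:

    #include <cassert>
    #include <cstdint>

    constexpr uint64_t SmiX64(uint32_t v) { return uint64_t(v) << 32; }  // value in the upper half word
    constexpr uint32_t SmiX32(uint32_t v) { return v << 1; }             // 31-bit smi, low tag bit zero

    int main() {
      assert(SmiX64(2) == (uint64_t(2) << 32));  // needs a 64-bit immediate (movq, NONE64)
      assert(SmiX32(2) == 4);                    // fits a 32-bit immediate (movl, NONE32)
    }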
4234 __ push(kScratchRegister); // context slot | 4443 __ push(kScratchRegister); // context slot |
4235 __ push(kScratchRegister); // function slot | 4444 __ push(kScratchRegister); // function slot |
4236 // Save callee-saved registers (X64/Win64 calling conventions). | 4445 // Save callee-saved registers (X64/Win64 calling conventions). |
4237 __ push(r12); | 4446 __k push(r12); |
4238 __ push(r13); | 4447 __k push(r13); |
4239 __ push(r14); | 4448 __k push(r14); |
4240 __ push(r15); | 4449 __k push(r15); |
4241 #ifdef _WIN64 | 4450 #ifdef _WIN64 |
4242 __ push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI. | 4451 __k push(rdi); // Only callee save in Win64 ABI, argument in AMD64 ABI. |
4243 __ push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI. | 4452 __k push(rsi); // Only callee save in Win64 ABI, argument in AMD64 ABI. |
4244 #endif | 4453 #endif |
4245 __ push(rbx); | 4454 __k push(rbx); |
4246 | 4455 |
4247 #ifdef _WIN64 | 4456 #ifdef _WIN64 |
4248 // On Win64 XMM6-XMM15 are callee-save | 4457 // On Win64 XMM6-XMM15 are callee-save |
4249 __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize)); | 4458 __ subq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize)); |
4250 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6); | 4459 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 0), xmm6); |
4251 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7); | 4460 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 1), xmm7); |
4252 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8); | 4461 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 2), xmm8); |
4253 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9); | 4462 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3), xmm9); |
4254 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10); | 4463 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4), xmm10); |
4255 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11); | 4464 __ movdqu(Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5), xmm11); |
(...skipping 35 matching lines...)
4291 // Jump to a faked try block that does the invoke, with a faked catch | 4500 // Jump to a faked try block that does the invoke, with a faked catch |
4292 // block that sets the pending exception. | 4501 // block that sets the pending exception. |
4293 __ jmp(&invoke); | 4502 __ jmp(&invoke); |
4294 __ bind(&handler_entry); | 4503 __ bind(&handler_entry); |
4295 handler_offset_ = handler_entry.pos(); | 4504 handler_offset_ = handler_entry.pos(); |
4296 // Caught exception: Store result (exception) in the pending exception | 4505 // Caught exception: Store result (exception) in the pending exception |
4297 // field in the JSEnv and return a failure sentinel. | 4506 // field in the JSEnv and return a failure sentinel. |
4298 ExternalReference pending_exception(Isolate::kPendingExceptionAddress, | 4507 ExternalReference pending_exception(Isolate::kPendingExceptionAddress, |
4299 isolate); | 4508 isolate); |
4300 __ Store(pending_exception, rax); | 4509 __ Store(pending_exception, rax); |
4301 __ movq(rax, Failure::Exception(), RelocInfo::NONE64); | 4510 __n movq(rax, Failure::Exception(), RelocInfo::NONE64); |
4302 __ jmp(&exit); | 4511 __ jmp(&exit); |
4303 | 4512 |
4304 // Invoke: Link this frame into the handler chain. There's only one | 4513 // Invoke: Link this frame into the handler chain. There's only one |
4305 // handler block in this code object, so its index is 0. | 4514 // handler block in this code object, so its index is 0. |
4306 __ bind(&invoke); | 4515 __ bind(&invoke); |
4307 __ PushTryHandler(StackHandler::JS_ENTRY, 0); | 4516 __ PushTryHandler(StackHandler::JS_ENTRY, 0); |
4308 | 4517 |
4309 // Clear any pending exceptions. | 4518 // Clear any pending exceptions. |
4310 __ LoadRoot(rax, Heap::kTheHoleValueRootIndex); | 4519 __ LoadRoot(rax, Heap::kTheHoleValueRootIndex); |
4311 __ Store(pending_exception, rax); | 4520 __ Store(pending_exception, rax); |
(...skipping 43 matching lines...)
4355 __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3)); | 4564 __ movdqu(xmm9, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 3)); |
4356 __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4)); | 4565 __ movdqu(xmm10, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 4)); |
4357 __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5)); | 4566 __ movdqu(xmm11, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 5)); |
4358 __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6)); | 4567 __ movdqu(xmm12, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 6)); |
4359 __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7)); | 4568 __ movdqu(xmm13, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 7)); |
4360 __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8)); | 4569 __ movdqu(xmm14, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 8)); |
4361 __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9)); | 4570 __ movdqu(xmm15, Operand(rsp, EntryFrameConstants::kXMMRegisterSize * 9)); |
4362 __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize)); | 4571 __ addq(rsp, Immediate(EntryFrameConstants::kXMMRegistersBlockSize)); |
4363 #endif | 4572 #endif |
4364 | 4573 |
4365 __ pop(rbx); | 4574 __k pop(rbx); |
4366 #ifdef _WIN64 | 4575 #ifdef _WIN64 |
4367 // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI. | 4576 // Callee-saved in Win64 ABI, arguments/volatile in AMD64 ABI. |
4368 __ pop(rsi); | 4577 __k pop(rsi); |
4369 __ pop(rdi); | 4578 __k pop(rdi); |
4370 #endif | 4579 #endif |
4371 __ pop(r15); | 4580 __k pop(r15); |
4372 __ pop(r14); | 4581 __k pop(r14); |
4373 __ pop(r13); | 4582 __k pop(r13); |
4374 __ pop(r12); | 4583 __k pop(r12); |
4375 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers | 4584 __ addq(rsp, Immediate(2 * kPointerSize)); // remove markers |
4376 | 4585 |
4377 // Restore frame pointer and return. | 4586 // Restore frame pointer and return. |
4378 __ pop(rbp); | 4587 __ pop(rbp); |
4379 __ ret(0); | 4588 __ ret(0); |
4380 } | 4589 } |
4381 | 4590 |
4382 | 4591 |
4383 void InstanceofStub::Generate(MacroAssembler* masm) { | 4592 void InstanceofStub::Generate(MacroAssembler* masm) { |
4384 // Implements "value instanceof function" operator. | 4593 // Implements "value instanceof function" operator. |
4385 // Expected input state with no inline cache: | 4594 // Expected input state with no inline cache: |
4386 // rsp[0] : return address | 4595 // rsp[0] : return address |
4387 // rsp[8] : function pointer | 4596 // rsp[8] : function pointer |
4388 // rsp[16] : value | 4597 // rsp[16] : value |
4389 // Expected input state with an inline one-element cache: | 4598 // Expected input state with an inline one-element cache: |
4390 // rsp[0] : return address | 4599 // rsp[0] : return address |
4391 // rsp[8] : offset from return address to location of inline cache | 4600 // rsp[8] : offset from return address to location of inline cache |
4392 // rsp[16] : function pointer | 4601 // rsp[16] : function pointer |
4393 // rsp[24] : value | 4602 // rsp[24] : value |
4394 // Returns a bitwise zero to indicate that the value | 4603 // Returns a bitwise zero to indicate that the value |
4395 // is an instance of the function and anything else to | 4604 // is an instance of the function and anything else to |
4396 // indicate that the value is not an instance. | 4605 // indicate that the value is not an instance. |
4397 | 4606 |
4398 static const int kOffsetToMapCheckValue = 2; | 4607 static const int kOffsetToMapCheckValue = 2; |
| 4608 #ifndef V8_TARGET_ARCH_X32 |
4399 static const int kOffsetToResultValue = 18; | 4609 static const int kOffsetToResultValue = 18; |
4400 // The last 4 bytes of the instruction sequence | 4610 // The last 4 bytes of the instruction sequence |
4401 // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset)) | 4611 // movq(rdi, FieldOperand(rax, HeapObject::kMapOffset)) |
4402 // Move(kScratchRegister, Factory::the_hole_value()) | 4612 // Move(kScratchRegister, Factory::the_hole_value()) |
4403 // in front of the hole value address. | 4613 // in front of the hole value address. |
4404 static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78; | 4614 static const unsigned int kWordBeforeMapCheckValue = 0xBA49FF78; |
4405 // The last 4 bytes of the instruction sequence | 4615 // The last 4 bytes of the instruction sequence |
4406 // __ j(not_equal, &cache_miss); | 4616 // __ j(not_equal, &cache_miss); |
4407 // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex); | 4617 // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex); |
4408 // before the offset of the hole value in the root array. | 4618 // before the offset of the hole value in the root array. |
4409 static const unsigned int kWordBeforeResultValue = 0x458B4909; | 4619 static const unsigned int kWordBeforeResultValue = 0x458B4909; |
| 4620 #else |
| 4621 static const int kOffsetToResultValue = 14; |
| 4622 // The last 4 bytes of the instruction sequence |
| 4623 // movl(rdi, FieldOperand(rax, HeapObject::kMapOffset)) |
| 4624 // Move(kScratchRegister, Factory::the_hole_value()) |
| 4625 // in front of the hole value address. |
| 4626 static const unsigned int kWordBeforeMapCheckValue = 0xBA41FF78; |
| 4627 // The last 4 bytes of the instruction sequence |
| 4628 // __ j(not_equal, &cache_miss); |
| 4629 // __ LoadRoot(ToRegister(instr->result()), Heap::kTheHoleValueRootIndex); |
| 4630 // before the offset of the hole value in the root array. |
| 4631 static const unsigned int kWordBeforeResultValue = 0x458B4109; |
| 4632 #endif |
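The two sets of constants differ only because the patched instruction sequence uses 64-bit operands on plain x64 (movq, REX prefix 0x49) and 32-bit operands on the x32 port (movl, REX prefix 0x41), which is also what shortens kOffsetToResultValue. A small decoder for the REX byte, purely for illustration:

    #include <cassert>
    #include <cstdint>

    struct Rex { bool w, r, x, b; };

    constexpr Rex DecodeRex(uint8_t byte) {
      // A REX prefix has the form 0100WRXB.
      return Rex{bool(byte & 8), bool(byte & 4), bool(byte & 2), bool(byte & 1)};
    }

    int main() {
      static_assert((0x49 & 0xF0) == 0x40 && (0x41 & 0xF0) == 0x40, "both are REX prefixes");
      assert(DecodeRex(0x49).w && DecodeRex(0x49).b);   // movq variant: 64-bit operand size
      assert(!DecodeRex(0x41).w && DecodeRex(0x41).b);  // movl variant: 32-bit operand size
    }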
4410 // Only the inline check flag is supported on X64. | 4633 // Only the inline check flag is supported on X64. |
4411 ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck()); | 4634 ASSERT(flags_ == kNoFlags || HasCallSiteInlineCheck()); |
4412 int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0; | 4635 int extra_stack_space = HasCallSiteInlineCheck() ? kPointerSize : 0; |
4413 | 4636 |
4414 // Get the object - go slow case if it's a smi. | 4637 // Get the object - go slow case if it's a smi. |
4415 Label slow; | 4638 Label slow; |
4416 | 4639 |
| 4640 #ifndef V8_TARGET_ARCH_X32 |
4417 __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space)); | 4641 __ movq(rax, Operand(rsp, 2 * kPointerSize + extra_stack_space)); |
| 4642 #else |
| 4643 __ movl(rax, Operand(rsp, 1 * kHWRegSize + 1 * kPointerSize + |
| 4644 extra_stack_space)); |
| 4645 #endif |
4418 __ JumpIfSmi(rax, &slow); | 4646 __ JumpIfSmi(rax, &slow); |
4419 | 4647 |
4420 // Check that the left hand is a JS object. Leave its map in rax. | 4648 // Check that the left hand is a JS object. Leave its map in rax. |
4421 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax); | 4649 __ CmpObjectType(rax, FIRST_SPEC_OBJECT_TYPE, rax); |
4422 __ j(below, &slow); | 4650 __ j(below, &slow); |
4423 __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE); | 4651 __ CmpInstanceType(rax, LAST_SPEC_OBJECT_TYPE); |
4424 __ j(above, &slow); | 4652 __ j(above, &slow); |
4425 | 4653 |
4426 // Get the prototype of the function. | 4654 // Get the prototype of the function. |
4427 __ movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space)); | 4655 __a movq(rdx, Operand(rsp, 1 * kPointerSize + extra_stack_space)); |
4428 // rdx is function, rax is map. | 4656 // rdx is function, rax is map. |
4429 | 4657 |
4430 // If there is a call site cache don't look in the global cache, but do the | 4658 // If there is a call site cache don't look in the global cache, but do the |
4431 // real lookup and update the call site cache. | 4659 // real lookup and update the call site cache. |
4432 if (!HasCallSiteInlineCheck()) { | 4660 if (!HasCallSiteInlineCheck()) { |
4433 // Look up the function and the map in the instanceof cache. | 4661 // Look up the function and the map in the instanceof cache. |
4434 Label miss; | 4662 Label miss; |
4435 __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); | 4663 __ CompareRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); |
4436 __ j(not_equal, &miss, Label::kNear); | 4664 __ j(not_equal, &miss, Label::kNear); |
4437 __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex); | 4665 __ CompareRoot(rax, Heap::kInstanceofCacheMapRootIndex); |
(...skipping 14 matching lines...)
4452 | 4680 |
4453 // Register mapping: | 4681 // Register mapping: |
4454 // rax is object map. | 4682 // rax is object map. |
4455 // rdx is function. | 4683 // rdx is function. |
4456 // rbx is function prototype. | 4684 // rbx is function prototype. |
4457 if (!HasCallSiteInlineCheck()) { | 4685 if (!HasCallSiteInlineCheck()) { |
4458 __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); | 4686 __ StoreRoot(rdx, Heap::kInstanceofCacheFunctionRootIndex); |
4459 __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); | 4687 __ StoreRoot(rax, Heap::kInstanceofCacheMapRootIndex); |
4460 } else { | 4688 } else { |
4461 // Get return address and delta to inlined map check. | 4689 // Get return address and delta to inlined map check. |
4462 __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); | 4690 __q movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); |
4463 __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); | 4691 __a subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); |
4464 if (FLAG_debug_code) { | 4692 if (FLAG_debug_code) { |
4465 __ movl(rdi, Immediate(kWordBeforeMapCheckValue)); | 4693 __ movl(rdi, Immediate(kWordBeforeMapCheckValue)); |
4466 __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi); | 4694 __ cmpl(Operand(kScratchRegister, kOffsetToMapCheckValue - 4), rdi); |
4467 __ Assert(equal, "InstanceofStub unexpected call site cache (check)."); | 4695 __ Assert(equal, "InstanceofStub unexpected call site cache (check)."); |
4468 } | 4696 } |
4469 __ movq(kScratchRegister, | 4697 __ movq(kScratchRegister, |
4470 Operand(kScratchRegister, kOffsetToMapCheckValue)); | 4698 Operand(kScratchRegister, kOffsetToMapCheckValue)); |
4471 __ movq(Operand(kScratchRegister, 0), rax); | 4699 __ movq(Operand(kScratchRegister, 0), rax); |
4472 } | 4700 } |
4473 | 4701 |
(...skipping 19 matching lines...)
4493 // Store bitwise zero in the cache. This is a Smi in GC terms. | 4721 // Store bitwise zero in the cache. This is a Smi in GC terms. |
4494 STATIC_ASSERT(kSmiTag == 0); | 4722 STATIC_ASSERT(kSmiTag == 0); |
4495 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); | 4723 __ StoreRoot(rax, Heap::kInstanceofCacheAnswerRootIndex); |
4496 } else { | 4724 } else { |
4497 // Store offset of true in the root array at the inline check site. | 4725 // Store offset of true in the root array at the inline check site. |
4498 int true_offset = 0x100 + | 4726 int true_offset = 0x100 + |
4499 (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; | 4727 (Heap::kTrueValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; |
4500 // Assert it is a 1-byte signed value. | 4728 // Assert it is a 1-byte signed value. |
4501 ASSERT(true_offset >= 0 && true_offset < 0x100); | 4729 ASSERT(true_offset >= 0 && true_offset < 0x100); |
4502 __ movl(rax, Immediate(true_offset)); | 4730 __ movl(rax, Immediate(true_offset)); |
4503 __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); | 4731 __q movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); |
4504 __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); | 4732 __a subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); |
4505 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); | 4733 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); |
4506 if (FLAG_debug_code) { | 4734 if (FLAG_debug_code) { |
4507 __ movl(rax, Immediate(kWordBeforeResultValue)); | 4735 __ movl(rax, Immediate(kWordBeforeResultValue)); |
4508 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax); | 4736 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax); |
4509 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)."); | 4737 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)."); |
4510 } | 4738 } |
4511 __ Set(rax, 0); | 4739 __ Set(rax, 0); |
4512 } | 4740 } |
4513 __ ret(2 * kPointerSize + extra_stack_space); | 4741 __ ret(2 * kPointerSize + extra_stack_space); |
4514 | 4742 |
4515 __ bind(&is_not_instance); | 4743 __ bind(&is_not_instance); |
4516 if (!HasCallSiteInlineCheck()) { | 4744 if (!HasCallSiteInlineCheck()) { |
4517 // We have to store a non-zero value in the cache. | 4745 // We have to store a non-zero value in the cache. |
4518 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); | 4746 __ StoreRoot(kScratchRegister, Heap::kInstanceofCacheAnswerRootIndex); |
4519 } else { | 4747 } else { |
4520 // Store offset of false in the root array at the inline check site. | 4748 // Store offset of false in the root array at the inline check site. |
4521 int false_offset = 0x100 + | 4749 int false_offset = 0x100 + |
4522 (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; | 4750 (Heap::kFalseValueRootIndex << kPointerSizeLog2) - kRootRegisterBias; |
4523 // Assert it is a 1-byte signed value. | 4751 // Assert it is a 1-byte signed value. |
4524 ASSERT(false_offset >= 0 && false_offset < 0x100); | 4752 ASSERT(false_offset >= 0 && false_offset < 0x100); |
4525 __ movl(rax, Immediate(false_offset)); | 4753 __ movl(rax, Immediate(false_offset)); |
4526 __ movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); | 4754 __q movq(kScratchRegister, Operand(rsp, 0 * kPointerSize)); |
4527 __ subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); | 4755 __a subq(kScratchRegister, Operand(rsp, 1 * kPointerSize)); |
4528 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); | 4756 __ movb(Operand(kScratchRegister, kOffsetToResultValue), rax); |
4529 if (FLAG_debug_code) { | 4757 if (FLAG_debug_code) { |
4530 __ movl(rax, Immediate(kWordBeforeResultValue)); | 4758 __ movl(rax, Immediate(kWordBeforeResultValue)); |
4531 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax); | 4759 __ cmpl(Operand(kScratchRegister, kOffsetToResultValue - 4), rax); |
4532 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)"); | 4760 __ Assert(equal, "InstanceofStub unexpected call site cache (mov)"); |
4533 } | 4761 } |
4534 } | 4762 } |
4535 __ ret(2 * kPointerSize + extra_stack_space); | 4763 __ ret(2 * kPointerSize + extra_stack_space); |
4536 | 4764 |
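Both branches patch a single displacement byte at the inline check site so the same root-array load yields either the true or the false value. Below is a sketch of the displacement arithmetic; only the shape of the computation is taken from the code above, while kRootRegisterBias and the root indices are placeholders for the real values in V8's headers:

    #include <cassert>

    constexpr int kPointerSizeLog2 = 3;
    constexpr int kRootRegisterBias = 128;   // placeholder value
    constexpr int kTrueValueRootIndex = 7;   // placeholder index
    constexpr int kFalseValueRootIndex = 8;  // placeholder index

    constexpr int RootDisplacement(int root_index) {
      return 0x100 + (root_index << kPointerSizeLog2) - kRootRegisterBias;
    }

    int main() {
      // Both displacements must encode in the single byte that gets patched.
      assert(RootDisplacement(kTrueValueRootIndex) >= 0 &&
             RootDisplacement(kTrueValueRootIndex) < 0x100);
      assert(RootDisplacement(kFalseValueRootIndex) >= 0 &&
             RootDisplacement(kFalseValueRootIndex) < 0x100);
    }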
4537 // Slow-case: Go through the JavaScript implementation. | 4765 // Slow-case: Go through the JavaScript implementation. |
4538 __ bind(&slow); | 4766 __ bind(&slow); |
4539 if (HasCallSiteInlineCheck()) { | 4767 if (HasCallSiteInlineCheck()) { |
4540 // Remove extra value from the stack. | 4768 // Remove extra value from the stack. |
4541 __ pop(rcx); | 4769 __k pop(rcx); |
4542 __ pop(rax); | 4770 __ pop(rax); |
4543 __ push(rcx); | 4771 __k push(rcx); |
4544 } | 4772 } |
4545 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 4773 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); |
4546 } | 4774 } |
4547 | 4775 |
4548 | 4776 |
4549 // Passing arguments in registers is not supported. | 4777 // Passing arguments in registers is not supported. |
4550 Register InstanceofStub::left() { return no_reg; } | 4778 Register InstanceofStub::left() { return no_reg; } |
4551 | 4779 |
4552 | 4780 |
4553 Register InstanceofStub::right() { return no_reg; } | 4781 Register InstanceofStub::right() { return no_reg; } |
(...skipping 129 matching lines...)
4683 | 4911 |
4684 __ Abort("Unexpected fallthrough from CharFromCode slow case"); | 4912 __ Abort("Unexpected fallthrough from CharFromCode slow case"); |
4685 } | 4913 } |
4686 | 4914 |
4687 | 4915 |
4688 void StringAddStub::Generate(MacroAssembler* masm) { | 4916 void StringAddStub::Generate(MacroAssembler* masm) { |
4689 Label call_runtime, call_builtin; | 4917 Label call_runtime, call_builtin; |
4690 Builtins::JavaScript builtin_id = Builtins::ADD; | 4918 Builtins::JavaScript builtin_id = Builtins::ADD; |
4691 | 4919 |
4692 // Load the two arguments. | 4920 // Load the two arguments. |
4693 __ movq(rax, Operand(rsp, 2 * kPointerSize)); // First argument (left). | 4921 // First argument (left). |
4694 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); // Second argument (right). | 4922 __a movq(rax, Operand(rsp, 2 * kPointerSize)); |
| 4923 // Second argument (right). |
| 4924 __a movq(rdx, Operand(rsp, 1 * kPointerSize)); |
4695 | 4925 |
4696 // Make sure that both arguments are strings if not known in advance. | 4926 // Make sure that both arguments are strings if not known in advance. |
4697 if ((flags_ & NO_STRING_ADD_FLAGS) != 0) { | 4927 if ((flags_ & NO_STRING_ADD_FLAGS) != 0) { |
4698 __ JumpIfSmi(rax, &call_runtime); | 4928 __ JumpIfSmi(rax, &call_runtime); |
4699 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8); | 4929 __ CmpObjectType(rax, FIRST_NONSTRING_TYPE, r8); |
4700 __ j(above_equal, &call_runtime); | 4930 __ j(above_equal, &call_runtime); |
4701 | 4931 |
4702 // First argument is a string, test second. | 4932 // First argument is a string, test second. |
4703 __ JumpIfSmi(rdx, &call_runtime); | 4933 __ JumpIfSmi(rdx, &call_runtime); |
4704 __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9); | 4934 __ CmpObjectType(rdx, FIRST_NONSTRING_TYPE, r9); |
4705 __ j(above_equal, &call_runtime); | 4935 __ j(above_equal, &call_runtime); |
4706 } else { | 4936 } else { |
4707 // Here at least one of the arguments is definitely a string. | 4937 // Here at least one of the arguments is definitely a string. |
4708 // We convert the one that is not known to be a string. | 4938 // We convert the one that is not known to be a string. |
4709 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { | 4939 if ((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) == 0) { |
4710 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); | 4940 ASSERT((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) != 0); |
| 4941 #ifndef V8_TARGET_ARCH_X32 |
4711 GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi, | 4942 GenerateConvertArgument(masm, 2 * kPointerSize, rax, rbx, rcx, rdi, |
4712 &call_builtin); | 4943 &call_builtin); |
| 4944 #else |
| 4945 GenerateConvertArgument(masm, 1 * kHWRegSize + 1 * kPointerSize, rax, |
| 4946 rbx, rcx, rdi, &call_builtin); |
| 4947 #endif |
4713 builtin_id = Builtins::STRING_ADD_RIGHT; | 4948 builtin_id = Builtins::STRING_ADD_RIGHT; |
4714 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { | 4949 } else if ((flags_ & NO_STRING_CHECK_RIGHT_IN_STUB) == 0) { |
4715 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); | 4950 ASSERT((flags_ & NO_STRING_CHECK_LEFT_IN_STUB) != 0); |
| 4951 #ifndef V8_TARGET_ARCH_X32 |
4716 GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi, | 4952 GenerateConvertArgument(masm, 1 * kPointerSize, rdx, rbx, rcx, rdi, |
4717 &call_builtin); | 4953 &call_builtin); |
| 4954 #else |
| 4955 GenerateConvertArgument(masm, 1 * kHWRegSize, rdx, rbx, rcx, rdi, |
| 4956 &call_builtin); |
| 4957 #endif |
4718 builtin_id = Builtins::STRING_ADD_LEFT; | 4958 builtin_id = Builtins::STRING_ADD_LEFT; |
4719 } | 4959 } |
4720 } | 4960 } |
4721 | 4961 |
4722 // Both arguments are strings. | 4962 // Both arguments are strings. |
4723 // rax: first string | 4963 // rax: first string |
4724 // rdx: second string | 4964 // rdx: second string |
4725 // Check if either of the strings are empty. In that case return the other. | 4965 // Check if either of the strings are empty. In that case return the other. |
4726 Label second_not_zero_length, both_not_zero_length; | 4966 Label second_not_zero_length, both_not_zero_length; |
4727 __ movq(rcx, FieldOperand(rdx, String::kLengthOffset)); | 4967 __ movq(rcx, FieldOperand(rdx, String::kLengthOffset)); |
(...skipping 26 matching lines...)
4754 // by the code above. | 4994 // by the code above. |
4755 if (flags_ != NO_STRING_ADD_FLAGS) { | 4995 if (flags_ != NO_STRING_ADD_FLAGS) { |
4756 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); | 4996 __ movq(r8, FieldOperand(rax, HeapObject::kMapOffset)); |
4757 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); | 4997 __ movq(r9, FieldOperand(rdx, HeapObject::kMapOffset)); |
4758 } | 4998 } |
4759 // Get the instance types of the two strings as they will be needed soon. | 4999 // Get the instance types of the two strings as they will be needed soon. |
4760 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); | 5000 __ movzxbl(r8, FieldOperand(r8, Map::kInstanceTypeOffset)); |
4761 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); | 5001 __ movzxbl(r9, FieldOperand(r9, Map::kInstanceTypeOffset)); |
4762 | 5002 |
4763 // Look at the length of the result of adding the two strings. | 5003 // Look at the length of the result of adding the two strings. |
| 5004 #ifndef V8_TARGET_ARCH_X32 |
4764 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); | 5005 STATIC_ASSERT(String::kMaxLength <= Smi::kMaxValue / 2); |
4765 __ SmiAdd(rbx, rbx, rcx); | 5006 __ SmiAdd(rbx, rbx, rcx); |
| 5007 #else |
| 5008 __ SmiAdd(rbx, rbx, rcx, &call_runtime); |
| 5009 #endif |
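The two SmiAdd calls differ because of smi range: on plain x64 the STATIC_ASSERT guarantees that the sum of two string lengths can never leave the smi range, while the x32 port's smaller smis need a runtime overflow check that bails out to &call_runtime. A sketch of that distinction; the limits are illustrative assumptions:

    #include <cassert>
    #include <cstdint>
    #include <limits>

    bool AddLengthsChecked(int32_t a, int32_t b, int32_t max_smi, int32_t* out) {
      int64_t sum = int64_t(a) + int64_t(b);
      if (sum > max_smi) return false;  // corresponds to jumping to &call_runtime
      *out = int32_t(sum);
      return true;
    }

    int main() {
      const int32_t kSmiMax64 = std::numeric_limits<int32_t>::max();  // 2^31 - 1
      const int32_t kSmiMax32 = (1 << 30) - 1;                        // assumed 31-bit smis
      const int32_t kMaxLength = kSmiMax64 / 2;                       // mirrors the STATIC_ASSERT bound
      int32_t sum;
      assert(AddLengthsChecked(kMaxLength, kMaxLength, kSmiMax64, &sum));   // never overflows on x64
      assert(!AddLengthsChecked(kMaxLength, kMaxLength, kSmiMax32, &sum));  // x32 must be able to bail out
    }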
4766 // Use the string table when adding two one character strings, as it | 5010 // Use the string table when adding two one character strings, as it |
4767 // helps later optimizations to return an internalized string here. | 5011 // helps later optimizations to return an internalized string here. |
4768 __ SmiCompare(rbx, Smi::FromInt(2)); | 5012 __ SmiCompare(rbx, Smi::FromInt(2)); |
4769 __ j(not_equal, &longer_than_two); | 5013 __ j(not_equal, &longer_than_two); |
4770 | 5014 |
4771 // Check that both strings are non-external ASCII strings. | 5015 // Check that both strings are non-external ASCII strings. |
4772 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, | 5016 __ JumpIfBothInstanceTypesAreNotSequentialAscii(r8, r9, rbx, rcx, |
4773 &call_runtime); | 5017 &call_runtime); |
4774 | 5018 |
4775 // Get the two characters forming the sub string. | 5019 // Get the two characters forming the sub string. |
(...skipping 228 matching lines...)
5004 | 5248 |
5005 | 5249 |
5006 void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) { | 5250 void StringAddStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
5007 __ push(rax); | 5251 __ push(rax); |
5008 __ push(rdx); | 5252 __ push(rdx); |
5009 } | 5253 } |
5010 | 5254 |
5011 | 5255 |
5012 void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm, | 5256 void StringAddStub::GenerateRegisterArgsPop(MacroAssembler* masm, |
5013 Register temp) { | 5257 Register temp) { |
5014 __ pop(temp); | 5258 __k pop(temp); |
5015 __ pop(rdx); | 5259 __ pop(rdx); |
5016 __ pop(rax); | 5260 __ pop(rax); |
5017 __ push(temp); | 5261 __k push(temp); |
5018 } | 5262 } |
5019 | 5263 |
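GenerateRegisterArgsPush and GenerateRegisterArgsPop shuttle the two operands across the return address so a called stub can pull them back into registers and still return normally; the __k variants presumably account for the return-address slot width on the x32 port. A toy model of the data movement, assuming the arguments were pushed before a call left its return address on top (a vector's back() stands in for rsp[0]):

    #include <cassert>
    #include <cstdint>
    #include <vector>

    struct Stack {
      std::vector<uint64_t> slots;
      void push(uint64_t v) { slots.push_back(v); }
      uint64_t pop() { uint64_t v = slots.back(); slots.pop_back(); return v; }
    };

    int main() {
      const uint64_t kRetAddr = 0xCAFE, kLeft = 1, kRight = 2;
      Stack s;
      s.push(kLeft);     // GenerateRegisterArgsPush: push rax
      s.push(kRight);    //                           push rdx
      s.push(kRetAddr);  // a subsequent call pushes its return address on top

      // GenerateRegisterArgsPop(temp): restore the registers, keep the return
      // address where the eventual ret expects it.
      uint64_t temp = s.pop();  // return address
      uint64_t rdx = s.pop();
      uint64_t rax = s.pop();
      s.push(temp);

      assert(rax == kLeft && rdx == kRight);
      assert(s.slots.size() == 1 && s.slots.back() == kRetAddr);
    }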
5020 | 5264 |
5021 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, | 5265 void StringAddStub::GenerateConvertArgument(MacroAssembler* masm, |
5022 int stack_offset, | 5266 int stack_offset, |
5023 Register arg, | 5267 Register arg, |
5024 Register scratch1, | 5268 Register scratch1, |
5025 Register scratch2, | 5269 Register scratch2, |
5026 Register scratch3, | 5270 Register scratch3, |
5027 Label* slow) { | 5271 Label* slow) { |
(...skipping 82 matching lines...)
5110 __ addl(count, count); | 5354 __ addl(count, count); |
5111 } | 5355 } |
5112 | 5356 |
5113 // Don't enter the rep movs if there are less than 4 bytes to copy. | 5357 // Don't enter the rep movs if there are less than 4 bytes to copy. |
5114 Label last_bytes; | 5358 Label last_bytes; |
5115 __ testl(count, Immediate(~(kPointerSize - 1))); | 5359 __ testl(count, Immediate(~(kPointerSize - 1))); |
5116 __ j(zero, &last_bytes, Label::kNear); | 5360 __ j(zero, &last_bytes, Label::kNear); |
5117 | 5361 |
5118 // Copy from edi to esi using rep movs instruction. | 5362 // Copy from edi to esi using rep movs instruction. |
5119 __ movl(kScratchRegister, count); | 5363 __ movl(kScratchRegister, count); |
5120 __ shr(count, Immediate(kPointerSizeLog2)); // Number of doublewords to copy. | 5364 // Number of doublewords to copy. |
| 5365 __ shr(count, Immediate(kPointerSizeLog2)); |
5121 __ repmovsq(); | 5366 __ repmovsq(); |
5122 | 5367 |
5123 // Find number of bytes left. | 5368 // Find number of bytes left. |
5124 __ movl(count, kScratchRegister); | 5369 __ movl(count, kScratchRegister); |
5125 __ and_(count, Immediate(kPointerSize - 1)); | 5370 __ and_(count, Immediate(kPointerSize - 1)); |
5126 | 5371 |
5127 // Check if there are more bytes to copy. | 5372 // Check if there are more bytes to copy. |
5128 __ bind(&last_bytes); | 5373 __ bind(&last_bytes); |
5129 __ testl(count, count); | 5374 __ testl(count, count); |
5130 __ j(zero, &done, Label::kNear); | 5375 __ j(zero, &done, Label::kNear); |
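The copy above moves whole pointer-sized words with rep movsq and then finishes any remaining bytes. A plain C++ analogue of that strategy, not the emitted code:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    void CopyBytes(uint8_t* dst, const uint8_t* src, size_t count) {
      const size_t kWordSize = sizeof(uint64_t);
      size_t words = count / kWordSize;          // shr count, kPointerSizeLog2
      for (size_t i = 0; i < words; ++i) {       // rep movsq
        std::memcpy(dst + i * kWordSize, src + i * kWordSize, kWordSize);
      }
      for (size_t i = words * kWordSize; i < count; ++i) {  // &last_bytes tail loop
        dst[i] = src[i];
      }
    }

    int main() {
      uint8_t src[11] = {1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11}, dst[11] = {0};
      CopyBytes(dst, src, sizeof(src));
      assert(std::memcmp(dst, src, sizeof(src)) == 0);
    }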
(...skipping 199 matching lines...)
5330 | 5575 |
5331 void SubStringStub::Generate(MacroAssembler* masm) { | 5576 void SubStringStub::Generate(MacroAssembler* masm) { |
5332 Label runtime; | 5577 Label runtime; |
5333 | 5578 |
5334 // Stack frame on entry. | 5579 // Stack frame on entry. |
5335 // rsp[0] : return address | 5580 // rsp[0] : return address |
5336 // rsp[8] : to | 5581 // rsp[8] : to |
5337 // rsp[16] : from | 5582 // rsp[16] : from |
5338 // rsp[24] : string | 5583 // rsp[24] : string |
5339 | 5584 |
| 5585 #ifndef V8_TARGET_ARCH_X32 |
5340 const int kToOffset = 1 * kPointerSize; | 5586 const int kToOffset = 1 * kPointerSize; |
| 5587 #else |
| 5588 const int kToOffset = 1 * kHWRegSize; |
| 5589 #endif |
5341 const int kFromOffset = kToOffset + kPointerSize; | 5590 const int kFromOffset = kToOffset + kPointerSize; |
5342 const int kStringOffset = kFromOffset + kPointerSize; | 5591 const int kStringOffset = kFromOffset + kPointerSize; |
5343 const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset; | 5592 const int kArgumentsSize = (kStringOffset + kPointerSize) - kToOffset; |
5344 | 5593 |
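The kToOffset difference between the two columns is the size of the return-address slot: on plain x64 it equals kPointerSize, while on the x32 port pointers are narrower than a hardware register, so the first argument sits one kHWRegSize above rsp. A sketch of the resulting offsets; the x32 numbers are my assumption about that port's layout:

    #include <cassert>

    struct Layout { int to, from, string; };

    constexpr Layout SubStringArgOffsets(int pointer_size, int hw_reg_size) {
      int to = hw_reg_size;  // skip the return-address slot
      return {to, to + pointer_size, to + 2 * pointer_size};
    }

    int main() {
      constexpr Layout x64 = SubStringArgOffsets(8, 8);
      constexpr Layout x32 = SubStringArgOffsets(4, 8);  // assumed x32 port layout
      assert(x64.to == 8 && x64.from == 16 && x64.string == 24);  // matches the comment block above
      assert(x32.to == 8 && x32.from == 12 && x32.string == 16);
    }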
5345 // Make sure first argument is a string. | 5594 // Make sure first argument is a string. |
5346 __ movq(rax, Operand(rsp, kStringOffset)); | 5595 __ movq(rax, Operand(rsp, kStringOffset)); |
5347 STATIC_ASSERT(kSmiTag == 0); | 5596 STATIC_ASSERT(kSmiTag == 0); |
5348 __ testl(rax, Immediate(kSmiTagMask)); | 5597 __ testl(rax, Immediate(kSmiTagMask)); |
5349 __ j(zero, &runtime); | 5598 __ j(zero, &runtime); |
5350 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx); | 5599 Condition is_string = masm->IsObjectStringType(rax, rbx, rbx); |
(...skipping 318 matching lines...)
5669 Label* chars_not_equal, | 5918 Label* chars_not_equal, |
5670 Label::Distance near_jump) { | 5919 Label::Distance near_jump) { |
5671 // Change index to run from -length to -1 by adding length to string | 5920 // Change index to run from -length to -1 by adding length to string |
5672 // start. This means that loop ends when index reaches zero, which | 5921 // start. This means that loop ends when index reaches zero, which |
5673 // doesn't need an additional compare. | 5922 // doesn't need an additional compare. |
5674 __ SmiToInteger32(length, length); | 5923 __ SmiToInteger32(length, length); |
5675 __ lea(left, | 5924 __ lea(left, |
5676 FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize)); | 5925 FieldOperand(left, length, times_1, SeqOneByteString::kHeaderSize)); |
5677 __ lea(right, | 5926 __ lea(right, |
5678 FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize)); | 5927 FieldOperand(right, length, times_1, SeqOneByteString::kHeaderSize)); |
5679 __ neg(length); | 5928 __k neg(length); |
5680 Register index = length; // index = -length; | 5929 Register index = length; // index = -length; |
5681 | 5930 |
5682 // Compare loop. | 5931 // Compare loop. |
5683 Label loop; | 5932 Label loop; |
5684 __ bind(&loop); | 5933 __ bind(&loop); |
5685 __ movb(scratch, Operand(left, index, times_1, 0)); | 5934 __ movb(scratch, Operand(left, index, times_1, 0)); |
5686 __ cmpb(scratch, Operand(right, index, times_1, 0)); | 5935 __ cmpb(scratch, Operand(right, index, times_1, 0)); |
5687 __ j(not_equal, chars_not_equal, near_jump); | 5936 __ j(not_equal, chars_not_equal, near_jump); |
5688 __ incq(index); | 5937 __k incq(index); |
5689 __ j(not_zero, &loop); | 5938 __ j(not_zero, &loop); |
5690 } | 5939 } |
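The loop above uses the negative-index trick described in the comment: both operands are addressed from their ends and the index runs from -length up to zero, so the increment's zero flag doubles as the loop condition. A C++ analogue:

    #include <cassert>

    bool EqualAscii(const char* left, const char* right, long length) {
      const char* left_end = left + length;
      const char* right_end = right + length;
      for (long index = -length; index != 0; ++index) {  // incq index; jnz loop
        if (left_end[index] != right_end[index]) return false;
      }
      return true;
    }

    int main() {
      assert(EqualAscii("abcdef", "abcdef", 6));
      assert(!EqualAscii("abcdef", "abcxef", 6));
    }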
5691 | 5940 |
5692 | 5941 |
5693 void StringCompareStub::Generate(MacroAssembler* masm) { | 5942 void StringCompareStub::Generate(MacroAssembler* masm) { |
5694 Label runtime; | 5943 Label runtime; |
5695 | 5944 |
5696 // Stack frame on entry. | 5945 // Stack frame on entry. |
5697 // rsp[0] : return address | 5946 // rsp[0] : return address |
5698 // rsp[8] : right string | 5947 // rsp[8] : right string |
5699 // rsp[16] : left string | 5948 // rsp[16] : left string |
5700 | 5949 |
5701 __ movq(rdx, Operand(rsp, 2 * kPointerSize)); // left | 5950 __a movq(rdx, Operand(rsp, 2 * kPointerSize)); // left |
5702 __ movq(rax, Operand(rsp, 1 * kPointerSize)); // right | 5951 __a movq(rax, Operand(rsp, 1 * kPointerSize)); // right |
5703 | 5952 |
5704 // Check for identity. | 5953 // Check for identity. |
5705 Label not_same; | 5954 Label not_same; |
5706 __ cmpq(rdx, rax); | 5955 __ cmpq(rdx, rax); |
5707 __ j(not_equal, &not_same, Label::kNear); | 5956 __ j(not_equal, &not_same, Label::kNear); |
5708 __ Move(rax, Smi::FromInt(EQUAL)); | 5957 __ Move(rax, Smi::FromInt(EQUAL)); |
5709 Counters* counters = masm->isolate()->counters(); | 5958 Counters* counters = masm->isolate()->counters(); |
5710 __ IncrementCounter(counters->string_compare_native(), 1); | 5959 __ IncrementCounter(counters->string_compare_native(), 1); |
5711 __ ret(2 * kPointerSize); | 5960 __ ret(2 * kPointerSize); |
5712 | 5961 |
5713 __ bind(&not_same); | 5962 __ bind(&not_same); |
5714 | 5963 |
5715 // Check that both are sequential ASCII strings. | 5964 // Check that both are sequential ASCII strings. |
5716 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime); | 5965 __ JumpIfNotBothSequentialAsciiStrings(rdx, rax, rcx, rbx, &runtime); |
5717 | 5966 |
5718 // Inline comparison of ASCII strings. | 5967 // Inline comparison of ASCII strings. |
5719 __ IncrementCounter(counters->string_compare_native(), 1); | 5968 __ IncrementCounter(counters->string_compare_native(), 1); |
5720 // Drop arguments from the stack | 5969 // Drop arguments from the stack |
5721 __ pop(rcx); | 5970 __k pop(rcx); |
5722 __ addq(rsp, Immediate(2 * kPointerSize)); | 5971 __ addq(rsp, Immediate(2 * kPointerSize)); |
5723 __ push(rcx); | 5972 __k push(rcx); |
5724 GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8); | 5973 GenerateCompareFlatAsciiStrings(masm, rdx, rax, rcx, rbx, rdi, r8); |
5725 | 5974 |
5726 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | 5975 // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) |
5727 // tagged as a small integer. | 5976 // tagged as a small integer. |
5728 __ bind(&runtime); | 5977 __ bind(&runtime); |
5729 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 5978 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
5730 } | 5979 } |
5731 | 5980 |
5732 | 5981 |
5733 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 5982 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
(...skipping 253 matching lines...)
5987 if (equality) { | 6236 if (equality) { |
5988 StringCompareStub::GenerateFlatAsciiStringEquals( | 6237 StringCompareStub::GenerateFlatAsciiStringEquals( |
5989 masm, left, right, tmp1, tmp2); | 6238 masm, left, right, tmp1, tmp2); |
5990 } else { | 6239 } else { |
5991 StringCompareStub::GenerateCompareFlatAsciiStrings( | 6240 StringCompareStub::GenerateCompareFlatAsciiStrings( |
5992 masm, left, right, tmp1, tmp2, tmp3, kScratchRegister); | 6241 masm, left, right, tmp1, tmp2, tmp3, kScratchRegister); |
5993 } | 6242 } |
5994 | 6243 |
5995 // Handle more complex cases in runtime. | 6244 // Handle more complex cases in runtime. |
5996 __ bind(&runtime); | 6245 __ bind(&runtime); |
5997 __ pop(tmp1); // Return address. | 6246 __k pop(tmp1); // Return address. |
5998 __ push(left); | 6247 __ push(left); |
5999 __ push(right); | 6248 __ push(right); |
6000 __ push(tmp1); | 6249 __k push(tmp1); |
6001 if (equality) { | 6250 if (equality) { |
6002 __ TailCallRuntime(Runtime::kStringEquals, 2, 1); | 6251 __ TailCallRuntime(Runtime::kStringEquals, 2, 1); |
6003 } else { | 6252 } else { |
6004 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 6253 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
6005 } | 6254 } |
6006 | 6255 |
6007 __ bind(&miss); | 6256 __ bind(&miss); |
6008 GenerateMiss(masm); | 6257 GenerateMiss(masm); |
6009 } | 6258 } |
6010 | 6259 |
(...skipping 201 matching lines...)
6212 __ decl(scratch); | 6461 __ decl(scratch); |
6213 __ push(scratch); | 6462 __ push(scratch); |
6214 | 6463 |
6215 // If names of slots in range from 1 to kProbes - 1 for the hash value are | 6464 // If names of slots in range from 1 to kProbes - 1 for the hash value are |
6216 // not equal to the name and kProbes-th slot is not used (its name is the | 6465 // not equal to the name and kProbes-th slot is not used (its name is the |
6217 // undefined value), it guarantees the hash table doesn't contain the | 6466 // undefined value), it guarantees the hash table doesn't contain the |
6218 // property. It's true even if some slots represent deleted properties | 6467 // property. It's true even if some slots represent deleted properties |
6219 // (their names are the null value). | 6468 // (their names are the null value). |
6220 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 6469 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
6221 // Compute the masked index: (hash + i + i * i) & mask. | 6470 // Compute the masked index: (hash + i + i * i) & mask. |
6222 __ movq(scratch, Operand(rsp, 2 * kPointerSize)); | 6471 __a movq(scratch, Operand(rsp, 2 * kPointerSize)); |
6223 if (i > 0) { | 6472 if (i > 0) { |
6224 __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i))); | 6473 __ addl(scratch, Immediate(NameDictionary::GetProbeOffset(i))); |
6225 } | 6474 } |
6226 __ and_(scratch, Operand(rsp, 0)); | 6475 __ and_(scratch, Operand(rsp, 0)); |
6227 | 6476 |
6228 // Scale the index by multiplying by the entry size. | 6477 // Scale the index by multiplying by the entry size. |
6229 ASSERT(NameDictionary::kEntrySize == 3); | 6478 ASSERT(NameDictionary::kEntrySize == 3); |
6230 __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. | 6479 __ lea(index_, Operand(scratch, scratch, times_2, 0)); // index *= 3. |
6231 | 6480 |
6232 // Having undefined at this place means the name is not contained. | 6481 // Having undefined at this place means the name is not contained. |
6233 __ movq(scratch, Operand(dictionary_, | 6482 __ movq(scratch, Operand(dictionary_, |
6234 index_, | 6483 index_, |
6235 times_pointer_size, | 6484 times_pointer_size, |
6236 kElementsStartOffset - kHeapObjectTag)); | 6485 kElementsStartOffset - kHeapObjectTag)); |
6237 | 6486 |
6238 __ Cmp(scratch, masm->isolate()->factory()->undefined_value()); | 6487 __ Cmp(scratch, masm->isolate()->factory()->undefined_value()); |
6239 __ j(equal, &not_in_dictionary); | 6488 __ j(equal, &not_in_dictionary); |
6240 | 6489 |
6241 // Stop if found the property. | 6490 // Stop if found the property. |
6242 __ cmpq(scratch, Operand(rsp, 3 * kPointerSize)); | 6491 __a cmpq(scratch, Operand(rsp, 3 * kPointerSize)); |
6243 __ j(equal, &in_dictionary); | 6492 __ j(equal, &in_dictionary); |
6244 | 6493 |
6245 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { | 6494 if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { |
6246 // If we hit a key that is not a unique name during negative | 6495 // If we hit a key that is not a unique name during negative |
6247 // lookup we have to bailout as this key might be equal to the | 6496 // lookup we have to bailout as this key might be equal to the |
6248 // key we are looking for. | 6497 // key we are looking for. |
6249 | 6498 |
6250 // Check if the entry name is not a unique name. | 6499 // Check if the entry name is not a unique name. |
6251 __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); | 6500 __ movq(scratch, FieldOperand(scratch, HeapObject::kMapOffset)); |
6252 __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset), | 6501 __ JumpIfNotUniqueName(FieldOperand(scratch, Map::kInstanceTypeOffset), |
(...skipping 331 matching lines...)
6584 // clobbers rbx, rdx, rdi | 6833 // clobbers rbx, rdx, rdi |
6585 // ----------------------------------- | 6834 // ----------------------------------- |
6586 | 6835 |
6587 Label element_done; | 6836 Label element_done; |
6588 Label double_elements; | 6837 Label double_elements; |
6589 Label smi_element; | 6838 Label smi_element; |
6590 Label slow_elements; | 6839 Label slow_elements; |
6591 Label fast_elements; | 6840 Label fast_elements; |
6592 | 6841 |
6593 // Get array literal index, array literal and its map. | 6842 // Get array literal index, array literal and its map. |
6594 __ movq(rdx, Operand(rsp, 1 * kPointerSize)); | 6843 __a movq(rdx, Operand(rsp, 1 * kPointerSize)); |
6595 __ movq(rbx, Operand(rsp, 2 * kPointerSize)); | 6844 __a movq(rbx, Operand(rsp, 2 * kPointerSize)); |
6596 __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset)); | 6845 __ movq(rdi, FieldOperand(rbx, JSObject::kMapOffset)); |
6597 | 6846 |
6598 __ CheckFastElements(rdi, &double_elements); | 6847 __ CheckFastElements(rdi, &double_elements); |
6599 | 6848 |
6600 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS | 6849 // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS |
6601 __ JumpIfSmi(rax, &smi_element); | 6850 __ JumpIfSmi(rax, &smi_element); |
6602 __ CheckFastSmiElements(rdi, &fast_elements); | 6851 __ CheckFastSmiElements(rdi, &fast_elements); |
6603 | 6852 |
6604 // Store into the array literal requires a elements transition. Call into | 6853 // Store into the array literal requires a elements transition. Call into |
6605 // the runtime. | 6854 // the runtime. |
6606 | 6855 |
6607 __ bind(&slow_elements); | 6856 __ bind(&slow_elements); |
6608 __ pop(rdi); // Pop return address and remember to put back later for tail | 6857 __k pop(rdi); // Pop return address and remember to put back later for tail |
6609 // call. | 6858 // call. |
6610 __ push(rbx); | 6859 __ push(rbx); |
6611 __ push(rcx); | 6860 __ push(rcx); |
6612 __ push(rax); | 6861 __ push(rax); |
6613 __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); | 6862 __ movq(rbx, Operand(rbp, JavaScriptFrameConstants::kFunctionOffset)); |
6614 __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset)); | 6863 __ push(FieldOperand(rbx, JSFunction::kLiteralsOffset)); |
6615 __ push(rdx); | 6864 __ push(rdx); |
6616 __ push(rdi); // Return return address so that tail call returns to right | 6865 __k push(rdi); // Return return address so that tail call returns to right |
6617 // place. | 6866 // place. |
6618 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); | 6867 __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); |
6619 | 6868 |
6620 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. | 6869 // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. |
6621 __ bind(&fast_elements); | 6870 __ bind(&fast_elements); |
6622 __ SmiToInteger32(kScratchRegister, rcx); | 6871 __ SmiToInteger32(kScratchRegister, rcx); |
6623 __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset)); | 6872 __ movq(rbx, FieldOperand(rbx, JSObject::kElementsOffset)); |
6624 __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size, | 6873 __ lea(rcx, FieldOperand(rbx, kScratchRegister, times_pointer_size, |
6625 FixedArrayBase::kHeaderSize)); | 6874 FixedArrayBase::kHeaderSize)); |
6626 __ movq(Operand(rcx, 0), rax); | 6875 __ movq(Operand(rcx, 0), rax); |
(...skipping 27 matching lines...)
6654 } | 6903 } |
6655 | 6904 |
6656 | 6905 |
6657 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 6906 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { |
6658 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); | 6907 CEntryStub ces(1, fp_registers_ ? kSaveFPRegs : kDontSaveFPRegs); |
6659 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); | 6908 __ Call(ces.GetCode(masm->isolate()), RelocInfo::CODE_TARGET); |
6660 int parameter_count_offset = | 6909 int parameter_count_offset = |
6661 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 6910 StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; |
6662 __ movq(rbx, MemOperand(rbp, parameter_count_offset)); | 6911 __ movq(rbx, MemOperand(rbp, parameter_count_offset)); |
6663 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 6912 masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); |
6664 __ pop(rcx); | 6913 __k pop(rcx); |
6665 int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE | 6914 int additional_offset = function_mode_ == JS_FUNCTION_STUB_MODE |
6666 ? kPointerSize | 6915 ? kPointerSize |
6667 : 0; | 6916 : 0; |
6668 __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset)); | 6917 __ lea(rsp, MemOperand(rsp, rbx, times_pointer_size, additional_offset)); |
6669 __ jmp(rcx); // Return to IC Miss stub, continuation still on stack. | 6918 __ jmp(rcx); // Return to IC Miss stub, continuation still on stack. |
6670 } | 6919 } |
6671 | 6920 |
6672 | 6921 |
6673 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 6922 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { |
6674 if (masm->isolate()->function_entry_hook() != NULL) { | 6923 if (masm->isolate()->function_entry_hook() != NULL) { |
6675 // It's always safe to call the entry hook stub, as the hook itself | 6924 // It's always safe to call the entry hook stub, as the hook itself |
6676 // is not allowed to call back to V8. | 6925 // is not allowed to call back to V8. |
6677 AllowStubCallsScope allow_stub_calls(masm, true); | 6926 AllowStubCallsScope allow_stub_calls(masm, true); |
6678 | 6927 |
6679 ProfileEntryHookStub stub; | 6928 ProfileEntryHookStub stub; |
6680 masm->CallStub(&stub); | 6929 masm->CallStub(&stub); |
6681 } | 6930 } |
6682 } | 6931 } |
6683 | 6932 |
6684 | 6933 |
6685 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { | 6934 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { |
6686 // This stub can be called from essentially anywhere, so it needs to save | 6935 // This stub can be called from essentially anywhere, so it needs to save |
6687 // all volatile and callee-save registers. | 6936 // all volatile and callee-save registers. |
6688 const size_t kNumSavedRegisters = 2; | 6937 const size_t kNumSavedRegisters = 2; |
6689 __ push(arg_reg_1); | 6938 __k push(arg_reg_1); |
6690 __ push(arg_reg_2); | 6939 __k push(arg_reg_2); |
6691 | 6940 |
6692 // Calculate the original stack pointer and store it in the second arg. | 6941 // Calculate the original stack pointer and store it in the second arg. |
6693 __ lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize)); | 6942 __q lea(arg_reg_2, Operand(rsp, (kNumSavedRegisters + 1) * kPointerSize)); |
6694 | 6943 |
6695 // Calculate the function address and store it in the first arg. | 6944 // Calculate the function address and store it in the first arg. |
6696 __ movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize)); | 6945 __s movq(arg_reg_1, Operand(rsp, kNumSavedRegisters * kPointerSize)); |
6697 __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength)); | 6946 __ subq(arg_reg_1, Immediate(Assembler::kShortCallInstructionLength)); |
6698 | 6947 |
6699 // Save the remainder of the volatile registers. | 6948 // Save the remainder of the volatile registers. |
6700 masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2); | 6949 masm->PushCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2); |
6701 | 6950 |
6702 // Call the entry hook function. | 6951 // Call the entry hook function. |
6703 __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()), | 6952 __ movq(rax, FUNCTION_ADDR(masm->isolate()->function_entry_hook()), |
| 6953 #ifndef V8_TARGET_ARCH_X32 |
6704 RelocInfo::NONE64); | 6954 RelocInfo::NONE64); |
| 6955 #else |
| 6956 RelocInfo::NONE32); |
| 6957 #endif |
6705 | 6958 |
6706 AllowExternalCallThatCantCauseGC scope(masm); | 6959 AllowExternalCallThatCantCauseGC scope(masm); |
6707 | 6960 |
6708 const int kArgumentCount = 2; | 6961 const int kArgumentCount = 2; |
6709 __ PrepareCallCFunction(kArgumentCount); | 6962 __ PrepareCallCFunction(kArgumentCount); |
6710 __ CallCFunction(rax, kArgumentCount); | 6963 __ CallCFunction(rax, kArgumentCount); |
6711 | 6964 |
6712 // Restore volatile regs. | 6965 // Restore volatile regs. |
6713 masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2); | 6966 masm->PopCallerSaved(kSaveFPRegs, arg_reg_1, arg_reg_2); |
6714 __ pop(arg_reg_2); | 6967 __k pop(arg_reg_2); |
6715 __ pop(arg_reg_1); | 6968 __k pop(arg_reg_1); |
6716 | |
6717 __ Ret(); | 6969 __ Ret(); |
6718 } | 6970 } |
6719 | 6971 |
6720 | 6972 |
6721 template<class T> | 6973 template<class T> |
6722 static void CreateArrayDispatch(MacroAssembler* masm) { | 6974 static void CreateArrayDispatch(MacroAssembler* masm) { |
6723 int last_index = GetSequenceIndexFromFastElementsKind( | 6975 int last_index = GetSequenceIndexFromFastElementsKind( |
6724 TERMINAL_FAST_ELEMENTS_KIND); | 6976 TERMINAL_FAST_ELEMENTS_KIND); |
6725 for (int i = 0; i <= last_index; ++i) { | 6977 for (int i = 0; i <= last_index; ++i) { |
6726 Label next; | 6978 Label next; |
(...skipping 27 matching lines...) |
6754 Handle<Object> undefined_sentinel( | 7006 Handle<Object> undefined_sentinel( |
6755 masm->isolate()->heap()->undefined_value(), | 7007 masm->isolate()->heap()->undefined_value(), |
6756 masm->isolate()); | 7008 masm->isolate()); |
6757 | 7009 |
6758 // is the low bit set? If so, we are holey and that is good. | 7010 // is the low bit set? If so, we are holey and that is good. |
6759 __ testb(rdx, Immediate(1)); | 7011 __ testb(rdx, Immediate(1)); |
6760 Label normal_sequence; | 7012 Label normal_sequence; |
6761 __ j(not_zero, &normal_sequence); | 7013 __ j(not_zero, &normal_sequence); |
6762 | 7014 |
6763 // look at the first argument | 7015 // look at the first argument |
6764 __ movq(rcx, Operand(rsp, kPointerSize)); | 7016 __a movq(rcx, Operand(rsp, 1 * kPointerSize)); |
6765 __ testq(rcx, rcx); | 7017 __ testq(rcx, rcx); |
6766 __ j(zero, &normal_sequence); | 7018 __ j(zero, &normal_sequence); |
6767 | 7019 |
6768 // We are going to create a holey array, but our kind is non-holey. | 7020 // We are going to create a holey array, but our kind is non-holey. |
6769 // Fix kind and retry | 7021 // Fix kind and retry |
6770 __ incl(rdx); | 7022 __ incl(rdx); |
6771 __ Cmp(rbx, undefined_sentinel); | 7023 __ Cmp(rbx, undefined_sentinel); |
6772 __ j(equal, &normal_sequence); | 7024 __ j(equal, &normal_sequence); |
6773 | 7025 |
6774 // The type cell may have gone megamorphic, don't overwrite if so | 7026 // The type cell may have gone megamorphic, don't overwrite if so |
(...skipping 146 matching lines...) |
6921 InternalArrayNoArgumentConstructorStub stub0(kind); | 7173 InternalArrayNoArgumentConstructorStub stub0(kind); |
6922 __ TailCallStub(&stub0); | 7174 __ TailCallStub(&stub0); |
6923 | 7175 |
6924 __ bind(¬_zero_case); | 7176 __ bind(¬_zero_case); |
6925 __ cmpl(rax, Immediate(1)); | 7177 __ cmpl(rax, Immediate(1)); |
6926 __ j(greater, ¬_one_case); | 7178 __ j(greater, ¬_one_case); |
6927 | 7179 |
6928 if (IsFastPackedElementsKind(kind)) { | 7180 if (IsFastPackedElementsKind(kind)) { |
6929 // We might need to create a holey array | 7181 // We might need to create a holey array |
6930 // look at the first argument | 7182 // look at the first argument |
6931 __ movq(rcx, Operand(rsp, kPointerSize)); | 7183 __a movq(rcx, Operand(rsp, 1 * kPointerSize)); |
6932 __ testq(rcx, rcx); | 7184 __ testq(rcx, rcx); |
6933 __ j(zero, &normal_sequence); | 7185 __ j(zero, &normal_sequence); |
6934 | 7186 |
6935 InternalArraySingleArgumentConstructorStub | 7187 InternalArraySingleArgumentConstructorStub |
6936 stub1_holey(GetHoleyElementsKind(kind)); | 7188 stub1_holey(GetHoleyElementsKind(kind)); |
6937 __ TailCallStub(&stub1_holey); | 7189 __ TailCallStub(&stub1_holey); |
6938 } | 7190 } |
6939 | 7191 |
6940 __ bind(&normal_sequence); | 7192 __ bind(&normal_sequence); |
6941 InternalArraySingleArgumentConstructorStub stub1(kind); | 7193 InternalArraySingleArgumentConstructorStub stub1(kind); |
(...skipping 50 matching lines...) |
6992 | 7244 |
6993 Label fast_elements_case; | 7245 Label fast_elements_case; |
6994 __ cmpl(rcx, Immediate(FAST_ELEMENTS)); | 7246 __ cmpl(rcx, Immediate(FAST_ELEMENTS)); |
6995 __ j(equal, &fast_elements_case); | 7247 __ j(equal, &fast_elements_case); |
6996 GenerateCase(masm, FAST_HOLEY_ELEMENTS); | 7248 GenerateCase(masm, FAST_HOLEY_ELEMENTS); |
6997 | 7249 |
6998 __ bind(&fast_elements_case); | 7250 __ bind(&fast_elements_case); |
6999 GenerateCase(masm, FAST_ELEMENTS); | 7251 GenerateCase(masm, FAST_ELEMENTS); |
7000 } | 7252 } |
7001 | 7253 |
7002 | 7254 #undef __n |
| 7255 #undef __s |
| 7256 #undef __q |
| 7257 #undef __a |
| 7258 #undef __k |
7003 #undef __ | 7259 #undef __ |
7004 | 7260 |
7005 } } // namespace v8::internal | 7261 } } // namespace v8::internal |
7006 | 7262 |
7007 #endif // V8_TARGET_ARCH_X64 | 7263 #endif // V8_TARGET_ARCH_X64 |
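The markers #undef'd above (__n, __s, __q, __a, __k) are plain aliases of the __ macro in this x64 file; the patch threads them through the sites a 32-bit-pointer (x32) build would presumably need to treat differently: __k marks pushes and pops of full 8-byte slots such as the return address, __a marks argument loads of the form Operand(rsp, n * kPointerSize), and __q and __s mark stack-pointer arithmetic and stack reads. Those roles are inferred from the call sites in this hunk, not stated by the patch itself. The RelocInfo::NONE32 branch under V8_TARGET_ARCH_X32 fits the same picture, since pointers are 32 bits wide on that target. Below is a minimal, self-contained sketch of the marker-macro pattern; every name in it (Assembler, WideView, SKETCH_X32, the pop32/pop64 strings) is invented for illustration and is not a V8 API.

#include <iostream>

// Stand-in assembler: pop() prints a pointer-width operation, while the
// WideView variant prints the full 64-bit form.
struct Assembler {
  void pop(const char* reg) { std::cout << "pop32 " << reg << "\n"; }
  struct WideView {
    void pop(const char* reg) { std::cout << "pop64 " << reg << "\n"; }
  };
  WideView wide() { return WideView(); }
};

#define __ masm->
#if defined(SKETCH_X32)      // hypothetical 32-bit-pointer configuration
#define __k masm->wide().    // marked sites keep the full 64-bit form
#else
#define __k __               // on the default target the marker is a plain alias
#endif

static void Generate(Assembler* masm) {
  __  pop("rbx");   // ordinary site: pointer-width on either target
  __k pop("rcx");   // marked site, e.g. an 8-byte return-address slot
}

#undef __k
#undef __

int main() {
  Assembler masm;
  Generate(&masm);
  return 0;
}

Compiling with -DSKETCH_X32 changes only the marked line to the 64-bit form, which mirrors how a port with a different pointer size could specialize the __k/__a/__q/__s sites while leaving the bulk of the stub code untouched.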