| OLD | NEW |
| 1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM64 | 7 #if V8_TARGET_ARCH_ARM64 |
| 8 | 8 |
| 9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
| 10 #include "src/code-stubs.h" | 10 #include "src/code-stubs.h" |
| (...skipping 382 matching lines...) |
| 393 | 393 |
| 394 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { | 394 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) { |
| 395 // Update the static counter each time a new code stub is generated. | 395 // Update the static counter each time a new code stub is generated. |
| 396 isolate()->counters()->code_stubs()->Increment(); | 396 isolate()->counters()->code_stubs()->Increment(); |
| 397 | 397 |
| 398 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); | 398 CodeStubInterfaceDescriptor* descriptor = GetInterfaceDescriptor(); |
| 399 int param_count = descriptor->GetEnvironmentParameterCount(); | 399 int param_count = descriptor->GetEnvironmentParameterCount(); |
| 400 { | 400 { |
| 401 // Call the runtime system in a fresh internal frame. | 401 // Call the runtime system in a fresh internal frame. |
| 402 FrameScope scope(masm, StackFrame::INTERNAL); | 402 FrameScope scope(masm, StackFrame::INTERNAL); |
| 403 ASSERT((param_count == 0) || | 403 DCHECK((param_count == 0) || |
| 404 x0.Is(descriptor->GetEnvironmentParameterRegister(param_count - 1))); | 404 x0.Is(descriptor->GetEnvironmentParameterRegister(param_count - 1))); |
| 405 | 405 |
| 406 // Push arguments | 406 // Push arguments |
| 407 MacroAssembler::PushPopQueue queue(masm); | 407 MacroAssembler::PushPopQueue queue(masm); |
| 408 for (int i = 0; i < param_count; ++i) { | 408 for (int i = 0; i < param_count; ++i) { |
| 409 queue.Queue(descriptor->GetEnvironmentParameterRegister(i)); | 409 queue.Queue(descriptor->GetEnvironmentParameterRegister(i)); |
| 410 } | 410 } |
| 411 queue.PushQueued(); | 411 queue.PushQueued(); |
| 412 | 412 |
| 413 ExternalReference miss = descriptor->miss_handler(); | 413 ExternalReference miss = descriptor->miss_handler(); |
| 414 __ CallExternalReference(miss, param_count); | 414 __ CallExternalReference(miss, param_count); |
| 415 } | 415 } |
| 416 | 416 |
| 417 __ Ret(); | 417 __ Ret(); |
| 418 } | 418 } |
| 419 | 419 |
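
The substantive change throughout this file is the rename of ASSERT to DCHECK (and ASSERT_EQ to DCHECK_EQ): both spell a debug-only check that is compiled out of release builds. As a rough sketch of that contract, assuming a simplified macro rather than V8's actual definition in src/base/logging.h:

```cpp
#include <cstdio>
#include <cstdlib>

// Minimal sketch of a debug-only check in the spirit of DCHECK. V8's real
// macro also reports file, line, and the stringified condition through its
// logging machinery; this stand-in only illustrates the contract:
// active in debug builds, a no-op in release builds.
#ifdef DEBUG
#define DCHECK(condition)                                            \
  do {                                                               \
    if (!(condition)) {                                              \
      std::fprintf(stderr, "Debug check failed: %s\n", #condition);  \
      std::abort();                                                  \
    }                                                                \
  } while (false)
#else
#define DCHECK(condition) ((void)0)
#endif
```
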
| 420 | 420 |
| 421 void DoubleToIStub::Generate(MacroAssembler* masm) { | 421 void DoubleToIStub::Generate(MacroAssembler* masm) { |
| 422 Label done; | 422 Label done; |
| 423 Register input = source(); | 423 Register input = source(); |
| 424 Register result = destination(); | 424 Register result = destination(); |
| 425 ASSERT(is_truncating()); | 425 DCHECK(is_truncating()); |
| 426 | 426 |
| 427 ASSERT(result.Is64Bits()); | 427 DCHECK(result.Is64Bits()); |
| 428 ASSERT(jssp.Is(masm->StackPointer())); | 428 DCHECK(jssp.Is(masm->StackPointer())); |
| 429 | 429 |
| 430 int double_offset = offset(); | 430 int double_offset = offset(); |
| 431 | 431 |
| 432 DoubleRegister double_scratch = d0; // only used if !skip_fastpath() | 432 DoubleRegister double_scratch = d0; // only used if !skip_fastpath() |
| 433 Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result); | 433 Register scratch1 = GetAllocatableRegisterThatIsNotOneOf(input, result); |
| 434 Register scratch2 = | 434 Register scratch2 = |
| 435 GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1); | 435 GetAllocatableRegisterThatIsNotOneOf(input, result, scratch1); |
| 436 | 436 |
| 437 __ Push(scratch1, scratch2); | 437 __ Push(scratch1, scratch2); |
| 438 // Account for saved regs if input is jssp. | 438 // Account for saved regs if input is jssp. |
| (...skipping 59 matching lines...) |
| 498 | 498 |
| 499 | 499 |
| 500 // See call site for description. | 500 // See call site for description. |
| 501 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 501 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 502 Register left, | 502 Register left, |
| 503 Register right, | 503 Register right, |
| 504 Register scratch, | 504 Register scratch, |
| 505 FPRegister double_scratch, | 505 FPRegister double_scratch, |
| 506 Label* slow, | 506 Label* slow, |
| 507 Condition cond) { | 507 Condition cond) { |
| 508 ASSERT(!AreAliased(left, right, scratch)); | 508 DCHECK(!AreAliased(left, right, scratch)); |
| 509 Label not_identical, return_equal, heap_number; | 509 Label not_identical, return_equal, heap_number; |
| 510 Register result = x0; | 510 Register result = x0; |
| 511 | 511 |
| 512 __ Cmp(right, left); | 512 __ Cmp(right, left); |
| 513 __ B(ne, &not_identical); | 513 __ B(ne, &not_identical); |
| 514 | 514 |
| 515 // Test for NaN. Sadly, we can't just compare to factory::nan_value(), | 515 // Test for NaN. Sadly, we can't just compare to factory::nan_value(), |
| 516 // so we do the second best thing - test it ourselves. | 516 // so we do the second best thing - test it ourselves. |
| 517 // They are both equal and they are not both Smis so both of them are not | 517 // They are both equal and they are not both Smis so both of them are not |
| 518 // Smis. If it's not a heap number, then return equal. | 518 // Smis. If it's not a heap number, then return equal. |
| (...skipping 34 matching lines...) |
| 553 __ Mov(result, LESS); // Things aren't greater than themselves. | 553 __ Mov(result, LESS); // Things aren't greater than themselves. |
| 554 } else { | 554 } else { |
| 555 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves. | 555 __ Mov(result, EQUAL); // Things are <=, >=, ==, === themselves. |
| 556 } | 556 } |
| 557 __ Ret(); | 557 __ Ret(); |
| 558 | 558 |
| 559 // Cases lt and gt have been handled earlier, and case ne is never seen, as | 559 // Cases lt and gt have been handled earlier, and case ne is never seen, as |
| 560 // it is handled in the parser (see Parser::ParseBinaryExpression). We are | 560 // it is handled in the parser (see Parser::ParseBinaryExpression). We are |
| 561 // only concerned with cases ge, le and eq here. | 561 // only concerned with cases ge, le and eq here. |
| 562 if ((cond != lt) && (cond != gt)) { | 562 if ((cond != lt) && (cond != gt)) { |
| 563 ASSERT((cond == ge) || (cond == le) || (cond == eq)); | 563 DCHECK((cond == ge) || (cond == le) || (cond == eq)); |
| 564 __ Bind(&heap_number); | 564 __ Bind(&heap_number); |
| 565 // Left and right are identical pointers to a heap number object. Return | 565 // Left and right are identical pointers to a heap number object. Return |
| 566 // non-equal if the heap number is a NaN, and equal otherwise. Comparing | 566 // non-equal if the heap number is a NaN, and equal otherwise. Comparing |
| 567 // the number to itself will set the overflow flag iff the number is NaN. | 567 // the number to itself will set the overflow flag iff the number is NaN. |
| 568 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset)); | 568 __ Ldr(double_scratch, FieldMemOperand(right, HeapNumber::kValueOffset)); |
| 569 __ Fcmp(double_scratch, double_scratch); | 569 __ Fcmp(double_scratch, double_scratch); |
| 570 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number. | 570 __ B(vc, &return_equal); // Not NaN, so treat as normal heap number. |
| 571 | 571 |
| 572 if (cond == le) { | 572 if (cond == le) { |
| 573 __ Mov(result, GREATER); | 573 __ Mov(result, GREATER); |
| (...skipping 12 matching lines...) |
| 586 } | 586 } |
| 587 | 587 |
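
The NaN test above leans on IEEE 754: a NaN compares unordered with everything, including itself, so Fcmp of a heap number against itself sets the overflow flag exactly when the value is NaN, and B(vc, ...) skips the NaN path otherwise. A host-side C++ analogue of the same property (illustrative only, not part of the stub):

```cpp
#include <cmath>
#include <iostream>

int main() {
  double nan = std::nan("");
  // Any ordered comparison involving NaN is false; only != is true.
  // This is the property the Fcmp/B(vc, ...) sequence above exploits.
  std::cout << std::boolalpha
            << (nan == nan) << "\n"   // false: NaN is not equal to itself
            << (nan != nan) << "\n";  // true
  return 0;
}
```
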
| 588 | 588 |
| 589 // See call site for description. | 589 // See call site for description. |
| 590 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 590 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 591 Register left, | 591 Register left, |
| 592 Register right, | 592 Register right, |
| 593 Register left_type, | 593 Register left_type, |
| 594 Register right_type, | 594 Register right_type, |
| 595 Register scratch) { | 595 Register scratch) { |
| 596 ASSERT(!AreAliased(left, right, left_type, right_type, scratch)); | 596 DCHECK(!AreAliased(left, right, left_type, right_type, scratch)); |
| 597 | 597 |
| 598 if (masm->emit_debug_code()) { | 598 if (masm->emit_debug_code()) { |
| 599 // We assume that the arguments are not identical. | 599 // We assume that the arguments are not identical. |
| 600 __ Cmp(left, right); | 600 __ Cmp(left, right); |
| 601 __ Assert(ne, kExpectedNonIdenticalObjects); | 601 __ Assert(ne, kExpectedNonIdenticalObjects); |
| 602 } | 602 } |
| 603 | 603 |
| 604 // If either operand is a JS object or an oddball value, then they are not | 604 // If either operand is a JS object or an oddball value, then they are not |
| 605 // equal since their pointers are different. | 605 // equal since their pointers are different. |
| 606 // There is no test for undetectability in strict equality. | 606 // There is no test for undetectability in strict equality. |
| 607 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); | 607 STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE); |
| 608 Label right_non_object; | 608 Label right_non_object; |
| 609 | 609 |
| 610 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); | 610 __ Cmp(right_type, FIRST_SPEC_OBJECT_TYPE); |
| 611 __ B(lt, &right_non_object); | 611 __ B(lt, &right_non_object); |
| 612 | 612 |
| 613 // Return non-zero - x0 already contains a non-zero pointer. | 613 // Return non-zero - x0 already contains a non-zero pointer. |
| 614 ASSERT(left.is(x0) || right.is(x0)); | 614 DCHECK(left.is(x0) || right.is(x0)); |
| 615 Label return_not_equal; | 615 Label return_not_equal; |
| 616 __ Bind(&return_not_equal); | 616 __ Bind(&return_not_equal); |
| 617 __ Ret(); | 617 __ Ret(); |
| 618 | 618 |
| 619 __ Bind(&right_non_object); | 619 __ Bind(&right_non_object); |
| 620 | 620 |
| 621 // Check for oddballs: true, false, null, undefined. | 621 // Check for oddballs: true, false, null, undefined. |
| 622 __ Cmp(right_type, ODDBALL_TYPE); | 622 __ Cmp(right_type, ODDBALL_TYPE); |
| 623 | 623 |
| 624 // If right is not ODDBALL, test left. Otherwise, set eq condition. | 624 // If right is not ODDBALL, test left. Otherwise, set eq condition. |
| (...skipping 17 matching lines...) |
| 642 | 642 |
| 643 // See call site for description. | 643 // See call site for description. |
| 644 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 644 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 645 Register left, | 645 Register left, |
| 646 Register right, | 646 Register right, |
| 647 FPRegister left_d, | 647 FPRegister left_d, |
| 648 FPRegister right_d, | 648 FPRegister right_d, |
| 649 Register scratch, | 649 Register scratch, |
| 650 Label* slow, | 650 Label* slow, |
| 651 bool strict) { | 651 bool strict) { |
| 652 ASSERT(!AreAliased(left, right, scratch)); | 652 DCHECK(!AreAliased(left, right, scratch)); |
| 653 ASSERT(!AreAliased(left_d, right_d)); | 653 DCHECK(!AreAliased(left_d, right_d)); |
| 654 ASSERT((left.is(x0) && right.is(x1)) || | 654 DCHECK((left.is(x0) && right.is(x1)) || |
| 655 (right.is(x0) && left.is(x1))); | 655 (right.is(x0) && left.is(x1))); |
| 656 Register result = x0; | 656 Register result = x0; |
| 657 | 657 |
| 658 Label right_is_smi, done; | 658 Label right_is_smi, done; |
| 659 __ JumpIfSmi(right, &right_is_smi); | 659 __ JumpIfSmi(right, &right_is_smi); |
| 660 | 660 |
| 661 // Left is the smi. Check whether right is a heap number. | 661 // Left is the smi. Check whether right is a heap number. |
| 662 if (strict) { | 662 if (strict) { |
| 663 // If right is not a number and left is a smi, then strict equality cannot | 663 // If right is not a number and left is a smi, then strict equality cannot |
| 664 // succeed. Return non-equal. | 664 // succeed. Return non-equal. |
| (...skipping 52 matching lines...) |
| 717 // See call site for description. | 717 // See call site for description. |
| 718 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 718 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, |
| 719 Register left, | 719 Register left, |
| 720 Register right, | 720 Register right, |
| 721 Register left_map, | 721 Register left_map, |
| 722 Register right_map, | 722 Register right_map, |
| 723 Register left_type, | 723 Register left_type, |
| 724 Register right_type, | 724 Register right_type, |
| 725 Label* possible_strings, | 725 Label* possible_strings, |
| 726 Label* not_both_strings) { | 726 Label* not_both_strings) { |
| 727 ASSERT(!AreAliased(left, right, left_map, right_map, left_type, right_type)); | 727 DCHECK(!AreAliased(left, right, left_map, right_map, left_type, right_type)); |
| 728 Register result = x0; | 728 Register result = x0; |
| 729 | 729 |
| 730 Label object_test; | 730 Label object_test; |
| 731 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); | 731 STATIC_ASSERT((kInternalizedTag == 0) && (kStringTag == 0)); |
| 732 // TODO(all): reexamine this branch sequence for optimisation wrt branch | 732 // TODO(all): reexamine this branch sequence for optimisation wrt branch |
| 733 // prediction. | 733 // prediction. |
| 734 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test); | 734 __ Tbnz(right_type, MaskToBit(kIsNotStringMask), &object_test); |
| 735 __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings); | 735 __ Tbnz(right_type, MaskToBit(kIsNotInternalizedMask), possible_strings); |
| 736 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings); | 736 __ Tbnz(left_type, MaskToBit(kIsNotStringMask), not_both_strings); |
| 737 __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings); | 737 __ Tbnz(left_type, MaskToBit(kIsNotInternalizedMask), possible_strings); |
| (...skipping 99 matching lines...) |
| 837 __ B(vs, &nan); // Overflow flag set if either is NaN. | 837 __ B(vs, &nan); // Overflow flag set if either is NaN. |
| 838 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); | 838 STATIC_ASSERT((LESS == -1) && (EQUAL == 0) && (GREATER == 1)); |
| 839 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). | 839 __ Cset(result, gt); // gt => 1, otherwise (lt, eq) => 0 (EQUAL). |
| 840 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0. | 840 __ Csinv(result, result, xzr, ge); // lt => -1, gt => 1, eq => 0. |
| 841 __ Ret(); | 841 __ Ret(); |
| 842 | 842 |
| 843 __ Bind(&nan); | 843 __ Bind(&nan); |
| 844 // Left and/or right is a NaN. Load the result register with whatever makes | 844 // Left and/or right is a NaN. Load the result register with whatever makes |
| 845 // the comparison fail, since comparisons with NaN always fail (except ne, | 845 // the comparison fail, since comparisons with NaN always fail (except ne, |
| 846 // which is filtered out at a higher level.) | 846 // which is filtered out at a higher level.) |
| 847 ASSERT(cond != ne); | 847 DCHECK(cond != ne); |
| 848 if ((cond == lt) || (cond == le)) { | 848 if ((cond == lt) || (cond == le)) { |
| 849 __ Mov(result, GREATER); | 849 __ Mov(result, GREATER); |
| 850 } else { | 850 } else { |
| 851 __ Mov(result, LESS); | 851 __ Mov(result, LESS); |
| 852 } | 852 } |
| 853 __ Ret(); | 853 __ Ret(); |
| 854 | 854 |
| 855 __ Bind(&not_smis); | 855 __ Bind(&not_smis); |
| 856 // At this point we know we are dealing with two different objects, and | 856 // At this point we know we are dealing with two different objects, and |
| 857 // neither of them is a smi. The objects are in rhs_ and lhs_. | 857 // neither of them is a smi. The objects are in rhs_ and lhs_. |
| (...skipping 70 matching lines...) |
| 928 // Figure out which native to call and setup the arguments. | 928 // Figure out which native to call and setup the arguments. |
| 929 Builtins::JavaScript native; | 929 Builtins::JavaScript native; |
| 930 if (cond == eq) { | 930 if (cond == eq) { |
| 931 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; | 931 native = strict() ? Builtins::STRICT_EQUALS : Builtins::EQUALS; |
| 932 } else { | 932 } else { |
| 933 native = Builtins::COMPARE; | 933 native = Builtins::COMPARE; |
| 934 int ncr; // NaN compare result | 934 int ncr; // NaN compare result |
| 935 if ((cond == lt) || (cond == le)) { | 935 if ((cond == lt) || (cond == le)) { |
| 936 ncr = GREATER; | 936 ncr = GREATER; |
| 937 } else { | 937 } else { |
| 938 ASSERT((cond == gt) || (cond == ge)); // remaining cases | 938 DCHECK((cond == gt) || (cond == ge)); // remaining cases |
| 939 ncr = LESS; | 939 ncr = LESS; |
| 940 } | 940 } |
| 941 __ Mov(x10, Smi::FromInt(ncr)); | 941 __ Mov(x10, Smi::FromInt(ncr)); |
| 942 __ Push(x10); | 942 __ Push(x10); |
| 943 } | 943 } |
| 944 | 944 |
| 945 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 945 // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) |
| 946 // tagged as a small integer. | 946 // tagged as a small integer. |
| 947 __ InvokeBuiltin(native, JUMP_FUNCTION); | 947 __ InvokeBuiltin(native, JUMP_FUNCTION); |
| 948 | 948 |
| (...skipping 287 matching lines...) |
| 1236 // Bail out to runtime code. | 1236 // Bail out to runtime code. |
| 1237 __ Bind(&call_runtime); | 1237 __ Bind(&call_runtime); |
| 1238 // Put the arguments back on the stack. | 1238 // Put the arguments back on the stack. |
| 1239 __ Push(base_tagged, exponent_tagged); | 1239 __ Push(base_tagged, exponent_tagged); |
| 1240 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); | 1240 __ TailCallRuntime(Runtime::kMathPowRT, 2, 1); |
| 1241 | 1241 |
| 1242 // Return. | 1242 // Return. |
| 1243 __ Bind(&done); | 1243 __ Bind(&done); |
| 1244 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1, | 1244 __ AllocateHeapNumber(result_tagged, &call_runtime, scratch0, scratch1, |
| 1245 result_double); | 1245 result_double); |
| 1246 ASSERT(result_tagged.is(x0)); | 1246 DCHECK(result_tagged.is(x0)); |
| 1247 __ IncrementCounter( | 1247 __ IncrementCounter( |
| 1248 isolate()->counters()->math_pow(), 1, scratch0, scratch1); | 1248 isolate()->counters()->math_pow(), 1, scratch0, scratch1); |
| 1249 __ Ret(); | 1249 __ Ret(); |
| 1250 } else { | 1250 } else { |
| 1251 AllowExternalCallThatCantCauseGC scope(masm); | 1251 AllowExternalCallThatCantCauseGC scope(masm); |
| 1252 __ Mov(saved_lr, lr); | 1252 __ Mov(saved_lr, lr); |
| 1253 __ Fmov(base_double, base_double_copy); | 1253 __ Fmov(base_double, base_double_copy); |
| 1254 __ Scvtf(exponent_double, exponent_integer); | 1254 __ Scvtf(exponent_double, exponent_integer); |
| 1255 __ CallCFunction( | 1255 __ CallCFunction( |
| 1256 ExternalReference::power_double_double_function(isolate()), | 1256 ExternalReference::power_double_double_function(isolate()), |
| (...skipping 83 matching lines...) |
| 1340 // at the highest address: | 1340 // at the highest address: |
| 1341 // | 1341 // |
| 1342 // jssp[argc-1]: receiver | 1342 // jssp[argc-1]: receiver |
| 1343 // jssp[argc-2]: arg[argc-2] | 1343 // jssp[argc-2]: arg[argc-2] |
| 1344 // ... ... | 1344 // ... ... |
| 1345 // jssp[1]: arg[1] | 1345 // jssp[1]: arg[1] |
| 1346 // jssp[0]: arg[0] | 1346 // jssp[0]: arg[0] |
| 1347 // | 1347 // |
| 1348 // The arguments are in reverse order, so that arg[argc-2] is actually the | 1348 // The arguments are in reverse order, so that arg[argc-2] is actually the |
| 1349 // first argument to the target function and arg[0] is the last. | 1349 // first argument to the target function and arg[0] is the last. |
| 1350 ASSERT(jssp.Is(__ StackPointer())); | 1350 DCHECK(jssp.Is(__ StackPointer())); |
| 1351 const Register& argc_input = x0; | 1351 const Register& argc_input = x0; |
| 1352 const Register& target_input = x1; | 1352 const Register& target_input = x1; |
| 1353 | 1353 |
| 1354 // Calculate argv, argc and the target address, and store them in | 1354 // Calculate argv, argc and the target address, and store them in |
| 1355 // callee-saved registers so we can retry the call without having to reload | 1355 // callee-saved registers so we can retry the call without having to reload |
| 1356 // these arguments. | 1356 // these arguments. |
| 1357 // TODO(jbramley): If the first call attempt succeeds in the common case (as | 1357 // TODO(jbramley): If the first call attempt succeeds in the common case (as |
| 1358 // it should), then we might be better off putting these parameters directly | 1358 // it should), then we might be better off putting these parameters directly |
| 1359 // into their argument registers, rather than using callee-saved registers and | 1359 // into their argument registers, rather than using callee-saved registers and |
| 1360 // preserving them on the stack. | 1360 // preserving them on the stack. |
| 1361 const Register& argv = x21; | 1361 const Register& argv = x21; |
| 1362 const Register& argc = x22; | 1362 const Register& argc = x22; |
| 1363 const Register& target = x23; | 1363 const Register& target = x23; |
| 1364 | 1364 |
| 1365 // Derive argv from the stack pointer so that it points to the first argument | 1365 // Derive argv from the stack pointer so that it points to the first argument |
| 1366 // (arg[argc-2]), or just below the receiver in case there are no arguments. | 1366 // (arg[argc-2]), or just below the receiver in case there are no arguments. |
| 1367 // - Adjust for the arg[] array. | 1367 // - Adjust for the arg[] array. |
| 1368 Register temp_argv = x11; | 1368 Register temp_argv = x11; |
| 1369 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2)); | 1369 __ Add(temp_argv, jssp, Operand(x0, LSL, kPointerSizeLog2)); |
| 1370 // - Adjust for the receiver. | 1370 // - Adjust for the receiver. |
| 1371 __ Sub(temp_argv, temp_argv, 1 * kPointerSize); | 1371 __ Sub(temp_argv, temp_argv, 1 * kPointerSize); |
| 1372 | 1372 |
| 1373 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved | 1373 // Enter the exit frame. Reserve three slots to preserve x21-x23 callee-saved |
| 1374 // registers. | 1374 // registers. |
| 1375 FrameScope scope(masm, StackFrame::MANUAL); | 1375 FrameScope scope(masm, StackFrame::MANUAL); |
| 1376 __ EnterExitFrame(save_doubles_, x10, 3); | 1376 __ EnterExitFrame(save_doubles_, x10, 3); |
| 1377 ASSERT(csp.Is(__ StackPointer())); | 1377 DCHECK(csp.Is(__ StackPointer())); |
| 1378 | 1378 |
| 1379 // Poke callee-saved registers into reserved space. | 1379 // Poke callee-saved registers into reserved space. |
| 1380 __ Poke(argv, 1 * kPointerSize); | 1380 __ Poke(argv, 1 * kPointerSize); |
| 1381 __ Poke(argc, 2 * kPointerSize); | 1381 __ Poke(argc, 2 * kPointerSize); |
| 1382 __ Poke(target, 3 * kPointerSize); | 1382 __ Poke(target, 3 * kPointerSize); |
| 1383 | 1383 |
| 1384 // We normally only keep tagged values in callee-saved registers, as they | 1384 // We normally only keep tagged values in callee-saved registers, as they |
| 1385 // could be pushed onto the stack by called stubs and functions, and on the | 1385 // could be pushed onto the stack by called stubs and functions, and on the |
| 1386 // stack they can confuse the GC. However, we're only calling C functions | 1386 // stack they can confuse the GC. However, we're only calling C functions |
| 1387 // which can push arbitrary data onto the stack anyway, and so the GC won't | 1387 // which can push arbitrary data onto the stack anyway, and so the GC won't |
| (...skipping 29 matching lines...) |
| 1417 // csp -> csp[0]: Space reserved for the return address. | 1417 // csp -> csp[0]: Space reserved for the return address. |
| 1418 // | 1418 // |
| 1419 // After a successful call, the exit frame, preserved registers (x21-x23) and | 1419 // After a successful call, the exit frame, preserved registers (x21-x23) and |
| 1420 // the arguments (including the receiver) are dropped or popped as | 1420 // the arguments (including the receiver) are dropped or popped as |
| 1421 // appropriate. The stub then returns. | 1421 // appropriate. The stub then returns. |
| 1422 // | 1422 // |
| 1423 // After an unsuccessful call, the exit frame and suchlike are left | 1423 // After an unsuccessful call, the exit frame and suchlike are left |
| 1424 // untouched, and the stub throws an exception by jumping to the | 1424 // untouched, and the stub throws an exception by jumping to the |
| 1425 // exception_returned label. | 1425 // exception_returned label. |
| 1426 | 1426 |
| 1427 ASSERT(csp.Is(__ StackPointer())); | 1427 DCHECK(csp.Is(__ StackPointer())); |
| 1428 | 1428 |
| 1429 // Prepare AAPCS64 arguments to pass to the builtin. | 1429 // Prepare AAPCS64 arguments to pass to the builtin. |
| 1430 __ Mov(x0, argc); | 1430 __ Mov(x0, argc); |
| 1431 __ Mov(x1, argv); | 1431 __ Mov(x1, argv); |
| 1432 __ Mov(x2, ExternalReference::isolate_address(isolate())); | 1432 __ Mov(x2, ExternalReference::isolate_address(isolate())); |
| 1433 | 1433 |
| 1434 Label return_location; | 1434 Label return_location; |
| 1435 __ Adr(x12, &return_location); | 1435 __ Adr(x12, &return_location); |
| 1436 __ Poke(x12, 0); | 1436 __ Poke(x12, 0); |
| 1437 | 1437 |
| (...skipping 26 matching lines...) |
| 1464 // The call succeeded, so unwind the stack and return. | 1464 // The call succeeded, so unwind the stack and return. |
| 1465 | 1465 |
| 1466 // Restore callee-saved registers x21-x23. | 1466 // Restore callee-saved registers x21-x23. |
| 1467 __ Mov(x11, argc); | 1467 __ Mov(x11, argc); |
| 1468 | 1468 |
| 1469 __ Peek(argv, 1 * kPointerSize); | 1469 __ Peek(argv, 1 * kPointerSize); |
| 1470 __ Peek(argc, 2 * kPointerSize); | 1470 __ Peek(argc, 2 * kPointerSize); |
| 1471 __ Peek(target, 3 * kPointerSize); | 1471 __ Peek(target, 3 * kPointerSize); |
| 1472 | 1472 |
| 1473 __ LeaveExitFrame(save_doubles_, x10, true); | 1473 __ LeaveExitFrame(save_doubles_, x10, true); |
| 1474 ASSERT(jssp.Is(__ StackPointer())); | 1474 DCHECK(jssp.Is(__ StackPointer())); |
| 1475 // Pop or drop the remaining stack slots and return from the stub. | 1475 // Pop or drop the remaining stack slots and return from the stub. |
| 1476 // jssp[24]: Arguments array (of size argc), including receiver. | 1476 // jssp[24]: Arguments array (of size argc), including receiver. |
| 1477 // jssp[16]: Preserved x23 (used for target). | 1477 // jssp[16]: Preserved x23 (used for target). |
| 1478 // jssp[8]: Preserved x22 (used for argc). | 1478 // jssp[8]: Preserved x22 (used for argc). |
| 1479 // jssp[0]: Preserved x21 (used for argv). | 1479 // jssp[0]: Preserved x21 (used for argv). |
| 1480 __ Drop(x11); | 1480 __ Drop(x11); |
| 1481 __ AssertFPCRState(); | 1481 __ AssertFPCRState(); |
| 1482 __ Ret(); | 1482 __ Ret(); |
| 1483 | 1483 |
| 1484 // The stack pointer is still csp if we aren't returning, and the frame | 1484 // The stack pointer is still csp if we aren't returning, and the frame |
| (...skipping 51 matching lines...) |
| 1536 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc. | 1536 // See use of the CALL_GENERATED_CODE macro for example in src/execution.cc. |
| 1537 // Input: | 1537 // Input: |
| 1538 // x0: code entry. | 1538 // x0: code entry. |
| 1539 // x1: function. | 1539 // x1: function. |
| 1540 // x2: receiver. | 1540 // x2: receiver. |
| 1541 // x3: argc. | 1541 // x3: argc. |
| 1542 // x4: argv. | 1542 // x4: argv. |
| 1543 // Output: | 1543 // Output: |
| 1544 // x0: result. | 1544 // x0: result. |
| 1545 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 1545 void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { |
| 1546 ASSERT(jssp.Is(__ StackPointer())); | 1546 DCHECK(jssp.Is(__ StackPointer())); |
| 1547 Register code_entry = x0; | 1547 Register code_entry = x0; |
| 1548 | 1548 |
| 1549 // Enable instruction instrumentation. This only works on the simulator, and | 1549 // Enable instruction instrumentation. This only works on the simulator, and |
| 1550 // will have no effect on the model or real hardware. | 1550 // will have no effect on the model or real hardware. |
| 1551 __ EnableInstrumentation(); | 1551 __ EnableInstrumentation(); |
| 1552 | 1552 |
| 1553 Label invoke, handler_entry, exit; | 1553 Label invoke, handler_entry, exit; |
| 1554 | 1554 |
| 1555 // Push callee-saved registers and synchronize the system stack pointer (csp) | 1555 // Push callee-saved registers and synchronize the system stack pointer (csp) |
| 1556 // and the JavaScript stack pointer (jssp). | 1556 // and the JavaScript stack pointer (jssp). |
| (...skipping 33 matching lines...) |
| 1590 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); | 1590 ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); |
| 1591 __ Mov(x10, ExternalReference(js_entry_sp)); | 1591 __ Mov(x10, ExternalReference(js_entry_sp)); |
| 1592 __ Ldr(x11, MemOperand(x10)); | 1592 __ Ldr(x11, MemOperand(x10)); |
| 1593 __ Cbnz(x11, &non_outermost_js); | 1593 __ Cbnz(x11, &non_outermost_js); |
| 1594 __ Str(fp, MemOperand(x10)); | 1594 __ Str(fp, MemOperand(x10)); |
| 1595 __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); | 1595 __ Mov(x12, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); |
| 1596 __ Push(x12); | 1596 __ Push(x12); |
| 1597 __ B(&done); | 1597 __ B(&done); |
| 1598 __ Bind(&non_outermost_js); | 1598 __ Bind(&non_outermost_js); |
| 1599 // We spare one instruction by pushing xzr since the marker is 0. | 1599 // We spare one instruction by pushing xzr since the marker is 0. |
| 1600 ASSERT(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL); | 1600 DCHECK(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME) == NULL); |
| 1601 __ Push(xzr); | 1601 __ Push(xzr); |
| 1602 __ Bind(&done); | 1602 __ Bind(&done); |
| 1603 | 1603 |
| 1604 // The frame set up looks like this: | 1604 // The frame set up looks like this: |
| 1605 // jssp[0] : JS entry frame marker. | 1605 // jssp[0] : JS entry frame marker. |
| 1606 // jssp[1] : C entry FP. | 1606 // jssp[1] : C entry FP. |
| 1607 // jssp[2] : stack frame marker. | 1607 // jssp[2] : stack frame marker. |
| 1608 // jssp[3] : stack frame marker. | 1608 // jssp[3] : stack frame marker. |
| 1609 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here. | 1609 // jssp[4] : bad frame pointer 0xfff...ff <- fp points here. |
| 1610 | 1610 |
| (...skipping 81 matching lines...) |
| 1692 __ Bind(&non_outermost_js_2); | 1692 __ Bind(&non_outermost_js_2); |
| 1693 | 1693 |
| 1694 // Restore the top frame descriptors from the stack. | 1694 // Restore the top frame descriptors from the stack. |
| 1695 __ Pop(x10); | 1695 __ Pop(x10); |
| 1696 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate())); | 1696 __ Mov(x11, ExternalReference(Isolate::kCEntryFPAddress, isolate())); |
| 1697 __ Str(x10, MemOperand(x11)); | 1697 __ Str(x10, MemOperand(x11)); |
| 1698 | 1698 |
| 1699 // Reset the stack to the callee saved registers. | 1699 // Reset the stack to the callee saved registers. |
| 1700 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes); | 1700 __ Drop(-EntryFrameConstants::kCallerFPOffset, kByteSizeInBytes); |
| 1701 // Restore the callee-saved registers and return. | 1701 // Restore the callee-saved registers and return. |
| 1702 ASSERT(jssp.Is(__ StackPointer())); | 1702 DCHECK(jssp.Is(__ StackPointer())); |
| 1703 __ Mov(csp, jssp); | 1703 __ Mov(csp, jssp); |
| 1704 __ SetStackPointer(csp); | 1704 __ SetStackPointer(csp); |
| 1705 __ PopCalleeSavedRegisters(); | 1705 __ PopCalleeSavedRegisters(); |
| 1706 // After this point, we must not modify jssp because it is a callee-saved | 1706 // After this point, we must not modify jssp because it is a callee-saved |
| 1707 // register which we have just restored. | 1707 // register which we have just restored. |
| 1708 __ Ret(); | 1708 __ Ret(); |
| 1709 } | 1709 } |
| 1710 | 1710 |
| 1711 | 1711 |
| 1712 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { | 1712 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { |
| (...skipping 112 matching lines...) |
| 1825 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset)); | 1825 __ Ldr(chain_prototype, FieldMemOperand(chain_map, Map::kPrototypeOffset)); |
| 1826 __ B(&loop); | 1826 __ B(&loop); |
| 1827 } | 1827 } |
| 1828 | 1828 |
| 1829 // Return sequence when no arguments are on the stack. | 1829 // Return sequence when no arguments are on the stack. |
| 1830 // We cannot fall through to here. | 1830 // We cannot fall through to here. |
| 1831 __ Bind(&return_true); | 1831 __ Bind(&return_true); |
| 1832 __ Mov(result, res_true); | 1832 __ Mov(result, res_true); |
| 1833 __ Bind(&return_result); | 1833 __ Bind(&return_result); |
| 1834 if (HasCallSiteInlineCheck()) { | 1834 if (HasCallSiteInlineCheck()) { |
| 1835 ASSERT(ReturnTrueFalseObject()); | 1835 DCHECK(ReturnTrueFalseObject()); |
| 1836 __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult); | 1836 __ Add(map_check_site, map_check_site, kDeltaToLoadBoolResult); |
| 1837 __ GetRelocatedValueLocation(map_check_site, scratch2); | 1837 __ GetRelocatedValueLocation(map_check_site, scratch2); |
| 1838 __ Str(result, MemOperand(scratch2)); | 1838 __ Str(result, MemOperand(scratch2)); |
| 1839 } else { | 1839 } else { |
| 1840 __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex); | 1840 __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex); |
| 1841 } | 1841 } |
| 1842 __ Ret(); | 1842 __ Ret(); |
| 1843 | 1843 |
| 1844 Label object_not_null, object_not_null_or_smi; | 1844 Label object_not_null, object_not_null_or_smi; |
| 1845 | 1845 |
| (...skipping 615 matching lines...) |
| 2461 // Ensure that a RegExp stack is allocated. | 2461 // Ensure that a RegExp stack is allocated. |
| 2462 ExternalReference address_of_regexp_stack_memory_address = | 2462 ExternalReference address_of_regexp_stack_memory_address = |
| 2463 ExternalReference::address_of_regexp_stack_memory_address(isolate()); | 2463 ExternalReference::address_of_regexp_stack_memory_address(isolate()); |
| 2464 ExternalReference address_of_regexp_stack_memory_size = | 2464 ExternalReference address_of_regexp_stack_memory_size = |
| 2465 ExternalReference::address_of_regexp_stack_memory_size(isolate()); | 2465 ExternalReference::address_of_regexp_stack_memory_size(isolate()); |
| 2466 __ Mov(x10, address_of_regexp_stack_memory_size); | 2466 __ Mov(x10, address_of_regexp_stack_memory_size); |
| 2467 __ Ldr(x10, MemOperand(x10)); | 2467 __ Ldr(x10, MemOperand(x10)); |
| 2468 __ Cbz(x10, &runtime); | 2468 __ Cbz(x10, &runtime); |
| 2469 | 2469 |
| 2470 // Check that the first argument is a JSRegExp object. | 2470 // Check that the first argument is a JSRegExp object. |
| 2471 ASSERT(jssp.Is(__ StackPointer())); | 2471 DCHECK(jssp.Is(__ StackPointer())); |
| 2472 __ Peek(jsregexp_object, kJSRegExpOffset); | 2472 __ Peek(jsregexp_object, kJSRegExpOffset); |
| 2473 __ JumpIfSmi(jsregexp_object, &runtime); | 2473 __ JumpIfSmi(jsregexp_object, &runtime); |
| 2474 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime); | 2474 __ JumpIfNotObjectType(jsregexp_object, x10, x10, JS_REGEXP_TYPE, &runtime); |
| 2475 | 2475 |
| 2476 // Check that the RegExp has been compiled (data contains a fixed array). | 2476 // Check that the RegExp has been compiled (data contains a fixed array). |
| 2477 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset)); | 2477 __ Ldr(regexp_data, FieldMemOperand(jsregexp_object, JSRegExp::kDataOffset)); |
| 2478 if (FLAG_debug_code) { | 2478 if (FLAG_debug_code) { |
| 2479 STATIC_ASSERT(kSmiTag == 0); | 2479 STATIC_ASSERT(kSmiTag == 0); |
| 2480 __ Tst(regexp_data, kSmiTagMask); | 2480 __ Tst(regexp_data, kSmiTagMask); |
| 2481 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected); | 2481 __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected); |
| (...skipping 16 matching lines...) |
| 2498 // Check (number_of_captures + 1) * 2 <= offsets vector size | 2498 // Check (number_of_captures + 1) * 2 <= offsets vector size |
| 2499 // number_of_captures * 2 <= offsets vector size - 2 | 2499 // number_of_captures * 2 <= offsets vector size - 2 |
| 2500 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); | 2500 STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); |
| 2501 __ Add(x10, x10, x10); | 2501 __ Add(x10, x10, x10); |
| 2502 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2); | 2502 __ Cmp(x10, Isolate::kJSRegexpStaticOffsetsVectorSize - 2); |
| 2503 __ B(hi, &runtime); | 2503 __ B(hi, &runtime); |
| 2504 | 2504 |
| 2505 // Initialize offset for possibly sliced string. | 2505 // Initialize offset for possibly sliced string. |
| 2506 __ Mov(sliced_string_offset, 0); | 2506 __ Mov(sliced_string_offset, 0); |
| 2507 | 2507 |
| 2508 ASSERT(jssp.Is(__ StackPointer())); | 2508 DCHECK(jssp.Is(__ StackPointer())); |
| 2509 __ Peek(subject, kSubjectOffset); | 2509 __ Peek(subject, kSubjectOffset); |
| 2510 __ JumpIfSmi(subject, &runtime); | 2510 __ JumpIfSmi(subject, &runtime); |
| 2511 | 2511 |
| 2512 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); | 2512 __ Ldr(x10, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| 2513 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); | 2513 __ Ldrb(string_type, FieldMemOperand(x10, Map::kInstanceTypeOffset)); |
| 2514 | 2514 |
| 2515 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset)); | 2515 __ Ldr(jsstring_length, FieldMemOperand(subject, String::kLengthOffset)); |
| 2516 | 2516 |
| 2517 // Handle subject string according to its encoding and representation: | 2517 // Handle subject string according to its encoding and representation: |
| 2518 // (1) Sequential string? If yes, go to (5). | 2518 // (1) Sequential string? If yes, go to (5). |
| (...skipping 62 matching lines...) |
| 2581 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); | 2581 STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); |
| 2582 __ TestAndBranchIfAnySet(string_type.X(), | 2582 __ TestAndBranchIfAnySet(string_type.X(), |
| 2583 kStringRepresentationMask, | 2583 kStringRepresentationMask, |
| 2584 &external_string); // Go to (7). | 2584 &external_string); // Go to (7). |
| 2585 | 2585 |
| 2586 // (5) Sequential string. Load regexp code according to encoding. | 2586 // (5) Sequential string. Load regexp code according to encoding. |
| 2587 __ Bind(&seq_string); | 2587 __ Bind(&seq_string); |
| 2588 | 2588 |
| 2589 // Check that the third argument is a positive smi less than the subject | 2589 // Check that the third argument is a positive smi less than the subject |
| 2590 // string length. A negative value will be greater (unsigned comparison). | 2590 // string length. A negative value will be greater (unsigned comparison). |
| 2591 ASSERT(jssp.Is(__ StackPointer())); | 2591 DCHECK(jssp.Is(__ StackPointer())); |
| 2592 __ Peek(x10, kPreviousIndexOffset); | 2592 __ Peek(x10, kPreviousIndexOffset); |
| 2593 __ JumpIfNotSmi(x10, &runtime); | 2593 __ JumpIfNotSmi(x10, &runtime); |
| 2594 __ Cmp(jsstring_length, x10); | 2594 __ Cmp(jsstring_length, x10); |
| 2595 __ B(ls, &runtime); | 2595 __ B(ls, &runtime); |
| 2596 | 2596 |
| 2597 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1 | 2597 // Argument 2 (x1): We need to load argument 2 (the previous index) into x1 |
| 2598 // before entering the exit frame. | 2598 // before entering the exit frame. |
| 2599 __ SmiUntag(x1, x10); | 2599 __ SmiUntag(x1, x10); |
| 2600 | 2600 |
| 2601 // The third bit determines the string encoding in string_type. | 2601 // The third bit determines the string encoding in string_type. |
| 2602 STATIC_ASSERT(kOneByteStringTag == 0x04); | 2602 STATIC_ASSERT(kOneByteStringTag == 0x04); |
| 2603 STATIC_ASSERT(kTwoByteStringTag == 0x00); | 2603 STATIC_ASSERT(kTwoByteStringTag == 0x00); |
| 2604 STATIC_ASSERT(kStringEncodingMask == 0x04); | 2604 STATIC_ASSERT(kStringEncodingMask == 0x04); |
| 2605 | 2605 |
| 2606 // Find the code object based on the assumptions above. | 2606 // Find the code object based on the assumptions above. |
| 2607 // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset | 2607 // kDataAsciiCodeOffset and kDataUC16CodeOffset are adjacent, adds an offset |
| 2608 // of kPointerSize to reach the latter. | 2608 // of kPointerSize to reach the latter. |
| 2609 ASSERT_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize, | 2609 DCHECK_EQ(JSRegExp::kDataAsciiCodeOffset + kPointerSize, |
| 2610 JSRegExp::kDataUC16CodeOffset); | 2610 JSRegExp::kDataUC16CodeOffset); |
| 2611 __ Mov(x10, kPointerSize); | 2611 __ Mov(x10, kPointerSize); |
| 2612 // We will need the encoding later: ASCII = 0x04 | 2612 // We will need the encoding later: ASCII = 0x04 |
| 2613 // UC16 = 0x00 | 2613 // UC16 = 0x00 |
| 2614 __ Ands(string_encoding, string_type, kStringEncodingMask); | 2614 __ Ands(string_encoding, string_type, kStringEncodingMask); |
| 2615 __ CzeroX(x10, ne); | 2615 __ CzeroX(x10, ne); |
| 2616 __ Add(x10, regexp_data, x10); | 2616 __ Add(x10, regexp_data, x10); |
| 2617 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset)); | 2617 __ Ldr(code_object, FieldMemOperand(x10, JSRegExp::kDataAsciiCodeOffset)); |
| 2618 | 2618 |
| 2619 // (E) Carry on. String handling is done. | 2619 // (E) Carry on. String handling is done. |
| 2620 | 2620 |
| 2621 // Check that the irregexp code has been generated for the actual string | 2621 // Check that the irregexp code has been generated for the actual string |
| 2622 // encoding. If it has, the field contains a code object; otherwise it contains | 2622 // encoding. If it has, the field contains a code object; otherwise it contains |
| 2623 // a smi (code flushing support). | 2623 // a smi (code flushing support). |
| 2624 __ JumpIfSmi(code_object, &runtime); | 2624 __ JumpIfSmi(code_object, &runtime); |
| 2625 | 2625 |
| 2626 // All checks done. Now push arguments for native regexp code. | 2626 // All checks done. Now push arguments for native regexp code. |
| 2627 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, | 2627 __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, |
| 2628 x10, | 2628 x10, |
| 2629 x11); | 2629 x11); |
| 2630 | 2630 |
| 2631 // Isolates: note we add an additional parameter here (isolate pointer). | 2631 // Isolates: note we add an additional parameter here (isolate pointer). |
| 2632 __ EnterExitFrame(false, x10, 1); | 2632 __ EnterExitFrame(false, x10, 1); |
| 2633 ASSERT(csp.Is(__ StackPointer())); | 2633 DCHECK(csp.Is(__ StackPointer())); |
| 2634 | 2634 |
| 2635 // We have 9 arguments to pass to the regexp code, therefore we have to pass | 2635 // We have 9 arguments to pass to the regexp code, therefore we have to pass |
| 2636 // one on the stack and the rest as registers. | 2636 // one on the stack and the rest as registers. |
| 2637 | 2637 |
| 2638 // Note that the placement of the argument on the stack isn't standard | 2638 // Note that the placement of the argument on the stack isn't standard |
| 2639 // AAPCS64: | 2639 // AAPCS64: |
| 2640 // csp[0]: Space for the return address placed by DirectCEntryStub. | 2640 // csp[0]: Space for the return address placed by DirectCEntryStub. |
| 2641 // csp[8]: Argument 9, the current isolate address. | 2641 // csp[8]: Argument 9, the current isolate address. |
| 2642 | 2642 |
| 2643 __ Mov(x10, ExternalReference::isolate_address(isolate())); | 2643 __ Mov(x10, ExternalReference::isolate_address(isolate())); |
| (...skipping 83 matching lines...) |
| 2727 | 2727 |
| 2728 // Calculate number of capture registers (number_of_captures + 1) * 2 | 2728 // Calculate number of capture registers (number_of_captures + 1) * 2 |
| 2729 // and store it in the last match info. | 2729 // and store it in the last match info. |
| 2730 __ Ldrsw(x10, | 2730 __ Ldrsw(x10, |
| 2731 UntagSmiFieldMemOperand(regexp_data, | 2731 UntagSmiFieldMemOperand(regexp_data, |
| 2732 JSRegExp::kIrregexpCaptureCountOffset)); | 2732 JSRegExp::kIrregexpCaptureCountOffset)); |
| 2733 __ Add(x10, x10, x10); | 2733 __ Add(x10, x10, x10); |
| 2734 __ Add(number_of_capture_registers, x10, 2); | 2734 __ Add(number_of_capture_registers, x10, 2); |
| 2735 | 2735 |
| 2736 // Check that the fourth object is a JSArray object. | 2736 // Check that the fourth object is a JSArray object. |
| 2737 ASSERT(jssp.Is(__ StackPointer())); | 2737 DCHECK(jssp.Is(__ StackPointer())); |
| 2738 __ Peek(x10, kLastMatchInfoOffset); | 2738 __ Peek(x10, kLastMatchInfoOffset); |
| 2739 __ JumpIfSmi(x10, &runtime); | 2739 __ JumpIfSmi(x10, &runtime); |
| 2740 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime); | 2740 __ JumpIfNotObjectType(x10, x11, x11, JS_ARRAY_TYPE, &runtime); |
| 2741 | 2741 |
| 2742 // Check that the JSArray is the fast case. | 2742 // Check that the JSArray is the fast case. |
| 2743 __ Ldr(last_match_info_elements, | 2743 __ Ldr(last_match_info_elements, |
| 2744 FieldMemOperand(x10, JSArray::kElementsOffset)); | 2744 FieldMemOperand(x10, JSArray::kElementsOffset)); |
| 2745 __ Ldr(x10, | 2745 __ Ldr(x10, |
| 2746 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 2746 FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); |
| 2747 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime); | 2747 __ JumpIfNotRoot(x10, Heap::kFixedArrayMapRootIndex, &runtime); |
| (...skipping 161 matching lines...) |
| 2909 | 2909 |
| 2910 | 2910 |
| 2911 static void GenerateRecordCallTarget(MacroAssembler* masm, | 2911 static void GenerateRecordCallTarget(MacroAssembler* masm, |
| 2912 Register argc, | 2912 Register argc, |
| 2913 Register function, | 2913 Register function, |
| 2914 Register feedback_vector, | 2914 Register feedback_vector, |
| 2915 Register index, | 2915 Register index, |
| 2916 Register scratch1, | 2916 Register scratch1, |
| 2917 Register scratch2) { | 2917 Register scratch2) { |
| 2918 ASM_LOCATION("GenerateRecordCallTarget"); | 2918 ASM_LOCATION("GenerateRecordCallTarget"); |
| 2919 ASSERT(!AreAliased(scratch1, scratch2, | 2919 DCHECK(!AreAliased(scratch1, scratch2, |
| 2920 argc, function, feedback_vector, index)); | 2920 argc, function, feedback_vector, index)); |
| 2921 // Cache the called function in a feedback vector slot. Cache states are | 2921 // Cache the called function in a feedback vector slot. Cache states are |
| 2922 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic. | 2922 // uninitialized, monomorphic (indicated by a JSFunction), and megamorphic. |
| 2923 // argc : number of arguments to the construct function | 2923 // argc : number of arguments to the construct function |
| 2924 // function : the function to call | 2924 // function : the function to call |
| 2925 // feedback_vector : the feedback vector | 2925 // feedback_vector : the feedback vector |
| 2926 // index : slot in feedback vector (smi) | 2926 // index : slot in feedback vector (smi) |
| 2927 Label initialize, done, miss, megamorphic, not_array_function; | 2927 Label initialize, done, miss, megamorphic, not_array_function; |
| 2928 | 2928 |
| 2929 ASSERT_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | 2929 DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), |
| 2930 masm->isolate()->heap()->megamorphic_symbol()); | 2930 masm->isolate()->heap()->megamorphic_symbol()); |
| 2931 ASSERT_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), | 2931 DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), |
| 2932 masm->isolate()->heap()->uninitialized_symbol()); | 2932 masm->isolate()->heap()->uninitialized_symbol()); |
| 2933 | 2933 |
| 2934 // Load the cache state. | 2934 // Load the cache state. |
| 2935 __ Add(scratch1, feedback_vector, | 2935 __ Add(scratch1, feedback_vector, |
| 2936 Operand::UntagSmiAndScale(index, kPointerSizeLog2)); | 2936 Operand::UntagSmiAndScale(index, kPointerSizeLog2)); |
| 2937 __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 2937 __ Ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
| 2938 | 2938 |
| 2939 // A monomorphic cache hit or an already megamorphic state: invoke the | 2939 // A monomorphic cache hit or an already megamorphic state: invoke the |
| 2940 // function without changing the state. | 2940 // function without changing the state. |
| 2941 __ Cmp(scratch1, function); | 2941 __ Cmp(scratch1, function); |
| (...skipping 44 matching lines...) |
| 2986 { | 2986 { |
| 2987 FrameScope scope(masm, StackFrame::INTERNAL); | 2987 FrameScope scope(masm, StackFrame::INTERNAL); |
| 2988 CreateAllocationSiteStub create_stub(masm->isolate()); | 2988 CreateAllocationSiteStub create_stub(masm->isolate()); |
| 2989 | 2989 |
| 2990 // Arguments register must be smi-tagged to call out. | 2990 // Arguments register must be smi-tagged to call out. |
| 2991 __ SmiTag(argc); | 2991 __ SmiTag(argc); |
| 2992 __ Push(argc, function, feedback_vector, index); | 2992 __ Push(argc, function, feedback_vector, index); |
| 2993 | 2993 |
| 2994 // CreateAllocationSiteStub expect the feedback vector in x2 and the slot | 2994 // CreateAllocationSiteStub expect the feedback vector in x2 and the slot |
| 2995 // index in x3. | 2995 // index in x3. |
| 2996 ASSERT(feedback_vector.Is(x2) && index.Is(x3)); | 2996 DCHECK(feedback_vector.Is(x2) && index.Is(x3)); |
| 2997 __ CallStub(&create_stub); | 2997 __ CallStub(&create_stub); |
| 2998 | 2998 |
| 2999 __ Pop(index, feedback_vector, function, argc); | 2999 __ Pop(index, feedback_vector, function, argc); |
| 3000 __ SmiUntag(argc); | 3000 __ SmiUntag(argc); |
| 3001 } | 3001 } |
| 3002 __ B(&done); | 3002 __ B(&done); |
| 3003 | 3003 |
| 3004 __ Bind(&not_array_function); | 3004 __ Bind(&not_array_function); |
| 3005 } | 3005 } |
| 3006 | 3006 |
| (...skipping 405 matching lines...) |
| 3412 result_, | 3412 result_, |
| 3413 Heap::kHeapNumberMapRootIndex, | 3413 Heap::kHeapNumberMapRootIndex, |
| 3414 index_not_number_, | 3414 index_not_number_, |
| 3415 DONT_DO_SMI_CHECK); | 3415 DONT_DO_SMI_CHECK); |
| 3416 call_helper.BeforeCall(masm); | 3416 call_helper.BeforeCall(masm); |
| 3417 // Save object_ on the stack and pass index_ as argument for runtime call. | 3417 // Save object_ on the stack and pass index_ as argument for runtime call. |
| 3418 __ Push(object_, index_); | 3418 __ Push(object_, index_); |
| 3419 if (index_flags_ == STRING_INDEX_IS_NUMBER) { | 3419 if (index_flags_ == STRING_INDEX_IS_NUMBER) { |
| 3420 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); | 3420 __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero, 1); |
| 3421 } else { | 3421 } else { |
| 3422 ASSERT(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | 3422 DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); |
| 3423 // NumberToSmi discards numbers that are not exact integers. | 3423 // NumberToSmi discards numbers that are not exact integers. |
| 3424 __ CallRuntime(Runtime::kNumberToSmi, 1); | 3424 __ CallRuntime(Runtime::kNumberToSmi, 1); |
| 3425 } | 3425 } |
| 3426 // Save the conversion result before the pop instructions below | 3426 // Save the conversion result before the pop instructions below |
| 3427 // have a chance to overwrite it. | 3427 // have a chance to overwrite it. |
| 3428 __ Mov(index_, x0); | 3428 __ Mov(index_, x0); |
| 3429 __ Pop(object_); | 3429 __ Pop(object_); |
| 3430 // Reload the instance type. | 3430 // Reload the instance type. |
| 3431 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 3431 __ Ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 3432 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 3432 __ Ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| (...skipping 46 matching lines...) |
| 3479 __ Mov(result_, x0); | 3479 __ Mov(result_, x0); |
| 3480 call_helper.AfterCall(masm); | 3480 call_helper.AfterCall(masm); |
| 3481 __ B(&exit_); | 3481 __ B(&exit_); |
| 3482 | 3482 |
| 3483 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); | 3483 __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); |
| 3484 } | 3484 } |
| 3485 | 3485 |
| 3486 | 3486 |
| 3487 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 3487 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 3488 // Inputs are in x0 (lhs) and x1 (rhs). | 3488 // Inputs are in x0 (lhs) and x1 (rhs). |
| 3489 ASSERT(state_ == CompareIC::SMI); | 3489 DCHECK(state_ == CompareIC::SMI); |
| 3490 ASM_LOCATION("ICCompareStub[Smis]"); | 3490 ASM_LOCATION("ICCompareStub[Smis]"); |
| 3491 Label miss; | 3491 Label miss; |
| 3492 // Bail out (to 'miss') unless both x0 and x1 are smis. | 3492 // Bail out (to 'miss') unless both x0 and x1 are smis. |
| 3493 __ JumpIfEitherNotSmi(x0, x1, &miss); | 3493 __ JumpIfEitherNotSmi(x0, x1, &miss); |
| 3494 | 3494 |
| 3495 if (GetCondition() == eq) { | 3495 if (GetCondition() == eq) { |
| 3496 // For equality we do not care about the sign of the result. | 3496 // For equality we do not care about the sign of the result. |
| 3497 __ Sub(x0, x0, x1); | 3497 __ Sub(x0, x0, x1); |
| 3498 } else { | 3498 } else { |
| 3499 // Untag before subtracting to avoid handling overflow. | 3499 // Untag before subtracting to avoid handling overflow. |
| 3500 __ SmiUntag(x1); | 3500 __ SmiUntag(x1); |
| 3501 __ Sub(x0, x1, Operand::UntagSmi(x0)); | 3501 __ Sub(x0, x1, Operand::UntagSmi(x0)); |
| 3502 } | 3502 } |
| 3503 __ Ret(); | 3503 __ Ret(); |
| 3504 | 3504 |
| 3505 __ Bind(&miss); | 3505 __ Bind(&miss); |
| 3506 GenerateMiss(masm); | 3506 GenerateMiss(masm); |
| 3507 } | 3507 } |
| 3508 | 3508 |
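
GenerateSmis relies on the smi tagging scheme: two tagged smis are equal exactly when their difference is zero, so the equality path subtracts the tagged values directly, while the ordered path untags both operands first so the subtraction cannot overflow. A sketch of that arithmetic, assuming the 64-bit layout of this era where a 32-bit smi payload sits in the upper word (the helpers below are hypothetical, not V8 API):

```cpp
#include <cstdint>
#include <iostream>

// On 64-bit V8 targets of this vintage, a smi keeps a 32-bit payload in the
// upper half of the word; the low 32 bits, including the tag bit, are zero.
int64_t SmiTag(int32_t value) { return static_cast<int64_t>(value) << 32; }
int32_t SmiUntag(int64_t smi) { return static_cast<int32_t>(smi >> 32); }

int main() {
  int64_t lhs = SmiTag(7);
  int64_t rhs = SmiTag(3);
  // Equality: subtracting tagged values is safe; the result is zero iff the
  // payloads are equal (this is the Sub(x0, x0, x1) path above).
  std::cout << (lhs - SmiTag(7)) << "\n";  // 0
  // Ordered compare: untag both operands before subtracting, as the stub
  // does with SmiUntag/Operand::UntagSmi, so no overflow is possible.
  std::cout << (SmiUntag(lhs) - SmiUntag(rhs)) << "\n";  // 4
  return 0;
}
```
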
| 3509 | 3509 |
| 3510 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 3510 void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { |
| 3511 ASSERT(state_ == CompareIC::NUMBER); | 3511 DCHECK(state_ == CompareIC::NUMBER); |
| 3512 ASM_LOCATION("ICCompareStub[HeapNumbers]"); | 3512 ASM_LOCATION("ICCompareStub[HeapNumbers]"); |
| 3513 | 3513 |
| 3514 Label unordered, maybe_undefined1, maybe_undefined2; | 3514 Label unordered, maybe_undefined1, maybe_undefined2; |
| 3515 Label miss, handle_lhs, values_in_d_regs; | 3515 Label miss, handle_lhs, values_in_d_regs; |
| 3516 Label untag_rhs, untag_lhs; | 3516 Label untag_rhs, untag_lhs; |
| 3517 | 3517 |
| 3518 Register result = x0; | 3518 Register result = x0; |
| 3519 Register rhs = x0; | 3519 Register rhs = x0; |
| 3520 Register lhs = x1; | 3520 Register lhs = x1; |
| 3521 FPRegister rhs_d = d0; | 3521 FPRegister rhs_d = d0; |
| (...skipping 47 matching lines...) |
| 3569 if (Token::IsOrderedRelationalCompareOp(op_)) { | 3569 if (Token::IsOrderedRelationalCompareOp(op_)) { |
| 3570 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered); | 3570 __ JumpIfRoot(lhs, Heap::kUndefinedValueRootIndex, &unordered); |
| 3571 } | 3571 } |
| 3572 | 3572 |
| 3573 __ Bind(&miss); | 3573 __ Bind(&miss); |
| 3574 GenerateMiss(masm); | 3574 GenerateMiss(masm); |
| 3575 } | 3575 } |
| 3576 | 3576 |
| 3577 | 3577 |
| 3578 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { | 3578 void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { |
| 3579 ASSERT(state_ == CompareIC::INTERNALIZED_STRING); | 3579 DCHECK(state_ == CompareIC::INTERNALIZED_STRING); |
| 3580 ASM_LOCATION("ICCompareStub[InternalizedStrings]"); | 3580 ASM_LOCATION("ICCompareStub[InternalizedStrings]"); |
| 3581 Label miss; | 3581 Label miss; |
| 3582 | 3582 |
| 3583 Register result = x0; | 3583 Register result = x0; |
| 3584 Register rhs = x0; | 3584 Register rhs = x0; |
| 3585 Register lhs = x1; | 3585 Register lhs = x1; |
| 3586 | 3586 |
| 3587 // Check that both operands are heap objects. | 3587 // Check that both operands are heap objects. |
| 3588 __ JumpIfEitherSmi(lhs, rhs, &miss); | 3588 __ JumpIfEitherSmi(lhs, rhs, &miss); |
| 3589 | 3589 |
| (...skipping 17 matching lines...) |
| 3607 __ Cmp(lhs, rhs); | 3607 __ Cmp(lhs, rhs); |
| 3608 __ Cset(result, ne); | 3608 __ Cset(result, ne); |
| 3609 __ Ret(); | 3609 __ Ret(); |
| 3610 | 3610 |
| 3611 __ Bind(&miss); | 3611 __ Bind(&miss); |
| 3612 GenerateMiss(masm); | 3612 GenerateMiss(masm); |
| 3613 } | 3613 } |
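Internalized strings are canonical (one heap object per distinct content), so equality reduces to pointer identity, and `Cset(result, ne)` materializes 0 for equal, matching the untagged EQUAL value, and 1 otherwise. The same pointer-identity trick reappears for unique names and JS objects below. A hedged sketch of what the IC receives:

```cpp
// Sketch: internalized strings are canonical, so identity decides
// equality. 0 mirrors EQUAL; any non-zero value means "not equal".
int CompareInternalized(const void* lhs, const void* rhs) {
  return lhs == rhs ? 0 : 1;
}
```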
| 3614 | 3614 |
| 3615 | 3615 |
| 3616 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | 3616 void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { |
| 3617 ASSERT(state_ == CompareIC::UNIQUE_NAME); | 3617 DCHECK(state_ == CompareIC::UNIQUE_NAME); |
| 3618 ASM_LOCATION("ICCompareStub[UniqueNames]"); | 3618 ASM_LOCATION("ICCompareStub[UniqueNames]"); |
| 3619 ASSERT(GetCondition() == eq); | 3619 DCHECK(GetCondition() == eq); |
| 3620 Label miss; | 3620 Label miss; |
| 3621 | 3621 |
| 3622 Register result = x0; | 3622 Register result = x0; |
| 3623 Register rhs = x0; | 3623 Register rhs = x0; |
| 3624 Register lhs = x1; | 3624 Register lhs = x1; |
| 3625 | 3625 |
| 3626 Register lhs_instance_type = w2; | 3626 Register lhs_instance_type = w2; |
| 3627 Register rhs_instance_type = w3; | 3627 Register rhs_instance_type = w3; |
| 3628 | 3628 |
| 3629 // Check that both operands are heap objects. | 3629 // Check that both operands are heap objects. |
| (...skipping 16 matching lines...) |
| 3646 __ Cmp(lhs, rhs); | 3646 __ Cmp(lhs, rhs); |
| 3647 __ Cset(result, ne); | 3647 __ Cset(result, ne); |
| 3648 __ Ret(); | 3648 __ Ret(); |
| 3649 | 3649 |
| 3650 __ Bind(&miss); | 3650 __ Bind(&miss); |
| 3651 GenerateMiss(masm); | 3651 GenerateMiss(masm); |
| 3652 } | 3652 } |
| 3653 | 3653 |
| 3654 | 3654 |
| 3655 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 3655 void ICCompareStub::GenerateStrings(MacroAssembler* masm) { |
| 3656 ASSERT(state_ == CompareIC::STRING); | 3656 DCHECK(state_ == CompareIC::STRING); |
| 3657 ASM_LOCATION("ICCompareStub[Strings]"); | 3657 ASM_LOCATION("ICCompareStub[Strings]"); |
| 3658 | 3658 |
| 3659 Label miss; | 3659 Label miss; |
| 3660 | 3660 |
| 3661 bool equality = Token::IsEqualityOp(op_); | 3661 bool equality = Token::IsEqualityOp(op_); |
| 3662 | 3662 |
| 3663 Register result = x0; | 3663 Register result = x0; |
| 3664 Register rhs = x0; | 3664 Register rhs = x0; |
| 3665 Register lhs = x1; | 3665 Register lhs = x1; |
| 3666 | 3666 |
| (...skipping 20 matching lines...) |
| 3687 __ Mov(result, EQUAL); | 3687 __ Mov(result, EQUAL); |
| 3688 __ Ret(); | 3688 __ Ret(); |
| 3689 | 3689 |
| 3690 __ Bind(¬_equal); | 3690 __ Bind(¬_equal); |
| 3691 // Handle non-identical strings. | 3691 // Handle non-identical strings. |
| 3692 | 3692 |
| 3693 // Check that both strings are internalized strings. If they are, we're done | 3693 // Check that both strings are internalized strings. If they are, we're done |
| 3694 // because we already know they are not identical. We know they are both | 3694 // because we already know they are not identical. We know they are both |
| 3695 // strings. | 3695 // strings. |
| 3696 if (equality) { | 3696 if (equality) { |
| 3697 ASSERT(GetCondition() == eq); | 3697 DCHECK(GetCondition() == eq); |
| 3698 STATIC_ASSERT(kInternalizedTag == 0); | 3698 STATIC_ASSERT(kInternalizedTag == 0); |
| 3699 Label not_internalized_strings; | 3699 Label not_internalized_strings; |
| 3700 __ Orr(x12, lhs_type, rhs_type); | 3700 __ Orr(x12, lhs_type, rhs_type); |
| 3701 __ TestAndBranchIfAnySet( | 3701 __ TestAndBranchIfAnySet( |
| 3702 x12, kIsNotInternalizedMask, ¬_internalized_strings); | 3702 x12, kIsNotInternalizedMask, ¬_internalized_strings); |
| 3703 // Result is in rhs (x0); being a heap pointer it is non-zero, hence not EQUAL. | 3703 // Result is in rhs (x0); being a heap pointer it is non-zero, hence not EQUAL. |
| 3704 __ Ret(); | 3704 __ Ret(); |
| 3705 __ Bind(¬_internalized_strings); | 3705 __ Bind(¬_internalized_strings); |
| 3706 } | 3706 } |
| 3707 | 3707 |
| (...skipping 19 matching lines...) |
| 3727 } else { | 3727 } else { |
| 3728 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); | 3728 __ TailCallRuntime(Runtime::kStringCompare, 2, 1); |
| 3729 } | 3729 } |
| 3730 | 3730 |
| 3731 __ Bind(&miss); | 3731 __ Bind(&miss); |
| 3732 GenerateMiss(masm); | 3732 GenerateMiss(masm); |
| 3733 } | 3733 } |
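In the equality case the stub ORs the two instance-type bytes and tests the combined value against `kIsNotInternalizedMask`: since `kInternalizedTag == 0`, the masked bits are all clear only when both strings are internalized, and non-identity then already implies inequality. A C++ sketch of that flag test (mask value assumed, passed in for illustration):

```cpp
#include <cstdint>

// Sketch of the combined instance-type test. With kInternalizedTag == 0,
// an internalized string has the "not internalized" bit clear, so OR-ing
// the two type bytes shows whether *both* operands are internalized.
bool BothInternalized(uint32_t lhs_type, uint32_t rhs_type,
                      uint32_t is_not_internalized_mask) {
  return ((lhs_type | rhs_type) & is_not_internalized_mask) == 0;
}
```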
| 3734 | 3734 |
| 3735 | 3735 |
| 3736 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 3736 void ICCompareStub::GenerateObjects(MacroAssembler* masm) { |
| 3737 ASSERT(state_ == CompareIC::OBJECT); | 3737 DCHECK(state_ == CompareIC::OBJECT); |
| 3738 ASM_LOCATION("ICCompareStub[Objects]"); | 3738 ASM_LOCATION("ICCompareStub[Objects]"); |
| 3739 | 3739 |
| 3740 Label miss; | 3740 Label miss; |
| 3741 | 3741 |
| 3742 Register result = x0; | 3742 Register result = x0; |
| 3743 Register rhs = x0; | 3743 Register rhs = x0; |
| 3744 Register lhs = x1; | 3744 Register lhs = x1; |
| 3745 | 3745 |
| 3746 __ JumpIfEitherSmi(rhs, lhs, &miss); | 3746 __ JumpIfEitherSmi(rhs, lhs, &miss); |
| 3747 | 3747 |
| 3748 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss); | 3748 __ JumpIfNotObjectType(rhs, x10, x10, JS_OBJECT_TYPE, &miss); |
| 3749 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss); | 3749 __ JumpIfNotObjectType(lhs, x10, x10, JS_OBJECT_TYPE, &miss); |
| 3750 | 3750 |
| 3751 ASSERT(GetCondition() == eq); | 3751 DCHECK(GetCondition() == eq); |
| 3752 __ Sub(result, rhs, lhs); | 3752 __ Sub(result, rhs, lhs); |
| 3753 __ Ret(); | 3753 __ Ret(); |
| 3754 | 3754 |
| 3755 __ Bind(&miss); | 3755 __ Bind(&miss); |
| 3756 GenerateMiss(masm); | 3756 GenerateMiss(masm); |
| 3757 } | 3757 } |
| 3758 | 3758 |
| 3759 | 3759 |
| 3760 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { | 3760 void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { |
| 3761 ASM_LOCATION("ICCompareStub[KnownObjects]"); | 3761 ASM_LOCATION("ICCompareStub[KnownObjects]"); |
| (...skipping 55 matching lines...) |
| 3817 } | 3817 } |
| 3818 | 3818 |
| 3819 // Tail-call to the new stub. | 3819 // Tail-call to the new stub. |
| 3820 __ Jump(stub_entry); | 3820 __ Jump(stub_entry); |
| 3821 } | 3821 } |
| 3822 | 3822 |
| 3823 | 3823 |
| 3824 void StringHelper::GenerateHashInit(MacroAssembler* masm, | 3824 void StringHelper::GenerateHashInit(MacroAssembler* masm, |
| 3825 Register hash, | 3825 Register hash, |
| 3826 Register character) { | 3826 Register character) { |
| 3827 ASSERT(!AreAliased(hash, character)); | 3827 DCHECK(!AreAliased(hash, character)); |
| 3828 | 3828 |
| 3829 // hash = character + (character << 10); | 3829 // hash = character + (character << 10); |
| 3830 __ LoadRoot(hash, Heap::kHashSeedRootIndex); | 3830 __ LoadRoot(hash, Heap::kHashSeedRootIndex); |
| 3831 // Untag smi seed and add the character. | 3831 // Untag smi seed and add the character. |
| 3832 __ Add(hash, character, Operand::UntagSmi(hash)); | 3832 __ Add(hash, character, Operand::UntagSmi(hash)); |
| 3833 | 3833 |
| 3834 // Compute hashes modulo 2^32 using a 32-bit W register. | 3834 // Compute hashes modulo 2^32 using a 32-bit W register. |
| 3835 Register hash_w = hash.W(); | 3835 Register hash_w = hash.W(); |
| 3836 | 3836 |
| 3837 // hash += hash << 10; | 3837 // hash += hash << 10; |
| 3838 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10)); | 3838 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10)); |
| 3839 // hash ^= hash >> 6; | 3839 // hash ^= hash >> 6; |
| 3840 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6)); | 3840 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6)); |
| 3841 } | 3841 } |
| 3842 | 3842 |
| 3843 | 3843 |
| 3844 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, | 3844 void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, |
| 3845 Register hash, | 3845 Register hash, |
| 3846 Register character) { | 3846 Register character) { |
| 3847 ASSERT(!AreAliased(hash, character)); | 3847 DCHECK(!AreAliased(hash, character)); |
| 3848 | 3848 |
| 3849 // hash += character; | 3849 // hash += character; |
| 3850 __ Add(hash, hash, character); | 3850 __ Add(hash, hash, character); |
| 3851 | 3851 |
| 3852 // Compute hashes modulo 2^32 using a 32-bit W register. | 3852 // Compute hashes modulo 2^32 using a 32-bit W register. |
| 3853 Register hash_w = hash.W(); | 3853 Register hash_w = hash.W(); |
| 3854 | 3854 |
| 3855 // hash += hash << 10; | 3855 // hash += hash << 10; |
| 3856 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10)); | 3856 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 10)); |
| 3857 // hash ^= hash >> 6; | 3857 // hash ^= hash >> 6; |
| 3858 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6)); | 3858 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 6)); |
| 3859 } | 3859 } |
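These helpers generate the classic one-at-a-time string hash, computed modulo 2^32 by working in W registers; GenerateHashGetHash below applies the finalization steps and masks with `String::kHashBitMask`. A C++ rendering of the whole sequence as a sketch (the mask value is left as a parameter, and the handling of a zero result sits in the stub's elided tail, so it is only noted here):

```cpp
#include <cstdint>

// One-at-a-time hash as generated by the three StringHelper stubs.
uint32_t HashInit(uint32_t untagged_seed, uint32_t character) {
  uint32_t hash = untagged_seed + character;  // seed + first character
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashAddCharacter(uint32_t hash, uint32_t character) {
  hash += character;
  hash += hash << 10;
  hash ^= hash >> 6;
  return hash;
}

uint32_t HashGetHash(uint32_t hash, uint32_t hash_bit_mask) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  // A zero result is handled by the stub's elided tail (assumption).
  return hash & hash_bit_mask;
}
```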
| 3860 | 3860 |
| 3861 | 3861 |
| 3862 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | 3862 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
| 3863 Register hash, | 3863 Register hash, |
| 3864 Register scratch) { | 3864 Register scratch) { |
| 3865 // Compute hashes modulo 2^32 using a 32-bit W register. | 3865 // Compute hashes modulo 2^32 using a 32-bit W register. |
| 3866 Register hash_w = hash.W(); | 3866 Register hash_w = hash.W(); |
| 3867 Register scratch_w = scratch.W(); | 3867 Register scratch_w = scratch.W(); |
| 3868 ASSERT(!AreAliased(hash_w, scratch_w)); | 3868 DCHECK(!AreAliased(hash_w, scratch_w)); |
| 3869 | 3869 |
| 3870 // hash += hash << 3; | 3870 // hash += hash << 3; |
| 3871 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3)); | 3871 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 3)); |
| 3872 // hash ^= hash >> 11; | 3872 // hash ^= hash >> 11; |
| 3873 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11)); | 3873 __ Eor(hash_w, hash_w, Operand(hash_w, LSR, 11)); |
| 3874 // hash += hash << 15; | 3874 // hash += hash << 15; |
| 3875 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15)); | 3875 __ Add(hash_w, hash_w, Operand(hash_w, LSL, 15)); |
| 3876 | 3876 |
| 3877 __ Ands(hash_w, hash_w, String::kHashBitMask); | 3877 __ Ands(hash_w, hash_w, String::kHashBitMask); |
| 3878 | 3878 |
| (...skipping 253 matching lines...) |
| 4132 generator.SkipSlow(masm, &runtime); | 4132 generator.SkipSlow(masm, &runtime); |
| 4133 } | 4133 } |
| 4134 | 4134 |
| 4135 | 4135 |
| 4136 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, | 4136 void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, |
| 4137 Register left, | 4137 Register left, |
| 4138 Register right, | 4138 Register right, |
| 4139 Register scratch1, | 4139 Register scratch1, |
| 4140 Register scratch2, | 4140 Register scratch2, |
| 4141 Register scratch3) { | 4141 Register scratch3) { |
| 4142 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3)); | 4142 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3)); |
| 4143 Register result = x0; | 4143 Register result = x0; |
| 4144 Register left_length = scratch1; | 4144 Register left_length = scratch1; |
| 4145 Register right_length = scratch2; | 4145 Register right_length = scratch2; |
| 4146 | 4146 |
| 4147 // Compare lengths. If lengths differ, strings can't be equal. Lengths are | 4147 // Compare lengths. If lengths differ, strings can't be equal. Lengths are |
| 4148 // smis, and don't need to be untagged. | 4148 // smis, and don't need to be untagged. |
| 4149 Label strings_not_equal, check_zero_length; | 4149 Label strings_not_equal, check_zero_length; |
| 4150 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset)); | 4150 __ Ldr(left_length, FieldMemOperand(left, String::kLengthOffset)); |
| 4151 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset)); | 4151 __ Ldr(right_length, FieldMemOperand(right, String::kLengthOffset)); |
| 4152 __ Cmp(left_length, right_length); | 4152 __ Cmp(left_length, right_length); |
| (...skipping 22 matching lines...) |
| 4175 } | 4175 } |
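In C++ terms the stub is a length check followed by a byte-wise compare. A minimal sketch, assuming flat one-byte (ASCII) content:

```cpp
#include <cstddef>
#include <cstring>

// Sketch of the equality check: differing lengths can never be equal,
// zero-length strings always are, otherwise compare the bytes.
bool FlatAsciiEquals(const char* left, size_t left_len,
                     const char* right, size_t right_len) {
  if (left_len != right_len) return false;
  if (left_len == 0) return true;
  return std::memcmp(left, right, left_len) == 0;
}
```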
| 4176 | 4176 |
| 4177 | 4177 |
| 4178 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 4178 void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, |
| 4179 Register left, | 4179 Register left, |
| 4180 Register right, | 4180 Register right, |
| 4181 Register scratch1, | 4181 Register scratch1, |
| 4182 Register scratch2, | 4182 Register scratch2, |
| 4183 Register scratch3, | 4183 Register scratch3, |
| 4184 Register scratch4) { | 4184 Register scratch4) { |
| 4185 ASSERT(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4)); | 4185 DCHECK(!AreAliased(left, right, scratch1, scratch2, scratch3, scratch4)); |
| 4186 Label result_not_equal, compare_lengths; | 4186 Label result_not_equal, compare_lengths; |
| 4187 | 4187 |
| 4188 // Find minimum length and length difference. | 4188 // Find minimum length and length difference. |
| 4189 Register length_delta = scratch3; | 4189 Register length_delta = scratch3; |
| 4190 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 4190 __ Ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); |
| 4191 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 4191 __ Ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); |
| 4192 __ Subs(length_delta, scratch1, scratch2); | 4192 __ Subs(length_delta, scratch1, scratch2); |
| 4193 | 4193 |
| 4194 Register min_length = scratch1; | 4194 Register min_length = scratch1; |
| 4195 __ Csel(min_length, scratch2, scratch1, gt); | 4195 __ Csel(min_length, scratch2, scratch1, gt); |
| 4196 __ Cbz(min_length, &compare_lengths); | 4196 __ Cbz(min_length, &compare_lengths); |
| 4197 | 4197 |
| 4198 // Compare loop. | 4198 // Compare loop. |
| 4199 GenerateAsciiCharsCompareLoop(masm, | 4199 GenerateAsciiCharsCompareLoop(masm, |
| 4200 left, right, min_length, scratch2, scratch4, | 4200 left, right, min_length, scratch2, scratch4, |
| 4201 &result_not_equal); | 4201 &result_not_equal); |
| 4202 | 4202 |
| 4203 // Compare lengths - strings up to min-length are equal. | 4203 // Compare lengths - strings up to min-length are equal. |
| 4204 __ Bind(&compare_lengths); | 4204 __ Bind(&compare_lengths); |
| 4205 | 4205 |
| 4206 ASSERT(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 4206 DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); |
| 4207 | 4207 |
| 4208 // Use length_delta as result if it's zero. | 4208 // Use length_delta as result if it's zero. |
| 4209 Register result = x0; | 4209 Register result = x0; |
| 4210 __ Subs(result, length_delta, 0); | 4210 __ Subs(result, length_delta, 0); |
| 4211 | 4211 |
| 4212 __ Bind(&result_not_equal); | 4212 __ Bind(&result_not_equal); |
| 4213 Register greater = x10; | 4213 Register greater = x10; |
| 4214 Register less = x11; | 4214 Register less = x11; |
| 4215 __ Mov(greater, Smi::FromInt(GREATER)); | 4215 __ Mov(greater, Smi::FromInt(GREATER)); |
| 4216 __ Mov(less, Smi::FromInt(LESS)); | 4216 __ Mov(less, Smi::FromInt(LESS)); |
| 4217 __ CmovX(result, greater, gt); | 4217 __ CmovX(result, greater, gt); |
| 4218 __ CmovX(result, less, lt); | 4218 __ CmovX(result, less, lt); |
| 4219 __ Ret(); | 4219 __ Ret(); |
| 4220 } | 4220 } |
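The ordered comparison compares up to the shorter length, falls back to the length difference, and maps the sign onto the smi constants via the two conditional moves. Equivalent C++ as a sketch (constants named as in V8):

```cpp
#include <algorithm>
#include <cstddef>

enum Result { LESS = -1, EQUAL = 0, GREATER = 1 };

// Sketch of the flat-string ordering: the first differing character
// decides; otherwise the shorter string compares lower.
Result CompareFlatAscii(const char* left, size_t left_len,
                        const char* right, size_t right_len) {
  size_t min_len = std::min(left_len, right_len);
  for (size_t i = 0; i < min_len; i++) {
    if (left[i] != right[i]) return left[i] < right[i] ? LESS : GREATER;
  }
  if (left_len == right_len) return EQUAL;
  return left_len < right_len ? LESS : GREATER;
}
```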
| 4221 | 4221 |
| 4222 | 4222 |
| 4223 void StringCompareStub::GenerateAsciiCharsCompareLoop( | 4223 void StringCompareStub::GenerateAsciiCharsCompareLoop( |
| 4224 MacroAssembler* masm, | 4224 MacroAssembler* masm, |
| 4225 Register left, | 4225 Register left, |
| 4226 Register right, | 4226 Register right, |
| 4227 Register length, | 4227 Register length, |
| 4228 Register scratch1, | 4228 Register scratch1, |
| 4229 Register scratch2, | 4229 Register scratch2, |
| 4230 Label* chars_not_equal) { | 4230 Label* chars_not_equal) { |
| 4231 ASSERT(!AreAliased(left, right, length, scratch1, scratch2)); | 4231 DCHECK(!AreAliased(left, right, length, scratch1, scratch2)); |
| 4232 | 4232 |
| 4233 // Change the index to run from -length to -1 by adding length to the | 4233 // Change the index to run from -length to -1 by adding length to the |
| 4234 // string start. The loop then ends when the index reaches zero, which | 4234 // string start. The loop then ends when the index reaches zero, which |
| 4235 // needs no additional compare. | 4235 // needs no additional compare. |
| 4236 __ SmiUntag(length); | 4236 __ SmiUntag(length); |
| 4237 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag); | 4237 __ Add(scratch1, length, SeqOneByteString::kHeaderSize - kHeapObjectTag); |
| 4238 __ Add(left, left, scratch1); | 4238 __ Add(left, left, scratch1); |
| 4239 __ Add(right, right, scratch1); | 4239 __ Add(right, right, scratch1); |
| 4240 | 4240 |
| 4241 Register index = length; | 4241 Register index = length; |
| (...skipping 119 matching lines...) |
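The loop body itself is elided above, but the indexing trick is worth spelling out: both string cursors are biased one past the end and a negative index counts up to zero, so the index doubles as the termination test. A C++ sketch under those assumptions:

```cpp
#include <cstdint>

// Sketch of the negative-index compare loop: pointers are biased to one
// past the end, the index runs from -length to 0, and reaching zero ends
// the loop with no extra comparison against a limit register.
bool CharsEqual(const char* left, const char* right, intptr_t length) {
  const char* left_end = left + length;
  const char* right_end = right + length;
  for (intptr_t index = -length; index != 0; index++) {
    if (left_end[index] != right_end[index]) return false;  // chars_not_equal
  }
  return true;
}
```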
| 4361 InformIncrementalMarker(masm); | 4361 InformIncrementalMarker(masm); |
| 4362 regs_.Restore(masm); // Restore the extra scratch registers we used. | 4362 regs_.Restore(masm); // Restore the extra scratch registers we used. |
| 4363 __ Ret(); | 4363 __ Ret(); |
| 4364 } | 4364 } |
| 4365 | 4365 |
| 4366 | 4366 |
| 4367 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | 4367 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { |
| 4368 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); | 4368 regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode_); |
| 4369 Register address = | 4369 Register address = |
| 4370 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address(); | 4370 x0.Is(regs_.address()) ? regs_.scratch0() : regs_.address(); |
| 4371 ASSERT(!address.Is(regs_.object())); | 4371 DCHECK(!address.Is(regs_.object())); |
| 4372 ASSERT(!address.Is(x0)); | 4372 DCHECK(!address.Is(x0)); |
| 4373 __ Mov(address, regs_.address()); | 4373 __ Mov(address, regs_.address()); |
| 4374 __ Mov(x0, regs_.object()); | 4374 __ Mov(x0, regs_.object()); |
| 4375 __ Mov(x1, address); | 4375 __ Mov(x1, address); |
| 4376 __ Mov(x2, ExternalReference::isolate_address(isolate())); | 4376 __ Mov(x2, ExternalReference::isolate_address(isolate())); |
| 4377 | 4377 |
| 4378 AllowExternalCallThatCantCauseGC scope(masm); | 4378 AllowExternalCallThatCantCauseGC scope(masm); |
| 4379 ExternalReference function = | 4379 ExternalReference function = |
| 4380 ExternalReference::incremental_marking_record_write_function( | 4380 ExternalReference::incremental_marking_record_write_function( |
| 4381 isolate()); | 4381 isolate()); |
| 4382 __ CallCFunction(function, 3, 0); | 4382 __ CallCFunction(function, 3, 0); |
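The stub marshals three arguments into x0-x2 and makes a GC-safe C call. The C-side signature is not shown in this excerpt; a plausible shape, with every name hypothetical:

```cpp
// Hypothetical sketch of the callee's shape: the stub passes the object,
// the slot address, and the isolate in x0, x1 and x2 respectively.
extern "C" void RecordWriteFromCode(void* object, void** slot_address,
                                    void* isolate);
```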
| (...skipping 219 matching lines...) |
| 4602 | 4602 |
| 4603 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 4603 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { |
| 4604 if (masm->isolate()->function_entry_hook() != NULL) { | 4604 if (masm->isolate()->function_entry_hook() != NULL) { |
| 4605 ProfileEntryHookStub stub(masm->isolate()); | 4605 ProfileEntryHookStub stub(masm->isolate()); |
| 4606 Assembler::BlockConstPoolScope no_const_pools(masm); | 4606 Assembler::BlockConstPoolScope no_const_pools(masm); |
| 4607 DontEmitDebugCodeScope no_debug_code(masm); | 4607 DontEmitDebugCodeScope no_debug_code(masm); |
| 4608 Label entry_hook_call_start; | 4608 Label entry_hook_call_start; |
| 4609 __ Bind(&entry_hook_call_start); | 4609 __ Bind(&entry_hook_call_start); |
| 4610 __ Push(lr); | 4610 __ Push(lr); |
| 4611 __ CallStub(&stub); | 4611 __ CallStub(&stub); |
| 4612 ASSERT(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) == | 4612 DCHECK(masm->SizeOfCodeGeneratedSince(&entry_hook_call_start) == |
| 4613 GetProfileEntryHookCallSize(masm)); | 4613 GetProfileEntryHookCallSize(masm)); |
| 4614 | 4614 |
| 4615 __ Pop(lr); | 4615 __ Pop(lr); |
| 4616 } | 4616 } |
| 4617 } | 4617 } |
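The size DCHECK matters because the hook later receives the function's address computed as `lr - GetProfileEntryHookCallSize(masm)`; if the push/call sequence changed size, that subtraction would point into the middle of the sequence. The embedder-facing hook type in v8.h has roughly this shape (a sketch from memory, treat it as an assumption):

```cpp
#include <cstdint>

// Assumed shape of the embedder hook: called on function entry with the
// address of the entered function and the location of the return address.
typedef void (*FunctionEntryHook)(uintptr_t function,
                                  uintptr_t return_addr_location);
```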
| 4618 | 4618 |
| 4619 | 4619 |
| 4620 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { | 4620 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { |
| 4621 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); | 4621 MacroAssembler::NoUseRealAbortsScope no_use_real_aborts(masm); |
| 4622 | 4622 |
| 4623 // Save all kCallerSaved registers (including lr), since this can be called | 4623 // Save all kCallerSaved registers (including lr), since this can be called |
| 4624 // from anywhere. | 4624 // from anywhere. |
| 4625 // TODO(jbramley): What about FP registers? | 4625 // TODO(jbramley): What about FP registers? |
| 4626 __ PushCPURegList(kCallerSaved); | 4626 __ PushCPURegList(kCallerSaved); |
| 4627 ASSERT(kCallerSaved.IncludesAliasOf(lr)); | 4627 DCHECK(kCallerSaved.IncludesAliasOf(lr)); |
| 4628 const int kNumSavedRegs = kCallerSaved.Count(); | 4628 const int kNumSavedRegs = kCallerSaved.Count(); |
| 4629 | 4629 |
| 4630 // Compute the function's address as the first argument. | 4630 // Compute the function's address as the first argument. |
| 4631 __ Sub(x0, lr, GetProfileEntryHookCallSize(masm)); | 4631 __ Sub(x0, lr, GetProfileEntryHookCallSize(masm)); |
| 4632 | 4632 |
| 4633 #if V8_HOST_ARCH_ARM64 | 4633 #if V8_HOST_ARCH_ARM64 |
| 4634 uintptr_t entry_hook = | 4634 uintptr_t entry_hook = |
| 4635 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook()); | 4635 reinterpret_cast<uintptr_t>(isolate()->function_entry_hook()); |
| 4636 __ Mov(x10, entry_hook); | 4636 __ Mov(x10, entry_hook); |
| 4637 #else | 4637 #else |
| (...skipping 40 matching lines...) |
| 4678 __ AssertFPCRState(); | 4678 __ AssertFPCRState(); |
| 4679 __ Ret(); | 4679 __ Ret(); |
| 4680 | 4680 |
| 4681 __ SetStackPointer(old_stack_pointer); | 4681 __ SetStackPointer(old_stack_pointer); |
| 4682 } | 4682 } |
| 4683 | 4683 |
| 4684 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, | 4684 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| 4685 Register target) { | 4685 Register target) { |
| 4686 // Make sure the caller configured the stack pointer (see comment in | 4686 // Make sure the caller configured the stack pointer (see comment in |
| 4687 // DirectCEntryStub::Generate). | 4687 // DirectCEntryStub::Generate). |
| 4688 ASSERT(csp.Is(__ StackPointer())); | 4688 DCHECK(csp.Is(__ StackPointer())); |
| 4689 | 4689 |
| 4690 intptr_t code = | 4690 intptr_t code = |
| 4691 reinterpret_cast<intptr_t>(GetCode().location()); | 4691 reinterpret_cast<intptr_t>(GetCode().location()); |
| 4692 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET)); | 4692 __ Mov(lr, Operand(code, RelocInfo::CODE_TARGET)); |
| 4693 __ Mov(x10, target); | 4693 __ Mov(x10, target); |
| 4694 // Branch to the stub. | 4694 // Branch to the stub. |
| 4695 __ Blr(lr); | 4695 __ Blr(lr); |
| 4696 } | 4696 } |
| 4697 | 4697 |
| 4698 | 4698 |
| 4699 // Probe the name dictionary in the 'elements' register. | 4699 // Probe the name dictionary in the 'elements' register. |
| 4700 // Jump to the 'done' label if a property with the given name is found. | 4700 // Jump to the 'done' label if a property with the given name is found. |
| 4701 // Jump to the 'miss' label otherwise. | 4701 // Jump to the 'miss' label otherwise. |
| 4702 // | 4702 // |
| 4703 // If the lookup succeeded, 'scratch2' will point at the matching entry, | 4703 // If the lookup succeeded, 'scratch2' will point at the matching entry, |
| 4703 // i.e. elements + kEntrySize * index * kPointerSize. | 4703 // i.e. elements + kEntrySize * index * kPointerSize. |
| 4704 // 'elements' and 'name' registers are preserved on miss. | 4704 // 'elements' and 'name' registers are preserved on miss. |
| 4705 void NameDictionaryLookupStub::GeneratePositiveLookup( | 4705 void NameDictionaryLookupStub::GeneratePositiveLookup( |
| 4706 MacroAssembler* masm, | 4706 MacroAssembler* masm, |
| 4707 Label* miss, | 4707 Label* miss, |
| 4708 Label* done, | 4708 Label* done, |
| 4709 Register elements, | 4709 Register elements, |
| 4710 Register name, | 4710 Register name, |
| 4711 Register scratch1, | 4711 Register scratch1, |
| 4712 Register scratch2) { | 4712 Register scratch2) { |
| 4713 ASSERT(!AreAliased(elements, name, scratch1, scratch2)); | 4713 DCHECK(!AreAliased(elements, name, scratch1, scratch2)); |
| 4714 | 4714 |
| 4715 // Assert that name contains a string. | 4715 // Assert that name contains a string. |
| 4716 __ AssertName(name); | 4716 __ AssertName(name); |
| 4717 | 4717 |
| 4718 // Compute the capacity mask. | 4718 // Compute the capacity mask. |
| 4719 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset)); | 4719 __ Ldrsw(scratch1, UntagSmiFieldMemOperand(elements, kCapacityOffset)); |
| 4720 __ Sub(scratch1, scratch1, 1); | 4720 __ Sub(scratch1, scratch1, 1); |
| 4721 | 4721 |
| 4722 // Generate an unrolled loop that performs a few probes before giving up. | 4722 // Generate an unrolled loop that performs a few probes before giving up. |
| 4723 for (int i = 0; i < kInlinedProbes; i++) { | 4723 for (int i = 0; i < kInlinedProbes; i++) { |
| 4724 // Compute the masked index: (hash + i + i * i) & mask. | 4724 // Compute the masked index: (hash + i + i * i) & mask. |
| 4725 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 4725 __ Ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); |
| 4726 if (i > 0) { | 4726 if (i > 0) { |
| 4727 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 4727 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
| 4728 // the hash in a separate instruction. The value hash + i + i * i is right | 4728 // the hash in a separate instruction. The value hash + i + i * i is right |
| 4729 // shifted in the following And instruction. | 4729 // shifted in the following And instruction. |
| 4730 ASSERT(NameDictionary::GetProbeOffset(i) < | 4730 DCHECK(NameDictionary::GetProbeOffset(i) < |
| 4731 1 << (32 - Name::kHashFieldOffset)); | 4731 1 << (32 - Name::kHashFieldOffset)); |
| 4732 __ Add(scratch2, scratch2, Operand( | 4732 __ Add(scratch2, scratch2, Operand( |
| 4733 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 4733 NameDictionary::GetProbeOffset(i) << Name::kHashShift)); |
| 4734 } | 4734 } |
| 4735 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); | 4735 __ And(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); |
| 4736 | 4736 |
| 4737 // Scale the index by multiplying by the element size. | 4737 // Scale the index by multiplying by the element size. |
| 4738 ASSERT(NameDictionary::kEntrySize == 3); | 4738 DCHECK(NameDictionary::kEntrySize == 3); |
| 4739 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); | 4739 __ Add(scratch2, scratch2, Operand(scratch2, LSL, 1)); |
| 4740 | 4740 |
| 4741 // Check if the key is identical to the name. | 4741 // Check if the key is identical to the name. |
| 4742 UseScratchRegisterScope temps(masm); | 4742 UseScratchRegisterScope temps(masm); |
| 4743 Register scratch3 = temps.AcquireX(); | 4743 Register scratch3 = temps.AcquireX(); |
| 4744 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); | 4744 __ Add(scratch2, elements, Operand(scratch2, LSL, kPointerSizeLog2)); |
| 4745 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset)); | 4745 __ Ldr(scratch3, FieldMemOperand(scratch2, kElementsStartOffset)); |
| 4746 __ Cmp(name, scratch3); | 4746 __ Cmp(name, scratch3); |
| 4747 __ B(eq, done); | 4747 __ B(eq, done); |
| 4748 } | 4748 } |
| 4749 | 4749 |
| 4750 // The inlined probes didn't find the entry. | 4750 // The inlined probes didn't find the entry. |
| 4751 // Call the complete stub to scan the whole dictionary. | 4751 // Call the complete stub to scan the whole dictionary. |
| 4752 | 4752 |
| 4753 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6); | 4753 CPURegList spill_list(CPURegister::kRegister, kXRegSizeInBits, 0, 6); |
| 4754 spill_list.Combine(lr); | 4754 spill_list.Combine(lr); |
| 4755 spill_list.Remove(scratch1); | 4755 spill_list.Remove(scratch1); |
| 4756 spill_list.Remove(scratch2); | 4756 spill_list.Remove(scratch2); |
| 4757 | 4757 |
| 4758 __ PushCPURegList(spill_list); | 4758 __ PushCPURegList(spill_list); |
| 4759 | 4759 |
| 4760 if (name.is(x0)) { | 4760 if (name.is(x0)) { |
| 4761 ASSERT(!elements.is(x1)); | 4761 DCHECK(!elements.is(x1)); |
| 4762 __ Mov(x1, name); | 4762 __ Mov(x1, name); |
| 4763 __ Mov(x0, elements); | 4763 __ Mov(x0, elements); |
| 4764 } else { | 4764 } else { |
| 4765 __ Mov(x0, elements); | 4765 __ Mov(x0, elements); |
| 4766 __ Mov(x1, name); | 4766 __ Mov(x1, name); |
| 4767 } | 4767 } |
| 4768 | 4768 |
| 4769 Label not_found; | 4769 Label not_found; |
| 4770 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); | 4770 NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); |
| 4771 __ CallStub(&stub); | 4771 __ CallStub(&stub); |
| 4772 __ Cbz(x0, ¬_found); | 4772 __ Cbz(x0, ¬_found); |
| 4773 __ Mov(scratch2, x2); // Move entry index into scratch2. | 4773 __ Mov(scratch2, x2); // Move entry index into scratch2. |
| 4774 __ PopCPURegList(spill_list); | 4774 __ PopCPURegList(spill_list); |
| 4775 __ B(done); | 4775 __ B(done); |
| 4776 | 4776 |
| 4777 __ Bind(¬_found); | 4777 __ Bind(¬_found); |
| 4778 __ PopCPURegList(spill_list); | 4778 __ PopCPURegList(spill_list); |
| 4779 __ B(miss); | 4779 __ B(miss); |
| 4780 } | 4780 } |
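The unrolled probes implement quadratic probing over a power-of-two table with three-pointer entries: mask = capacity - 1, index = (hash + i + i * i) & mask, and the entry's key sits three pointers per index into the elements array. A C++ sketch of the probe sequence (names illustrative, layout simplified to a bare key array):

```cpp
#include <cstdint>

// Sketch of the probing scheme the stub unrolls. 'keys' stands for the
// elements array viewed so that entry i's key sits at keys[3 * i];
// 'capacity' must be a power of two.
int FindEntry(void* const* keys, uint32_t capacity, uint32_t hash,
              const void* name, int max_probes) {
  const uint32_t mask = capacity - 1;
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + i + i * i) & mask;  // quadratic probing
    if (keys[index * 3] == name) return static_cast<int>(index);
  }
  return -1;  // fall through to the out-of-line stub / miss
}
```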
| 4781 | 4781 |
| 4782 | 4782 |
| 4783 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | 4783 void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, |
| 4784 Label* miss, | 4784 Label* miss, |
| 4785 Label* done, | 4785 Label* done, |
| 4786 Register receiver, | 4786 Register receiver, |
| 4787 Register properties, | 4787 Register properties, |
| 4788 Handle<Name> name, | 4788 Handle<Name> name, |
| 4789 Register scratch0) { | 4789 Register scratch0) { |
| 4790 ASSERT(!AreAliased(receiver, properties, scratch0)); | 4790 DCHECK(!AreAliased(receiver, properties, scratch0)); |
| 4791 ASSERT(name->IsUniqueName()); | 4791 DCHECK(name->IsUniqueName()); |
| 4792 // If the names in slots 1 to kProbes - 1 for the hash value are not | 4792 // If the names in slots 1 to kProbes - 1 for the hash value are not |
| 4793 // equal to the name, and the kProbes-th slot is unused (its name is the | 4793 // equal to the name, and the kProbes-th slot is unused (its name is the |
| 4794 // undefined value), the hash table is guaranteed not to contain the | 4794 // undefined value), the hash table is guaranteed not to contain the |
| 4795 // property. This holds even if some slots represent deleted properties | 4795 // property. This holds even if some slots represent deleted properties |
| 4796 // (their names are the hole value). | 4796 // (their names are the hole value). |
| 4797 for (int i = 0; i < kInlinedProbes; i++) { | 4797 for (int i = 0; i < kInlinedProbes; i++) { |
| 4798 // scratch0 points to properties hash. | 4798 // scratch0 points to properties hash. |
| 4799 // Compute the masked index: (hash + i + i * i) & mask. | 4799 // Compute the masked index: (hash + i + i * i) & mask. |
| 4800 Register index = scratch0; | 4800 Register index = scratch0; |
| 4801 // Capacity is smi 2^n. | 4801 // Capacity is smi 2^n. |
| 4802 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset)); | 4802 __ Ldrsw(index, UntagSmiFieldMemOperand(properties, kCapacityOffset)); |
| 4803 __ Sub(index, index, 1); | 4803 __ Sub(index, index, 1); |
| 4804 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i)); | 4804 __ And(index, index, name->Hash() + NameDictionary::GetProbeOffset(i)); |
| 4805 | 4805 |
| 4806 // Scale the index by multiplying by the entry size. | 4806 // Scale the index by multiplying by the entry size. |
| 4807 ASSERT(NameDictionary::kEntrySize == 3); | 4807 DCHECK(NameDictionary::kEntrySize == 3); |
| 4808 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. | 4808 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. |
| 4809 | 4809 |
| 4810 Register entity_name = scratch0; | 4810 Register entity_name = scratch0; |
| 4811 // Undefined in this slot means the name is not in the dictionary. | 4811 // Undefined in this slot means the name is not in the dictionary. |
| 4812 Register tmp = index; | 4812 Register tmp = index; |
| 4813 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2)); | 4813 __ Add(tmp, properties, Operand(index, LSL, kPointerSizeLog2)); |
| 4814 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 4814 __ Ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); |
| 4815 | 4815 |
| 4816 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done); | 4816 __ JumpIfRoot(entity_name, Heap::kUndefinedValueRootIndex, done); |
| 4817 | 4817 |
| (...skipping 60 matching lines...) |
| 4878 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 4878 __ Ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
| 4879 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 4879 __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); |
| 4880 | 4880 |
| 4881 for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 4881 for (int i = kInlinedProbes; i < kTotalProbes; i++) { |
| 4882 // Compute the masked index: (hash + i + i * i) & mask. | 4882 // Compute the masked index: (hash + i + i * i) & mask. |
| 4883 // Capacity is smi 2^n. | 4883 // Capacity is smi 2^n. |
| 4884 if (i > 0) { | 4884 if (i > 0) { |
| 4885 // Add the probe offset (i + i * i) left shifted to avoid right shifting | 4885 // Add the probe offset (i + i * i) left shifted to avoid right shifting |
| 4886 // the hash in a separate instruction. The value hash + i + i * i is right | 4886 // the hash in a separate instruction. The value hash + i + i * i is right |
| 4887 // shifted in the following And instruction. | 4887 // shifted in the following And instruction. |
| 4888 ASSERT(NameDictionary::GetProbeOffset(i) < | 4888 DCHECK(NameDictionary::GetProbeOffset(i) < |
| 4889 1 << (32 - Name::kHashFieldOffset)); | 4889 1 << (32 - Name::kHashFieldOffset)); |
| 4890 __ Add(index, hash, | 4890 __ Add(index, hash, |
| 4891 NameDictionary::GetProbeOffset(i) << Name::kHashShift); | 4891 NameDictionary::GetProbeOffset(i) << Name::kHashShift); |
| 4892 } else { | 4892 } else { |
| 4893 __ Mov(index, hash); | 4893 __ Mov(index, hash); |
| 4894 } | 4894 } |
| 4895 __ And(index, mask, Operand(index, LSR, Name::kHashShift)); | 4895 __ And(index, mask, Operand(index, LSR, Name::kHashShift)); |
| 4896 | 4896 |
| 4897 // Scale the index by multiplying by the entry size. | 4897 // Scale the index by multiplying by the entry size. |
| 4898 ASSERT(NameDictionary::kEntrySize == 3); | 4898 DCHECK(NameDictionary::kEntrySize == 3); |
| 4899 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. | 4899 __ Add(index, index, Operand(index, LSL, 1)); // index *= 3. |
| 4900 | 4900 |
| 4901 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2)); | 4901 __ Add(index, dictionary, Operand(index, LSL, kPointerSizeLog2)); |
| 4902 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 4902 __ Ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); |
| 4903 | 4903 |
| 4904 // Undefined in this slot means the name is not in the dictionary. | 4904 // Undefined in this slot means the name is not in the dictionary. |
| 4905 __ Cmp(entry_key, undefined); | 4905 __ Cmp(entry_key, undefined); |
| 4906 __ B(eq, ¬_in_dictionary); | 4906 __ B(eq, ¬_in_dictionary); |
| 4907 | 4907 |
| 4908 // Stop if found the property. | 4908 // Stop if found the property. |
| (...skipping 415 matching lines...) |
| 5324 // not controlled by GC. | 5324 // not controlled by GC. |
| 5325 const int kApiStackSpace = 4; | 5325 const int kApiStackSpace = 4; |
| 5326 | 5326 |
| 5327 // Allocate space so that CallApiFunctionAndReturn can store some scratch | 5327 // Allocate space so that CallApiFunctionAndReturn can store some scratch |
| 5328 // registers on the stack. | 5328 // registers on the stack. |
| 5329 const int kCallApiFunctionSpillSpace = 4; | 5329 const int kCallApiFunctionSpillSpace = 4; |
| 5330 | 5330 |
| 5331 FrameScope frame_scope(masm, StackFrame::MANUAL); | 5331 FrameScope frame_scope(masm, StackFrame::MANUAL); |
| 5332 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace); | 5332 __ EnterExitFrame(false, x10, kApiStackSpace + kCallApiFunctionSpillSpace); |
| 5333 | 5333 |
| 5334 ASSERT(!AreAliased(x0, api_function_address)); | 5334 DCHECK(!AreAliased(x0, api_function_address)); |
| 5335 // x0 = FunctionCallbackInfo& | 5335 // x0 = FunctionCallbackInfo& |
| 5336 // The arguments are after the return address. | 5336 // The arguments are after the return address. |
| 5337 __ Add(x0, masm->StackPointer(), 1 * kPointerSize); | 5337 __ Add(x0, masm->StackPointer(), 1 * kPointerSize); |
| 5338 // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_ | 5338 // FunctionCallbackInfo::implicit_args_ and FunctionCallbackInfo::values_ |
| 5339 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); | 5339 __ Add(x10, args, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); |
| 5340 __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize)); | 5340 __ Stp(args, x10, MemOperand(x0, 0 * kPointerSize)); |
| 5341 // FunctionCallbackInfo::length_ = argc and | 5341 // FunctionCallbackInfo::length_ = argc and |
| 5342 // FunctionCallbackInfo::is_construct_call = 0 | 5342 // FunctionCallbackInfo::is_construct_call = 0 |
| 5343 __ Mov(x10, argc); | 5343 __ Mov(x10, argc); |
| 5344 __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize)); | 5344 __ Stp(x10, xzr, MemOperand(x0, 2 * kPointerSize)); |
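The two Stp instructions populate four consecutive stack slots that the C++ side reads back as FunctionCallbackInfo fields. A hedged picture of that layout, with field names taken from the comments above and the C++ types assumed for illustration:

```cpp
#include <cstdint>

// Assumed layout of the block at x0, matching the stores above:
//   [0] implicit_args_     <- args
//   [1] values_            <- args + (FCA::kArgsLength - 1 + argc) * kPointerSize
//   [2] length_            <- argc
//   [3] is_construct_call_ <- 0
struct FunctionCallbackInfoSketch {
  void** implicit_args_;
  void** values_;
  intptr_t length_;
  intptr_t is_construct_call_;
};
```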
| (...skipping 64 matching lines...) |
| 5409 MemOperand(fp, 6 * kPointerSize), | 5409 MemOperand(fp, 6 * kPointerSize), |
| 5410 NULL); | 5410 NULL); |
| 5411 } | 5411 } |
| 5412 | 5412 |
| 5413 | 5413 |
| 5414 #undef __ | 5414 #undef __ |
| 5415 | 5415 |
| 5416 } } // namespace v8::internal | 5416 } } // namespace v8::internal |
| 5417 | 5417 |
| 5418 #endif // V8_TARGET_ARCH_ARM64 | 5418 #endif // V8_TARGET_ARCH_ARM64 |