| OLD | NEW |
| 1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| (...skipping 22 matching lines...) |
| 34 #include "regexp-macro-assembler.h" | 34 #include "regexp-macro-assembler.h" |
| 35 | 35 |
| 36 namespace v8 { | 36 namespace v8 { |
| 37 namespace internal { | 37 namespace internal { |
| 38 | 38 |
| 39 | 39 |
| 40 #define __ ACCESS_MASM(masm) | 40 #define __ ACCESS_MASM(masm) |
| 41 | 41 |
| 42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 43 Label* slow, | 43 Label* slow, |
| 44 Condition cc, | 44 Condition cond, |
| 45 bool never_nan_nan); | 45 bool never_nan_nan); |
| 46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
| 47 Register lhs, | 47 Register lhs, |
| 48 Register rhs, | 48 Register rhs, |
| 49 Label* lhs_not_nan, | 49 Label* lhs_not_nan, |
| 50 Label* slow, | 50 Label* slow, |
| 51 bool strict); | 51 bool strict); |
| 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); | 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); |
| 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
| 54 Register lhs, | 54 Register lhs, |
| 55 Register rhs); | 55 Register rhs); |
| 56 | 56 |
| 57 | 57 |
| 58 void ToNumberStub::Generate(MacroAssembler* masm) { |
| 59 // The ToNumber stub takes one argument in r0. |
| 60 Label check_heap_number, call_builtin; |
| 61 __ tst(r0, Operand(kSmiTagMask)); |
| 62 __ b(ne, &check_heap_number); |
| 63 __ Ret(); |
| 64 |
| 65 __ bind(&check_heap_number); |
| 66 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 67 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 68 __ cmp(r1, ip); |
| 69 __ b(ne, &call_builtin); |
| 70 __ Ret(); |
| 71 |
| 72 __ bind(&call_builtin); |
| 73 __ push(r0); |
| 74 __ InvokeBuiltin(Builtins::TO_NUMBER, JUMP_JS); |
| 75 } |
| 76 |
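The new ToNumberStub above returns its argument unchanged when it is already a smi or a heap number, and only falls back to the Builtins::TO_NUMBER builtin otherwise. A minimal, self-contained C++ model of that control flow (the Value type and the call_builtin callback are stand-ins for illustration, not V8's real API):

    // Simplified model of the stub's fast path; not the emitted ARM code itself.
    enum Kind { kSmi, kHeapNumber, kOther };
    struct Value { Kind kind; };

    Value ToNumberModel(Value arg, Value (*call_builtin)(Value)) {
      if (arg.kind == kSmi) return arg;         // smi: already a number, return as-is
      if (arg.kind == kHeapNumber) return arg;  // heap number: return as-is
      return call_builtin(arg);                 // anything else: defer to the builtin
    }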
| 77 |
| 58 void FastNewClosureStub::Generate(MacroAssembler* masm) { | 78 void FastNewClosureStub::Generate(MacroAssembler* masm) { |
| 59 // Create a new closure from the given function info in new | 79 // Create a new closure from the given function info in new |
| 60 // space. Set the context to the current context in cp. | 80 // space. Set the context to the current context in cp. |
| 61 Label gc; | 81 Label gc; |
| 62 | 82 |
| 63 // Pop the function info from the stack. | 83 // Pop the function info from the stack. |
| 64 __ pop(r3); | 84 __ pop(r3); |
| 65 | 85 |
| 66 // Attempt to allocate new JSFunction in new space. | 86 // Attempt to allocate new JSFunction in new space. |
| 67 __ AllocateInNewSpace(JSFunction::kSize, | 87 __ AllocateInNewSpace(JSFunction::kSize, |
| (...skipping 269 matching lines...) |
| 337 // Compute lower part of fraction (last 12 bits). | 357 // Compute lower part of fraction (last 12 bits). |
| 338 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); | 358 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); |
| 339 // And the top (top 20 bits). | 359 // And the top (top 20 bits). |
| 340 __ orr(exponent, | 360 __ orr(exponent, |
| 341 exponent, | 361 exponent, |
| 342 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); | 362 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); |
| 343 __ Ret(); | 363 __ Ret(); |
| 344 } | 364 } |
| 345 | 365 |
| 346 | 366 |
| 367 class FloatingPointHelper : public AllStatic { |
| 368 public: |
| 369 |
| 370 enum Destination { |
| 371 kVFPRegisters, |
| 372 kCoreRegisters |
| 373 }; |
| 374 |
| 375 |
| 376 // Loads smis from r0 and r1 (right and left in binary operations) into |
| 377 // floating point registers. Depending on the destination the values end up |
| 378 // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination is |
| 379 // floating point registers VFP3 must be supported. If core registers are |
| 380 // requested when VFP3 is supported d6 and d7 will be scratched. |
| 381 static void LoadSmis(MacroAssembler* masm, |
| 382 Destination destination, |
| 383 Register scratch1, |
| 384 Register scratch2); |
| 385 |
| 386 // Loads objects from r0 and r1 (right and left in binary operations) into |
| 387 // floating point registers. Depending on the destination the values end up |
| 388 // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination is |
| 389 // floating point registers VFP3 must be supported. If core registers are |
| 390 // requested when VFP3 is supported d6 and d7 will still be scratched. If |
| 391 // either r0 or r1 is not a number (not smi and not heap number object) the |
| 392 // not_number label is jumped to with r0 and r1 intact. |
| 393 static void LoadOperands(MacroAssembler* masm, |
| 394 FloatingPointHelper::Destination destination, |
| 395 Register heap_number_map, |
| 396 Register scratch1, |
| 397 Register scratch2, |
| 398 Label* not_number); |
| 399 |
| 400 // Loads the number from object into dst as a 32-bit integer if possible. If |
| 401 // the object is not a 32-bit integer control continues at the label |
| 402 // not_int32. If VFP is supported double_scratch is used but not scratch2. |
| 403 static void LoadNumberAsInteger(MacroAssembler* masm, |
| 404 Register object, |
| 405 Register dst, |
| 406 Register heap_number_map, |
| 407 Register scratch1, |
| 408 Register scratch2, |
| 409 DwVfpRegister double_scratch, |
| 410 Label* not_int32); |
| 411 |
| 412 private: |
| 413 static void LoadNumber(MacroAssembler* masm, |
| 414 FloatingPointHelper::Destination destination, |
| 415 Register object, |
| 416 DwVfpRegister dst, |
| 417 Register dst1, |
| 418 Register dst2, |
| 419 Register heap_number_map, |
| 420 Register scratch1, |
| 421 Register scratch2, |
| 422 Label* not_number); |
| 423 }; |
| 424 |
| 425 |
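How a stub is expected to drive FloatingPointHelper can be seen in TypeRecordingBinaryOpStub::GenerateFPOperation further down in this change; condensed, the calling pattern looks roughly like the sketch below (the register choices r6/r7/r9 and the d6 = left, d7 = right convention are taken from that code; this is an illustration, not a drop-in snippet):

    FloatingPointHelper::Destination destination =
        CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD
            ? FloatingPointHelper::kVFPRegisters
            : FloatingPointHelper::kCoreRegisters;
    __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);  // heap_number_map
    FloatingPointHelper::LoadOperands(masm, destination, r6, r7, r9, &not_numbers);
    if (destination == FloatingPointHelper::kVFPRegisters) {
      CpuFeatures::Scope scope(VFP3);
      __ vadd(d5, d6, d7);  // d6 holds the left operand, d7 the right
    }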
| 426 void FloatingPointHelper::LoadSmis(MacroAssembler* masm, |
| 427 FloatingPointHelper::Destination destination, |
| 428 Register scratch1, |
| 429 Register scratch2) { |
| 430 if (CpuFeatures::IsSupported(VFP3)) { |
| 431 CpuFeatures::Scope scope(VFP3); |
| 432 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize)); |
| 433 __ vmov(d7.high(), scratch1); |
| 434 __ vcvt_f64_s32(d7, d7.high()); |
| 435 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize)); |
| 436 __ vmov(d6.high(), scratch1); |
| 437 __ vcvt_f64_s32(d6, d6.high()); |
| 438 if (destination == kCoreRegisters) { |
| 439 __ vmov(r2, r3, d7); |
| 440 __ vmov(r0, r1, d6); |
| 441 } |
| 442 } else { |
| 443 ASSERT(destination == kCoreRegisters); |
| 444 // Write Smi from r0 to r3 and r2 in double format. |
| 445 __ mov(scratch1, Operand(r0)); |
| 446 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2); |
| 447 __ push(lr); |
| 448 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET); |
| 449 // Write Smi from r1 to r1 and r0 in double format. |
| 450 __ mov(scratch1, Operand(r1)); |
| 451 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2); |
| 452 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
| 453 __ pop(lr); |
| 454 } |
| 455 } |
| 456 |
| 457 |
| 458 void FloatingPointHelper::LoadOperands( |
| 459 MacroAssembler* masm, |
| 460 FloatingPointHelper::Destination destination, |
| 461 Register heap_number_map, |
| 462 Register scratch1, |
| 463 Register scratch2, |
| 464 Label* slow) { |
| 465 |
| 466 // Load right operand (r0) to d7 or r2/r3. |
| 467 LoadNumber(masm, destination, |
| 468 r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow); |
| 469 |
| 470 // Load left operand (r1) to d6 or r0/r1. |
| 471 LoadNumber(masm, destination, |
| 472 r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow); |
| 473 } |
| 474 |
| 475 |
| 476 void FloatingPointHelper::LoadNumber(MacroAssembler* masm, |
| 477 Destination destination, |
| 478 Register object, |
| 479 DwVfpRegister dst, |
| 480 Register dst1, |
| 481 Register dst2, |
| 482 Register heap_number_map, |
| 483 Register scratch1, |
| 484 Register scratch2, |
| 485 Label* not_number) { |
| 486 if (FLAG_debug_code) { |
| 487 __ AbortIfNotRootValue(heap_number_map, |
| 488 Heap::kHeapNumberMapRootIndex, |
| 489 "HeapNumberMap register clobbered."); |
| 490 } |
| 491 |
| 492 Label is_smi, done; |
| 493 |
| 494 __ JumpIfSmi(object, &is_smi); |
| 495 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number); |
| 496 |
| 497 // Handle loading a double from a heap number. |
| 498 if (CpuFeatures::IsSupported(VFP3) && destination == kVFPRegisters) { |
| 499 CpuFeatures::Scope scope(VFP3); |
| 500 // Load the double from tagged HeapNumber to double register. |
| 501 __ sub(scratch1, object, Operand(kHeapObjectTag)); |
| 502 __ vldr(dst, scratch1, HeapNumber::kValueOffset); |
| 503 } else { |
| 504 ASSERT(destination == kCoreRegisters); |
| 505 // Load the double from heap number to dst1 and dst2 in double format. |
| 506 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset)); |
| 507 } |
| 508 __ jmp(&done); |
| 509 |
| 510 // Handle loading a double from a smi. |
| 511 __ bind(&is_smi); |
| 512 if (CpuFeatures::IsSupported(VFP3)) { |
| 513 CpuFeatures::Scope scope(VFP3); |
| 514 // Convert smi to double using VFP instructions. |
| 515 __ SmiUntag(scratch1, object); |
| 516 __ vmov(dst.high(), scratch1); |
| 517 __ vcvt_f64_s32(dst, dst.high()); |
| 518 if (destination == kCoreRegisters) { |
| 519 // Load the converted smi to dst1 and dst2 in double format. |
| 520 __ vmov(dst1, dst2, dst); |
| 521 } |
| 522 } else { |
| 523 ASSERT(destination == kCoreRegisters); |
| 524 // Write smi to dst1 and dst2 in double format. |
| 525 __ mov(scratch1, Operand(object)); |
| 526 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2); |
| 527 __ push(lr); |
| 528 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET); |
| 529 __ pop(lr); |
| 530 } |
| 531 |
| 532 __ bind(&done); |
| 533 } |
| 534 |
| 535 |
| 536 void FloatingPointHelper::LoadNumberAsInteger(MacroAssembler* masm, |
| 537 Register object, |
| 538 Register dst, |
| 539 Register heap_number_map, |
| 540 Register scratch1, |
| 541 Register scratch2, |
| 542 DwVfpRegister double_scratch, |
| 543 Label* not_int32) { |
| 544 if (FLAG_debug_code) { |
| 545 __ AbortIfNotRootValue(heap_number_map, |
| 546 Heap::kHeapNumberMapRootIndex, |
| 547 "HeapNumberMap register clobbered."); |
| 548 } |
| 549 Label is_smi, done; |
| 550 __ JumpIfSmi(object, &is_smi); |
| 551 __ ldr(scratch1, FieldMemOperand(object, HeapNumber::kMapOffset)); |
| 552 __ cmp(scratch1, heap_number_map); |
| 553 __ b(ne, not_int32); |
| 554 __ ConvertToInt32( |
| 555 object, dst, scratch1, scratch2, double_scratch, not_int32); |
| 556 __ jmp(&done); |
| 557 __ bind(&is_smi); |
| 558 __ SmiUntag(dst, object); |
| 559 __ bind(&done); |
| 560 } |
| 561 |
| 562 |
| 563 |
| 347 // See comment for class. | 564 // See comment for class. |
| 348 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { | 565 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { |
| 349 Label max_negative_int; | 566 Label max_negative_int; |
| 350 // the_int_ has the answer which is a signed int32 but not a Smi. | 567 // the_int_ has the answer which is a signed int32 but not a Smi. |
| 351 // We test for the special value that has a different exponent. This test | 568 // We test for the special value that has a different exponent. This test |
| 352 // has the neat side effect of setting the flags according to the sign. | 569 // has the neat side effect of setting the flags according to the sign. |
| 353 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); | 570 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); |
| 354 __ cmp(the_int_, Operand(0x80000000u)); | 571 __ cmp(the_int_, Operand(0x80000000u)); |
| 355 __ b(eq, &max_negative_int); | 572 __ b(eq, &max_negative_int); |
| 356 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. | 573 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. |
| (...skipping 31 matching lines...) |
| 388 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); | 605 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); |
| 389 __ Ret(); | 606 __ Ret(); |
| 390 } | 607 } |
| 391 | 608 |
| 392 | 609 |
| 393 // Handle the case where the lhs and rhs are the same object. | 610 // Handle the case where the lhs and rhs are the same object. |
| 394 // Equality is almost reflexive (everything but NaN), so this is a test | 611 // Equality is almost reflexive (everything but NaN), so this is a test |
| 395 // for "identity and not NaN". | 612 // for "identity and not NaN". |
| 396 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 613 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
| 397 Label* slow, | 614 Label* slow, |
| 398 Condition cc, | 615 Condition cond, |
| 399 bool never_nan_nan) { | 616 bool never_nan_nan) { |
| 400 Label not_identical; | 617 Label not_identical; |
| 401 Label heap_number, return_equal; | 618 Label heap_number, return_equal; |
| 402 __ cmp(r0, r1); | 619 __ cmp(r0, r1); |
| 403 __ b(ne, &not_identical); | 620 __ b(ne, &not_identical); |
| 404 | 621 |
| 405 // The two objects are identical. If we know that one of them isn't NaN then | 622 // The two objects are identical. If we know that one of them isn't NaN then |
| 406 // we now know they test equal. | 623 // we now know they test equal. |
| 407 if (cc != eq || !never_nan_nan) { | 624 if (cond != eq || !never_nan_nan) { |
| 408 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), | 625 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), |
| 409 // so we do the second best thing - test it ourselves. | 626 // so we do the second best thing - test it ourselves. |
| 410 // They are both equal and they are not both Smis so both of them are not | 627 // They are both equal and they are not both Smis so both of them are not |
| 411 // Smis. If it's not a heap number, then return equal. | 628 // Smis. If it's not a heap number, then return equal. |
| 412 if (cc == lt || cc == gt) { | 629 if (cond == lt || cond == gt) { |
| 413 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); | 630 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); |
| 414 __ b(ge, slow); | 631 __ b(ge, slow); |
| 415 } else { | 632 } else { |
| 416 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 633 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
| 417 __ b(eq, &heap_number); | 634 __ b(eq, &heap_number); |
| 418 // Comparing JS objects with <=, >= is complicated. | 635 // Comparing JS objects with <=, >= is complicated. |
| 419 if (cc != eq) { | 636 if (cond != eq) { |
| 420 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); | 637 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); |
| 421 __ b(ge, slow); | 638 __ b(ge, slow); |
| 422 // Normally here we fall through to return_equal, but undefined is | 639 // Normally here we fall through to return_equal, but undefined is |
| 423 // special: (undefined == undefined) == true, but | 640 // special: (undefined == undefined) == true, but |
| 424 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 641 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
| 425 if (cc == le || cc == ge) { | 642 if (cond == le || cond == ge) { |
| 426 __ cmp(r4, Operand(ODDBALL_TYPE)); | 643 __ cmp(r4, Operand(ODDBALL_TYPE)); |
| 427 __ b(ne, &return_equal); | 644 __ b(ne, &return_equal); |
| 428 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 645 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
| 429 __ cmp(r0, r2); | 646 __ cmp(r0, r2); |
| 430 __ b(ne, &return_equal); | 647 __ b(ne, &return_equal); |
| 431 if (cc == le) { | 648 if (cond == le) { |
| 432 // undefined <= undefined should fail. | 649 // undefined <= undefined should fail. |
| 433 __ mov(r0, Operand(GREATER)); | 650 __ mov(r0, Operand(GREATER)); |
| 434 } else { | 651 } else { |
| 435 // undefined >= undefined should fail. | 652 // undefined >= undefined should fail. |
| 436 __ mov(r0, Operand(LESS)); | 653 __ mov(r0, Operand(LESS)); |
| 437 } | 654 } |
| 438 __ Ret(); | 655 __ Ret(); |
| 439 } | 656 } |
| 440 } | 657 } |
| 441 } | 658 } |
| 442 } | 659 } |
| 443 | 660 |
| 444 __ bind(&return_equal); | 661 __ bind(&return_equal); |
| 445 if (cc == lt) { | 662 if (cond == lt) { |
| 446 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. | 663 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. |
| 447 } else if (cc == gt) { | 664 } else if (cond == gt) { |
| 448 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. | 665 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. |
| 449 } else { | 666 } else { |
| 450 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. | 667 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. |
| 451 } | 668 } |
| 452 __ Ret(); | 669 __ Ret(); |
| 453 | 670 |
| 454 if (cc != eq || !never_nan_nan) { | 671 if (cond != eq || !never_nan_nan) { |
| 455 // For less and greater we don't have to check for NaN since the result of | 672 // For less and greater we don't have to check for NaN since the result of |
| 456 // x < x is false regardless. For the others here is some code to check | 673 // x < x is false regardless. For the others here is some code to check |
| 457 // for NaN. | 674 // for NaN. |
| 458 if (cc != lt && cc != gt) { | 675 if (cond != lt && cond != gt) { |
| 459 __ bind(&heap_number); | 676 __ bind(&heap_number); |
| 460 // It is a heap number, so return non-equal if it's NaN and equal if it's | 677 // It is a heap number, so return non-equal if it's NaN and equal if it's |
| 461 // not NaN. | 678 // not NaN. |
| 462 | 679 |
| 463 // The representation of NaN values has all exponent bits (52..62) set, | 680 // The representation of NaN values has all exponent bits (52..62) set, |
| 464 // and not all mantissa bits (0..51) clear. | 681 // and not all mantissa bits (0..51) clear. |
| 465 // Read top bits of double representation (second word of value). | 682 // Read top bits of double representation (second word of value). |
| 466 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 683 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
| 467 // Test that exponent bits are all set. | 684 // Test that exponent bits are all set. |
| 468 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 685 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
| 469 // NaNs have all-one exponents so they sign extend to -1. | 686 // NaNs have all-one exponents so they sign extend to -1. |
| 470 __ cmp(r3, Operand(-1)); | 687 __ cmp(r3, Operand(-1)); |
| 471 __ b(ne, &return_equal); | 688 __ b(ne, &return_equal); |
| 472 | 689 |
| 473 // Shift out flag and all exponent bits, retaining only mantissa. | 690 // Shift out flag and all exponent bits, retaining only mantissa. |
| 474 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); | 691 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); |
| 475 // Or with all low-bits of mantissa. | 692 // Or with all low-bits of mantissa. |
| 476 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 693 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
| 477 __ orr(r0, r3, Operand(r2), SetCC); | 694 __ orr(r0, r3, Operand(r2), SetCC); |
| 478 // For equal we already have the right value in r0: Return zero (equal) | 695 // For equal we already have the right value in r0: Return zero (equal) |
| 479 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 696 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
| 480 // not (it's a NaN). For <= and >= we need to load r0 with the failing | 697 // not (it's a NaN). For <= and >= we need to load r0 with the failing |
| 481 // value if it's a NaN. | 698 // value if it's a NaN. |
| 482 if (cc != eq) { | 699 if (cond != eq) { |
| 483 // All-zero means Infinity means equal. | 700 // All-zero means Infinity means equal. |
| 484 __ Ret(eq); | 701 __ Ret(eq); |
| 485 if (cc == le) { | 702 if (cond == le) { |
| 486 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. | 703 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. |
| 487 } else { | 704 } else { |
| 488 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. | 705 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. |
| 489 } | 706 } |
| 490 } | 707 } |
| 491 __ Ret(); | 708 __ Ret(); |
| 492 } | 709 } |
| 493 // No fall through here. | 710 // No fall through here. |
| 494 } | 711 } |
| 495 | 712 |
| (...skipping 86 matching lines...) |
| 582 // Convert rhs to a double in r0, r1. | 799 // Convert rhs to a double in r0, r1. |
| 583 __ mov(r7, Operand(rhs)); | 800 __ mov(r7, Operand(rhs)); |
| 584 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 801 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
| 585 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 802 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
| 586 __ pop(lr); | 803 __ pop(lr); |
| 587 } | 804 } |
| 588 // Fall through to both_loaded_as_doubles. | 805 // Fall through to both_loaded_as_doubles. |
| 589 } | 806 } |
| 590 | 807 |
| 591 | 808 |
| 592 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { | 809 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { |
| 593 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 810 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
| 594 Register rhs_exponent = exp_first ? r0 : r1; | 811 Register rhs_exponent = exp_first ? r0 : r1; |
| 595 Register lhs_exponent = exp_first ? r2 : r3; | 812 Register lhs_exponent = exp_first ? r2 : r3; |
| 596 Register rhs_mantissa = exp_first ? r1 : r0; | 813 Register rhs_mantissa = exp_first ? r1 : r0; |
| 597 Register lhs_mantissa = exp_first ? r3 : r2; | 814 Register lhs_mantissa = exp_first ? r3 : r2; |
| 598 Label one_is_nan, neither_is_nan; | 815 Label one_is_nan, neither_is_nan; |
| 599 | 816 |
| 600 __ Sbfx(r4, | 817 __ Sbfx(r4, |
| 601 lhs_exponent, | 818 lhs_exponent, |
| 602 HeapNumber::kExponentShift, | 819 HeapNumber::kExponentShift, |
| (...skipping 19 matching lines...) |
| 622 __ mov(r4, | 839 __ mov(r4, |
| 623 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), | 840 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), |
| 624 SetCC); | 841 SetCC); |
| 625 __ b(ne, &one_is_nan); | 842 __ b(ne, &one_is_nan); |
| 626 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); | 843 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); |
| 627 __ b(eq, &neither_is_nan); | 844 __ b(eq, &neither_is_nan); |
| 628 | 845 |
| 629 __ bind(&one_is_nan); | 846 __ bind(&one_is_nan); |
| 630 // NaN comparisons always fail. | 847 // NaN comparisons always fail. |
| 631 // Load whatever we need in r0 to make the comparison fail. | 848 // Load whatever we need in r0 to make the comparison fail. |
| 632 if (cc == lt || cc == le) { | 849 if (cond == lt || cond == le) { |
| 633 __ mov(r0, Operand(GREATER)); | 850 __ mov(r0, Operand(GREATER)); |
| 634 } else { | 851 } else { |
| 635 __ mov(r0, Operand(LESS)); | 852 __ mov(r0, Operand(LESS)); |
| 636 } | 853 } |
| 637 __ Ret(); | 854 __ Ret(); |
| 638 | 855 |
| 639 __ bind(&neither_is_nan); | 856 __ bind(&neither_is_nan); |
| 640 } | 857 } |
| 641 | 858 |
| 642 | 859 |
| 643 // See comment at call site. | 860 // See comment at call site. |
| 644 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { | 861 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, |
| 862 Condition cond) { |
| 645 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 863 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
| 646 Register rhs_exponent = exp_first ? r0 : r1; | 864 Register rhs_exponent = exp_first ? r0 : r1; |
| 647 Register lhs_exponent = exp_first ? r2 : r3; | 865 Register lhs_exponent = exp_first ? r2 : r3; |
| 648 Register rhs_mantissa = exp_first ? r1 : r0; | 866 Register rhs_mantissa = exp_first ? r1 : r0; |
| 649 Register lhs_mantissa = exp_first ? r3 : r2; | 867 Register lhs_mantissa = exp_first ? r3 : r2; |
| 650 | 868 |
| 651 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. | 869 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. |
| 652 if (cc == eq) { | 870 if (cond == eq) { |
| 653 // Doubles are not equal unless they have the same bit pattern. | 871 // Doubles are not equal unless they have the same bit pattern. |
| 654 // Exception: 0 and -0. | 872 // Exception: 0 and -0. |
| 655 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); | 873 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); |
| 656 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); | 874 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); |
| 657 // Return non-zero if the numbers are unequal. | 875 // Return non-zero if the numbers are unequal. |
| 658 __ Ret(ne); | 876 __ Ret(ne); |
| 659 | 877 |
| 660 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); | 878 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); |
| 661 // If exponents are equal then return 0. | 879 // If exponents are equal then return 0. |
| 662 __ Ret(eq); | 880 __ Ret(eq); |
| (...skipping 165 matching lines...) |
| 828 __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); | 1046 __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); |
| 829 __ sub(mask, mask, Operand(1)); // Make mask. | 1047 __ sub(mask, mask, Operand(1)); // Make mask. |
| 830 | 1048 |
| 831 // Calculate the entry in the number string cache. The hash value in the | 1049 // Calculate the entry in the number string cache. The hash value in the |
| 832 // number string cache for smis is just the smi value, and the hash for | 1050 // number string cache for smis is just the smi value, and the hash for |
| 833 // doubles is the xor of the upper and lower words. See | 1051 // doubles is the xor of the upper and lower words. See |
| 834 // Heap::GetNumberStringCache. | 1052 // Heap::GetNumberStringCache. |
| 835 Label is_smi; | 1053 Label is_smi; |
| 836 Label load_result_from_cache; | 1054 Label load_result_from_cache; |
| 837 if (!object_is_smi) { | 1055 if (!object_is_smi) { |
| 838 __ BranchOnSmi(object, &is_smi); | 1056 __ JumpIfSmi(object, &is_smi); |
| 839 if (CpuFeatures::IsSupported(VFP3)) { | 1057 if (CpuFeatures::IsSupported(VFP3)) { |
| 840 CpuFeatures::Scope scope(VFP3); | 1058 CpuFeatures::Scope scope(VFP3); |
| 841 __ CheckMap(object, | 1059 __ CheckMap(object, |
| 842 scratch1, | 1060 scratch1, |
| 843 Heap::kHeapNumberMapRootIndex, | 1061 Heap::kHeapNumberMapRootIndex, |
| 844 not_found, | 1062 not_found, |
| 845 true); | 1063 true); |
| 846 | 1064 |
| 847 STATIC_ASSERT(8 == kDoubleSize); | 1065 STATIC_ASSERT(8 == kDoubleSize); |
| 848 __ add(scratch1, | 1066 __ add(scratch1, |
| 849 object, | 1067 object, |
| 850 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); | 1068 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); |
| 851 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); | 1069 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); |
| 852 __ eor(scratch1, scratch1, Operand(scratch2)); | 1070 __ eor(scratch1, scratch1, Operand(scratch2)); |
| 853 __ and_(scratch1, scratch1, Operand(mask)); | 1071 __ and_(scratch1, scratch1, Operand(mask)); |
| 854 | 1072 |
| 855 // Calculate address of entry in string cache: each entry consists | 1073 // Calculate address of entry in string cache: each entry consists |
| 856 // of two pointer sized fields. | 1074 // of two pointer sized fields. |
| 857 __ add(scratch1, | 1075 __ add(scratch1, |
| 858 number_string_cache, | 1076 number_string_cache, |
| 859 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); | 1077 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); |
| 860 | 1078 |
| 861 Register probe = mask; | 1079 Register probe = mask; |
| 862 __ ldr(probe, | 1080 __ ldr(probe, |
| 863 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 1081 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
| 864 __ BranchOnSmi(probe, not_found); | 1082 __ JumpIfSmi(probe, not_found); |
| 865 __ sub(scratch2, object, Operand(kHeapObjectTag)); | 1083 __ sub(scratch2, object, Operand(kHeapObjectTag)); |
| 866 __ vldr(d0, scratch2, HeapNumber::kValueOffset); | 1084 __ vldr(d0, scratch2, HeapNumber::kValueOffset); |
| 867 __ sub(probe, probe, Operand(kHeapObjectTag)); | 1085 __ sub(probe, probe, Operand(kHeapObjectTag)); |
| 868 __ vldr(d1, probe, HeapNumber::kValueOffset); | 1086 __ vldr(d1, probe, HeapNumber::kValueOffset); |
| 869 __ vcmp(d0, d1); | 1087 __ VFPCompareAndSetFlags(d0, d1); |
| 870 __ vmrs(pc); | |
| 871 __ b(ne, not_found); // The cache did not contain this value. | 1088 __ b(ne, not_found); // The cache did not contain this value. |
| 872 __ b(&load_result_from_cache); | 1089 __ b(&load_result_from_cache); |
| 873 } else { | 1090 } else { |
| 874 __ b(not_found); | 1091 __ b(not_found); |
| 875 } | 1092 } |
| 876 } | 1093 } |
| 877 | 1094 |
| 878 __ bind(&is_smi); | 1095 __ bind(&is_smi); |
| 879 Register scratch = scratch1; | 1096 Register scratch = scratch1; |
| 880 __ and_(scratch, mask, Operand(object, ASR, 1)); | 1097 __ and_(scratch, mask, Operand(object, ASR, 1)); |
| (...skipping 51 matching lines...) |
| 932 __ orr(r2, r1, r0); | 1149 __ orr(r2, r1, r0); |
| 933 __ tst(r2, Operand(kSmiTagMask)); | 1150 __ tst(r2, Operand(kSmiTagMask)); |
| 934 __ b(ne, &not_two_smis); | 1151 __ b(ne, &not_two_smis); |
| 935 __ mov(r1, Operand(r1, ASR, 1)); | 1152 __ mov(r1, Operand(r1, ASR, 1)); |
| 936 __ sub(r0, r1, Operand(r0, ASR, 1)); | 1153 __ sub(r0, r1, Operand(r0, ASR, 1)); |
| 937 __ Ret(); | 1154 __ Ret(); |
| 938 __ bind(&not_two_smis); | 1155 __ bind(&not_two_smis); |
| 939 } else if (FLAG_debug_code) { | 1156 } else if (FLAG_debug_code) { |
| 940 __ orr(r2, r1, r0); | 1157 __ orr(r2, r1, r0); |
| 941 __ tst(r2, Operand(kSmiTagMask)); | 1158 __ tst(r2, Operand(kSmiTagMask)); |
| 942 __ Assert(nz, "CompareStub: unexpected smi operands."); | 1159 __ Assert(ne, "CompareStub: unexpected smi operands."); |
| 943 } | 1160 } |
| 944 | 1161 |
| 945 // NOTICE! This code is only reached after a smi-fast-case check, so | 1162 // NOTICE! This code is only reached after a smi-fast-case check, so |
| 946 // it is certain that at least one operand isn't a smi. | 1163 // it is certain that at least one operand isn't a smi. |
| 947 | 1164 |
| 948 // Handle the case where the objects are identical. Either returns the answer | 1165 // Handle the case where the objects are identical. Either returns the answer |
| 949 // or goes to slow. Only falls through if the objects were not identical. | 1166 // or goes to slow. Only falls through if the objects were not identical. |
| 950 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); | 1167 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); |
| 951 | 1168 |
| 952 // If either is a Smi (we know that not both are), then they can only | 1169 // If either is a Smi (we know that not both are), then they can only |
| (...skipping 15 matching lines...) |
| 968 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); | 1185 EmitSmiNonsmiComparison(masm, lhs_, rhs_, &lhs_not_nan, &slow, strict_); |
| 969 | 1186 |
| 970 __ bind(&both_loaded_as_doubles); | 1187 __ bind(&both_loaded_as_doubles); |
| 971 // The arguments have been converted to doubles and stored in d6 and d7, if | 1188 // The arguments have been converted to doubles and stored in d6 and d7, if |
| 972 // VFP3 is supported, or in r0, r1, r2, and r3. | 1189 // VFP3 is supported, or in r0, r1, r2, and r3. |
| 973 if (CpuFeatures::IsSupported(VFP3)) { | 1190 if (CpuFeatures::IsSupported(VFP3)) { |
| 974 __ bind(&lhs_not_nan); | 1191 __ bind(&lhs_not_nan); |
| 975 CpuFeatures::Scope scope(VFP3); | 1192 CpuFeatures::Scope scope(VFP3); |
| 976 Label no_nan; | 1193 Label no_nan; |
| 977 // ARMv7 VFP3 instructions to implement double precision comparison. | 1194 // ARMv7 VFP3 instructions to implement double precision comparison. |
| 978 __ vcmp(d7, d6); | 1195 __ VFPCompareAndSetFlags(d7, d6); |
| 979 __ vmrs(pc); // Move vector status bits to normal status bits. | |
| 980 Label nan; | 1196 Label nan; |
| 981 __ b(vs, &nan); | 1197 __ b(vs, &nan); |
| 982 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 1198 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| 983 __ mov(r0, Operand(LESS), LeaveCC, lt); | 1199 __ mov(r0, Operand(LESS), LeaveCC, lt); |
| 984 __ mov(r0, Operand(GREATER), LeaveCC, gt); | 1200 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| 985 __ Ret(); | 1201 __ Ret(); |
| 986 | 1202 |
| 987 __ bind(&nan); | 1203 __ bind(&nan); |
| 988 // If one of the sides was a NaN then the v flag is set. Load r0 with | 1204 // If one of the sides was a NaN then the v flag is set. Load r0 with |
| 989 // whatever it takes to make the comparison fail, since comparisons with NaN | 1205 // whatever it takes to make the comparison fail, since comparisons with NaN |
| (...skipping 85 matching lines...) |
| 1075 // tagged as a small integer. | 1291 // tagged as a small integer. |
| 1076 __ InvokeBuiltin(native, JUMP_JS); | 1292 __ InvokeBuiltin(native, JUMP_JS); |
| 1077 } | 1293 } |
| 1078 | 1294 |
| 1079 | 1295 |
| 1080 // This stub does not handle the inlined cases (Smis, Booleans, undefined). | 1296 // This stub does not handle the inlined cases (Smis, Booleans, undefined). |
| 1081 // The stub returns zero for false, and a non-zero value for true. | 1297 // The stub returns zero for false, and a non-zero value for true. |
| 1082 void ToBooleanStub::Generate(MacroAssembler* masm) { | 1298 void ToBooleanStub::Generate(MacroAssembler* masm) { |
| 1083 Label false_result; | 1299 Label false_result; |
| 1084 Label not_heap_number; | 1300 Label not_heap_number; |
| 1085 Register scratch = r7; | 1301 Register scratch = r9.is(tos_) ? r7 : r9; |
| 1086 | 1302 |
| 1087 __ LoadRoot(ip, Heap::kNullValueRootIndex); | 1303 __ LoadRoot(ip, Heap::kNullValueRootIndex); |
| 1088 __ cmp(tos_, ip); | 1304 __ cmp(tos_, ip); |
| 1089 __ b(eq, &false_result); | 1305 __ b(eq, &false_result); |
| 1090 | 1306 |
| 1091 // HeapNumber => false iff +0, -0, or NaN. | 1307 // HeapNumber => false iff +0, -0, or NaN. |
| 1092 __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); | 1308 __ ldr(scratch, FieldMemOperand(tos_, HeapObject::kMapOffset)); |
| 1093 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); | 1309 __ LoadRoot(ip, Heap::kHeapNumberMapRootIndex); |
| 1094 __ cmp(scratch, ip); | 1310 __ cmp(scratch, ip); |
| 1095 __ b(&not_heap_number, ne); | 1311 __ b(&not_heap_number, ne); |
| 1096 | 1312 |
| 1097 __ sub(ip, tos_, Operand(kHeapObjectTag)); | 1313 __ sub(ip, tos_, Operand(kHeapObjectTag)); |
| 1098 __ vldr(d1, ip, HeapNumber::kValueOffset); | 1314 __ vldr(d1, ip, HeapNumber::kValueOffset); |
| 1099 __ vcmp(d1, 0.0); | 1315 __ VFPCompareAndSetFlags(d1, 0.0); |
| 1100 __ vmrs(pc); | |
| 1101 // "tos_" is a register, and contains a non zero value by default. | 1316 // "tos_" is a register, and contains a non zero value by default. |
| 1102 // Hence we only need to overwrite "tos_" with zero to return false for | 1317 // Hence we only need to overwrite "tos_" with zero to return false for |
| 1103 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. | 1318 // FP_ZERO or FP_NAN cases. Otherwise, by default it returns true. |
| 1104 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO | 1319 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, eq); // for FP_ZERO |
| 1105 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN | 1320 __ mov(tos_, Operand(0, RelocInfo::NONE), LeaveCC, vs); // for FP_NAN |
| 1106 __ Ret(); | 1321 __ Ret(); |
| 1107 | 1322 |
| 1108 __ bind(&not_heap_number); | 1323 __ bind(&not_heap_number); |
| 1109 | 1324 |
| 1110 // Check if the value is 'null'. | 1325 // Check if the value is 'null'. |
| (...skipping 260 matching lines...) |
| 1371 } else if (Token::ADD == op_) { | 1586 } else if (Token::ADD == op_) { |
| 1372 __ vadd(d5, d6, d7); | 1587 __ vadd(d5, d6, d7); |
| 1373 } else if (Token::SUB == op_) { | 1588 } else if (Token::SUB == op_) { |
| 1374 __ vsub(d5, d6, d7); | 1589 __ vsub(d5, d6, d7); |
| 1375 } else { | 1590 } else { |
| 1376 UNREACHABLE(); | 1591 UNREACHABLE(); |
| 1377 } | 1592 } |
| 1378 __ sub(r0, r5, Operand(kHeapObjectTag)); | 1593 __ sub(r0, r5, Operand(kHeapObjectTag)); |
| 1379 __ vstr(d5, r0, HeapNumber::kValueOffset); | 1594 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 1380 __ add(r0, r0, Operand(kHeapObjectTag)); | 1595 __ add(r0, r0, Operand(kHeapObjectTag)); |
| 1381 __ mov(pc, lr); | 1596 __ Ret(); |
| 1382 } else { | 1597 } else { |
| 1383 // If we did not inline the operation, then the arguments are in: | 1598 // If we did not inline the operation, then the arguments are in: |
| 1384 // r0: Left value (least significant part of mantissa). | 1599 // r0: Left value (least significant part of mantissa). |
| 1385 // r1: Left value (sign, exponent, top of mantissa). | 1600 // r1: Left value (sign, exponent, top of mantissa). |
| 1386 // r2: Right value (least significant part of mantissa). | 1601 // r2: Right value (least significant part of mantissa). |
| 1387 // r3: Right value (sign, exponent, top of mantissa). | 1602 // r3: Right value (sign, exponent, top of mantissa). |
| 1388 // r5: Address of heap number for result. | 1603 // r5: Address of heap number for result. |
| 1389 | 1604 |
| 1390 __ push(lr); // For later. | 1605 __ push(lr); // For later. |
| 1391 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. | 1606 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. |
| (...skipping 109 matching lines...) |
| 1501 Label done_checking_rhs, done_checking_lhs; | 1716 Label done_checking_rhs, done_checking_lhs; |
| 1502 | 1717 |
| 1503 Register heap_number_map = r6; | 1718 Register heap_number_map = r6; |
| 1504 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 1719 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 1505 | 1720 |
| 1506 __ tst(lhs, Operand(kSmiTagMask)); | 1721 __ tst(lhs, Operand(kSmiTagMask)); |
| 1507 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. | 1722 __ b(eq, &lhs_is_smi); // It's a Smi so don't check it's a heap number. |
| 1508 __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); | 1723 __ ldr(r4, FieldMemOperand(lhs, HeapNumber::kMapOffset)); |
| 1509 __ cmp(r4, heap_number_map); | 1724 __ cmp(r4, heap_number_map); |
| 1510 __ b(ne, &slow); | 1725 __ b(ne, &slow); |
| 1511 __ ConvertToInt32(lhs, r3, r5, r4, &slow); | 1726 __ ConvertToInt32(lhs, r3, r5, r4, d0, &slow); |
| 1512 __ jmp(&done_checking_lhs); | 1727 __ jmp(&done_checking_lhs); |
| 1513 __ bind(&lhs_is_smi); | 1728 __ bind(&lhs_is_smi); |
| 1514 __ mov(r3, Operand(lhs, ASR, 1)); | 1729 __ mov(r3, Operand(lhs, ASR, 1)); |
| 1515 __ bind(&done_checking_lhs); | 1730 __ bind(&done_checking_lhs); |
| 1516 | 1731 |
| 1517 __ tst(rhs, Operand(kSmiTagMask)); | 1732 __ tst(rhs, Operand(kSmiTagMask)); |
| 1518 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. | 1733 __ b(eq, &rhs_is_smi); // It's a Smi so don't check it's a heap number. |
| 1519 __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); | 1734 __ ldr(r4, FieldMemOperand(rhs, HeapNumber::kMapOffset)); |
| 1520 __ cmp(r4, heap_number_map); | 1735 __ cmp(r4, heap_number_map); |
| 1521 __ b(ne, &slow); | 1736 __ b(ne, &slow); |
| 1522 __ ConvertToInt32(rhs, r2, r5, r4, &slow); | 1737 __ ConvertToInt32(rhs, r2, r5, r4, d0, &slow); |
| 1523 __ jmp(&done_checking_rhs); | 1738 __ jmp(&done_checking_rhs); |
| 1524 __ bind(&rhs_is_smi); | 1739 __ bind(&rhs_is_smi); |
| 1525 __ mov(r2, Operand(rhs, ASR, 1)); | 1740 __ mov(r2, Operand(rhs, ASR, 1)); |
| 1526 __ bind(&done_checking_rhs); | 1741 __ bind(&done_checking_rhs); |
| 1527 | 1742 |
| 1528 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); | 1743 ASSERT(((lhs.is(r0) && rhs.is(r1)) || (lhs.is(r1) && rhs.is(r0)))); |
| 1529 | 1744 |
| 1530 // r0 and r1: Original operands (Smi or heap numbers). | 1745 // r0 and r1: Original operands (Smi or heap numbers). |
| 1531 // r2 and r3: Signed int32 operands. | 1746 // r2 and r3: Signed int32 operands. |
| 1532 switch (op_) { | 1747 switch (op_) { |
| (...skipping 423 matching lines...) |
| 1956 } | 2171 } |
| 1957 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL); | 2172 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL); |
| 1958 break; | 2173 break; |
| 1959 } | 2174 } |
| 1960 | 2175 |
| 1961 case Token::DIV: | 2176 case Token::DIV: |
| 1962 case Token::MOD: { | 2177 case Token::MOD: { |
| 1963 Label not_smi; | 2178 Label not_smi; |
| 1964 if (ShouldGenerateSmiCode() && specialized_on_rhs_) { | 2179 if (ShouldGenerateSmiCode() && specialized_on_rhs_) { |
| 1965 Label lhs_is_unsuitable; | 2180 Label lhs_is_unsuitable; |
| 1966 __ BranchOnNotSmi(lhs, &not_smi); | 2181 __ JumpIfNotSmi(lhs, &not_smi); |
| 1967 if (IsPowerOf2(constant_rhs_)) { | 2182 if (IsPowerOf2(constant_rhs_)) { |
| 1968 if (op_ == Token::MOD) { | 2183 if (op_ == Token::MOD) { |
| 1969 __ and_(rhs, | 2184 __ and_(rhs, |
| 1970 lhs, | 2185 lhs, |
| 1971 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), | 2186 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), |
| 1972 SetCC); | 2187 SetCC); |
| 1973 // We now have the answer, but if the input was negative we also | 2188 // We now have the answer, but if the input was negative we also |
| 1974 // have the sign bit. Our work is done if the result is | 2189 // have the sign bit. Our work is done if the result is |
| 1975 // positive or zero: | 2190 // positive or zero: |
| 1976 if (!rhs.is(r0)) { | 2191 if (!rhs.is(r0)) { |
| (...skipping 226 matching lines...) |
| 2203 | 2418 |
| 2204 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { | 2419 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { |
| 2205 GenericBinaryOpStub stub(key, type_info); | 2420 GenericBinaryOpStub stub(key, type_info); |
| 2206 return stub.GetCode(); | 2421 return stub.GetCode(); |
| 2207 } | 2422 } |
| 2208 | 2423 |
| 2209 | 2424 |
| 2210 Handle<Code> GetTypeRecordingBinaryOpStub(int key, | 2425 Handle<Code> GetTypeRecordingBinaryOpStub(int key, |
| 2211 TRBinaryOpIC::TypeInfo type_info, | 2426 TRBinaryOpIC::TypeInfo type_info, |
| 2212 TRBinaryOpIC::TypeInfo result_type_info) { | 2427 TRBinaryOpIC::TypeInfo result_type_info) { |
| 2428 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); |
| 2429 return stub.GetCode(); |
| 2430 } |
| 2431 |
| 2432 |
| 2433 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |
| 2434 Label get_result; |
| 2435 |
| 2436 __ Push(r1, r0); |
| 2437 |
| 2438 __ mov(r2, Operand(Smi::FromInt(MinorKey()))); |
| 2439 __ mov(r1, Operand(Smi::FromInt(op_))); |
| 2440 __ mov(r0, Operand(Smi::FromInt(operands_type_))); |
| 2441 __ Push(r2, r1, r0); |
| 2442 |
| 2443 __ TailCallExternalReference( |
| 2444 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)), |
| 2445 5, |
| 2446 1); |
| 2447 } |
| 2448 |
| 2449 |
| 2450 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs( |
| 2451 MacroAssembler* masm) { |
| 2213 UNIMPLEMENTED(); | 2452 UNIMPLEMENTED(); |
| 2214 return Handle<Code>::null(); | 2453 } |
| 2454 |
| 2455 |
| 2456 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) { |
| 2457 switch (operands_type_) { |
| 2458 case TRBinaryOpIC::UNINITIALIZED: |
| 2459 GenerateTypeTransition(masm); |
| 2460 break; |
| 2461 case TRBinaryOpIC::SMI: |
| 2462 GenerateSmiStub(masm); |
| 2463 break; |
| 2464 case TRBinaryOpIC::INT32: |
| 2465 GenerateInt32Stub(masm); |
| 2466 break; |
| 2467 case TRBinaryOpIC::HEAP_NUMBER: |
| 2468 GenerateHeapNumberStub(masm); |
| 2469 break; |
| 2470 case TRBinaryOpIC::STRING: |
| 2471 GenerateStringStub(masm); |
| 2472 break; |
| 2473 case TRBinaryOpIC::GENERIC: |
| 2474 GenerateGeneric(masm); |
| 2475 break; |
| 2476 default: |
| 2477 UNREACHABLE(); |
| 2478 } |
| 2479 } |
| 2480 |
| 2481 |
| 2482 const char* TypeRecordingBinaryOpStub::GetName() { |
| 2483 if (name_ != NULL) return name_; |
| 2484 const int kMaxNameLength = 100; |
| 2485 name_ = Bootstrapper::AllocateAutoDeletedArray(kMaxNameLength); |
| 2486 if (name_ == NULL) return "OOM"; |
| 2487 const char* op_name = Token::Name(op_); |
| 2488 const char* overwrite_name; |
| 2489 switch (mode_) { |
| 2490 case NO_OVERWRITE: overwrite_name = "Alloc"; break; |
| 2491 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |
| 2492 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |
| 2493 default: overwrite_name = "UnknownOverwrite"; break; |
| 2494 } |
| 2495 |
| 2496 OS::SNPrintF(Vector<char>(name_, kMaxNameLength), |
| 2497 "TypeRecordingBinaryOpStub_%s_%s_%s", |
| 2498 op_name, |
| 2499 overwrite_name, |
| 2500 TRBinaryOpIC::GetName(operands_type_)); |
| 2501 return name_; |
| 2502 } |
| 2503 |
| 2504 |
| 2505 void TypeRecordingBinaryOpStub::GenerateSmiSmiOperation( |
| 2506 MacroAssembler* masm) { |
| 2507 Register left = r1; |
| 2508 Register right = r0; |
| 2509 Register scratch1 = r7; |
| 2510 Register scratch2 = r9; |
| 2511 |
| 2512 ASSERT(right.is(r0)); |
| 2513 STATIC_ASSERT(kSmiTag == 0); |
| 2514 |
| 2515 Label not_smi_result; |
| 2516 switch (op_) { |
| 2517 case Token::ADD: |
| 2518 __ add(right, left, Operand(right), SetCC); // Add optimistically. |
| 2519 __ Ret(vc); |
| 2520 __ sub(right, right, Operand(left)); // Revert optimistic add. |
| 2521 break; |
| 2522 case Token::SUB: |
| 2523 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically. |
| 2524 __ Ret(vc); |
| 2525 __ sub(right, left, Operand(right)); // Revert optimistic subtract. |
| 2526 break; |
| 2527 case Token::MUL: |
| 2528 // Remove tag from one of the operands. This way the multiplication result |
| 2529 // will be a smi if it fits the smi range. |
| 2530 __ SmiUntag(ip, right); |
| 2531 // Do multiplication |
| 2532 // scratch1 = lower 32 bits of ip * left. |
| 2533 // scratch2 = higher 32 bits of ip * left. |
| 2534 __ smull(scratch1, scratch2, left, ip); |
| 2535 // Check for overflowing the smi range - no overflow if higher 33 bits of |
| 2536 // the result are identical. |
| 2537 __ mov(ip, Operand(scratch1, ASR, 31)); |
| 2538 __ cmp(ip, Operand(scratch2)); |
| 2539 __ b(ne, &not_smi_result); |
| 2540 // Go slow on zero result to handle -0. |
| 2541 __ tst(scratch1, Operand(scratch1)); |
| 2542 __ mov(right, Operand(scratch1), LeaveCC, ne); |
| 2543 __ Ret(ne); |
| 2544 // We need -0 if we were multiplying a negative number with 0 to get 0. |
| 2545 // We know one of them was zero. |
| 2546 __ add(scratch2, right, Operand(left), SetCC); |
| 2547 __ mov(right, Operand(Smi::FromInt(0)), LeaveCC, pl); |
| 2548 __ Ret(pl); // Return smi 0 if the non-zero one was positive. |
| 2549 // We fall through here if we multiplied a negative number with 0, because |
| 2550 // that would mean we should produce -0. |
| 2551 break; |
| 2552 case Token::DIV: |
| 2553 // Check for power of two on the right hand side. |
| 2554 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result); |
| 2555 // Check for positive and no remainder (scratch1 contains right - 1). |
| 2556 __ orr(scratch2, scratch1, Operand(0x80000000u)); |
| 2557 __ tst(left, scratch2); |
| 2558 __ b(ne, &not_smi_result); |
| 2559 |
| 2560 // Perform division by shifting. |
| 2561 __ CountLeadingZeros(scratch1, scratch1, scratch2); |
| 2562 __ rsb(scratch1, scratch1, Operand(31)); |
| 2563 __ mov(right, Operand(left, LSR, scratch1)); |
| 2564 __ Ret(); |
| 2565 break; |
| 2566 case Token::MOD: |
| 2567 // Check for two positive smis. |
| 2568 __ orr(scratch1, left, Operand(right)); |
| 2569 __ tst(scratch1, Operand(0x80000000u | kSmiTagMask)); |
| 2570 __ b(ne, &not_smi_result); |
| 2571 |
| 2572 // Check for power of two on the right hand side. |
| 2573 __ JumpIfNotPowerOfTwoOrZero(right, scratch1, &not_smi_result); |
| 2574 |
| 2575 // Perform modulus by masking. |
| 2576 __ and_(right, left, Operand(scratch1)); |
| 2577 __ Ret(); |
| 2578 break; |
| 2579 case Token::BIT_OR: |
| 2580 __ orr(right, left, Operand(right)); |
| 2581 __ Ret(); |
| 2582 break; |
| 2583 case Token::BIT_AND: |
| 2584 __ and_(right, left, Operand(right)); |
| 2585 __ Ret(); |
| 2586 break; |
| 2587 case Token::BIT_XOR: |
| 2588 __ eor(right, left, Operand(right)); |
| 2589 __ Ret(); |
| 2590 break; |
| 2591 case Token::SAR: |
| 2592 // Remove tags from right operand. |
| 2593 __ GetLeastBitsFromSmi(scratch1, right, 5); |
| 2594 __ mov(right, Operand(left, ASR, scratch1)); |
| 2595 // Smi tag result. |
| 2596 __ bic(right, right, Operand(kSmiTagMask)); |
| 2597 __ Ret(); |
| 2598 break; |
| 2599 case Token::SHR: |
| 2600 // Remove tags from operands. We can't do this on a 31 bit number |
| 2601 // because then the 0s get shifted into bit 30 instead of bit 31. |
| 2602 __ SmiUntag(scratch1, left); |
| 2603 __ GetLeastBitsFromSmi(scratch2, right, 5); |
| 2604 __ mov(scratch1, Operand(scratch1, LSR, scratch2)); |
| 2605 // Unsigned shift is not allowed to produce a negative number, so |
| 2606 // check the sign bit and the sign bit after Smi tagging. |
| 2607 __ tst(scratch1, Operand(0xc0000000)); |
| 2608 __ b(ne, &not_smi_result); |
| 2609 // Smi tag result. |
| 2610 __ SmiTag(right, scratch1); |
| 2611 __ Ret(); |
| 2612 break; |
| 2613 case Token::SHL: |
| 2614 // Remove tags from operands. |
| 2615 __ SmiUntag(scratch1, left); |
| 2616 __ GetLeastBitsFromSmi(scratch2, right, 5); |
| 2617 __ mov(scratch1, Operand(scratch1, LSL, scratch2)); |
| 2618 // Check that the signed result fits in a Smi. |
| 2619 __ add(scratch2, scratch1, Operand(0x40000000), SetCC); |
| 2620 __ b(mi, &not_smi_result); |
| 2621 __ SmiTag(right, scratch1); |
| 2622 __ Ret(); |
| 2623 break; |
| 2624 default: |
| 2625 UNREACHABLE(); |
| 2626 } |
| 2627 __ bind(&not_smi_result); |
| 2628 } |
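The MUL case above checks for smi overflow by comparing the high word produced by smull against the arithmetic right shift of the low word by 31: the 64-bit product fits in a signed 32-bit value exactly when the high word equals the sign extension of the low word's sign bit. A small host-side C++ illustration of that condition (not code the stub emits):

    #include <cstdint>

    // True iff left * right fits in int32_t, mirroring the smull / ASR #31 test.
    bool ProductFitsInInt32(int32_t left, int32_t right) {
      int64_t product = static_cast<int64_t>(left) * right;
      int32_t lo = static_cast<int32_t>(product);
      int32_t hi = static_cast<int32_t>(product >> 32);
      return hi == (lo >> 31);  // high word must equal sign extension of lo
    }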
| 2629 |
| 2630 |
| 2631 void TypeRecordingBinaryOpStub::GenerateFPOperation(MacroAssembler* masm, |
| 2632 bool smi_operands, |
| 2633 Label* not_numbers, |
| 2634 Label* gc_required) { |
| 2635 Register left = r1; |
| 2636 Register right = r0; |
| 2637 Register scratch1 = r7; |
| 2638 Register scratch2 = r9; |
| 2639 |
| 2640 ASSERT(smi_operands || (not_numbers != NULL)); |
| 2641 if (smi_operands && FLAG_debug_code) { |
| 2642 __ AbortIfNotSmi(left); |
| 2643 __ AbortIfNotSmi(right); |
| 2644 } |
| 2645 |
| 2646 Register heap_number_map = r6; |
| 2647 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2648 |
| 2649 switch (op_) { |
| 2650 case Token::ADD: |
| 2651 case Token::SUB: |
| 2652 case Token::MUL: |
| 2653 case Token::DIV: |
| 2654 case Token::MOD: { |
| 2655 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 |
| 2656 // depending on whether VFP3 is available or not. |
| 2657 FloatingPointHelper::Destination destination = |
| 2658 CpuFeatures::IsSupported(VFP3) && op_ != Token::MOD ? |
| 2659 FloatingPointHelper::kVFPRegisters : |
| 2660 FloatingPointHelper::kCoreRegisters; |
| 2661 |
| 2662 // Allocate new heap number for result. |
| 2663 Register result = r5; |
| 2664 __ AllocateHeapNumber( |
| 2665 result, scratch1, scratch2, heap_number_map, gc_required); |
| 2666 |
| 2667 // Load the operands. |
| 2668 if (smi_operands) { |
| 2669 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2); |
| 2670 } else { |
| 2671 FloatingPointHelper::LoadOperands(masm, |
| 2672 destination, |
| 2673 heap_number_map, |
| 2674 scratch1, |
| 2675 scratch2, |
| 2676 not_numbers); |
| 2677 } |
| 2678 |
| 2679 // Calculate the result. |
| 2680 if (destination == FloatingPointHelper::kVFPRegisters) { |
| 2681 // Using VFP registers: |
| 2682 // d6: Left value |
| 2683 // d7: Right value |
| 2684 CpuFeatures::Scope scope(VFP3); |
| 2685 switch (op_) { |
| 2686 case Token::ADD: |
| 2687 __ vadd(d5, d6, d7); |
| 2688 break; |
| 2689 case Token::SUB: |
| 2690 __ vsub(d5, d6, d7); |
| 2691 break; |
| 2692 case Token::MUL: |
| 2693 __ vmul(d5, d6, d7); |
| 2694 break; |
| 2695 case Token::DIV: |
| 2696 __ vdiv(d5, d6, d7); |
| 2697 break; |
| 2698 default: |
| 2699 UNREACHABLE(); |
| 2700 } |
| 2701 |
| 2702 __ sub(r0, result, Operand(kHeapObjectTag)); |
| 2703 __ vstr(d5, r0, HeapNumber::kValueOffset); |
| 2704 __ add(r0, r0, Operand(kHeapObjectTag)); |
| 2705 __ Ret(); |
| 2706 } else { |
| 2707 // Using core registers: |
| 2708 // r0: Left value (least significant part of mantissa). |
| 2709 // r1: Left value (sign, exponent, top of mantissa). |
| 2710 // r2: Right value (least significant part of mantissa). |
| 2711 // r3: Right value (sign, exponent, top of mantissa). |
| 2712 |
| 2713 // Push the current return address before the C call. Return will be |
| 2714 // through pop(pc) below. |
| 2715 __ push(lr); |
| 2716 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments. |
| 2717 // Call C routine that may not cause GC or other trouble. r5 is callee |
| 2718 // save. |
| 2719 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4); |
| 2720 // Store answer in the overwritable heap number. |
| 2721 #if !defined(USE_ARM_EABI) |
| 2722 // Double returned in fp coprocessor registers 0 and 1, encoded as |
| 2723 // register cr8. Offsets must be divisible by 4 for the coprocessor, so we |
| 2724 // need to subtract the tag from r5. |
| 2725 __ sub(scratch1, result, Operand(kHeapObjectTag)); |
| 2726 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset)); |
| 2727 #else |
| 2728 // Double returned in registers 0 and 1. |
| 2729 __ Strd(r0, r1, FieldMemOperand(result, HeapNumber::kValueOffset)); |
| 2730 #endif |
| 2731 // Place result in r0 and return to the pushed return address. |
| 2732 __ mov(r0, Operand(result)); |
| 2733 __ pop(pc); |
| 2734 } |
| 2735 break; |
| 2736 } |
| 2737 case Token::BIT_OR: |
| 2738 case Token::BIT_XOR: |
| 2739 case Token::BIT_AND: |
| 2740 case Token::SAR: |
| 2741 case Token::SHR: |
| 2742 case Token::SHL: { |
| 2743 if (smi_operands) { |
| 2744 __ SmiUntag(r3, left); |
| 2745 __ SmiUntag(r2, right); |
| 2746 } else { |
| 2747 // Convert operands to 32-bit integers. Right in r2 and left in r3. |
| 2748 FloatingPointHelper::LoadNumberAsInteger(masm, |
| 2749 left, |
| 2750 r3, |
| 2751 heap_number_map, |
| 2752 scratch1, |
| 2753 scratch2, |
| 2754 d0, |
| 2755 not_numbers); |
| 2756 FloatingPointHelper::LoadNumberAsInteger(masm, |
| 2757 right, |
| 2758 r2, |
| 2759 heap_number_map, |
| 2760 scratch1, |
| 2761 scratch2, |
| 2762 d0, |
| 2763 not_numbers); |
| 2764 } |
| 2765 |
| 2766 Label result_not_a_smi; |
| 2767 switch (op_) { |
| 2768 case Token::BIT_OR: |
| 2769 __ orr(r2, r3, Operand(r2)); |
| 2770 break; |
| 2771 case Token::BIT_XOR: |
| 2772 __ eor(r2, r3, Operand(r2)); |
| 2773 break; |
| 2774 case Token::BIT_AND: |
| 2775 __ and_(r2, r3, Operand(r2)); |
| 2776 break; |
| 2777 case Token::SAR: |
| 2778 // Use only the 5 least significant bits of the shift count. |
| 2779 __ and_(r2, r2, Operand(0x1f)); |
| 2780 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2781 __ mov(r2, Operand(r3, ASR, r2)); |
| 2782 break; |
| 2783 case Token::SHR: |
| 2784 // Use only the 5 least significant bits of the shift count. |
| 2785 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2786 __ mov(r2, Operand(r3, LSR, r2), SetCC); |
| 2787 // SHR is special because it is required to produce a positive answer. |
| 2788 // The code below for writing into heap numbers isn't capable of |
| 2789 // writing the register as an unsigned int so we go to slow case if we |
| 2790 // hit this case. |
| 2791 if (CpuFeatures::IsSupported(VFP3)) { |
| 2792 __ b(mi, &result_not_a_smi); |
| 2793 } else { |
| 2794 __ b(mi, not_numbers); |
| 2795 } |
| 2796 break; |
| 2797 case Token::SHL: |
| 2798 // Use only the 5 least significant bits of the shift count. |
| 2799 __ GetLeastBitsFromInt32(r2, r2, 5); |
| 2800 __ mov(r2, Operand(r3, LSL, r2)); |
| 2801 break; |
| 2802 default: |
| 2803 UNREACHABLE(); |
| 2804 } |
| 2805 |
| 2806 // Check that the *signed* result fits in a smi. |
| 2807 __ add(r3, r2, Operand(0x40000000), SetCC); |
| 2808 __ b(mi, &result_not_a_smi); |
| 2809 __ SmiTag(r0, r2); |
| 2810 __ Ret(); |
| 2811 |
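The range check just above is the standard bias trick: adding 0x40000000 to a 32-bit value makes the result negative exactly when the value lies outside the 31-bit smi range, so one flag-setting add plus a 'mi' branch replaces two comparisons. The equivalent predicate, as a sketch:

    #include <cstdint>

    // Mirrors "add(r3, r2, Operand(0x40000000), SetCC); b(mi, &result_not_a_smi)":
    // the biased value is non-negative iff v fits in [-2^30, 2^30 - 1].
    inline bool FitsInSmi(int32_t v) {
      return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
    }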
| 2812 // Allocate new heap number for result. |
| 2813 __ bind(&result_not_a_smi); |
| 2814 __ AllocateHeapNumber( |
| 2815 r5, scratch1, scratch2, heap_number_map, gc_required); |
| 2816 |
| 2817 // r2: Answer as signed int32. |
| 2818 // r5: Heap number to write answer into. |
| 2819 |
| 2820 // Nothing can go wrong now, so move the heap number to r0, which is the |
| 2821 // result. |
| 2822 __ mov(r0, Operand(r5)); |
| 2823 |
| 2824 if (CpuFeatures::IsSupported(VFP3)) { |
| 2825 // Convert the int32 in r2 to the heap number in r0. r3 is corrupted. As |
| 2826 // mentioned above SHR needs to always produce a positive result. |
| 2827 CpuFeatures::Scope scope(VFP3); |
| 2828 __ vmov(s0, r2); |
| 2829 if (op_ == Token::SHR) { |
| 2830 __ vcvt_f64_u32(d0, s0); |
| 2831 } else { |
| 2832 __ vcvt_f64_s32(d0, s0); |
| 2833 } |
| 2834 __ sub(r3, r0, Operand(kHeapObjectTag)); |
| 2835 __ vstr(d0, r3, HeapNumber::kValueOffset); |
| 2836 __ Ret(); |
| 2837 } else { |
| 2838 // Tail call that writes the int32 in r2 to the heap number in r0, using |
| 2839 // r3 as scratch. r0 is preserved and returned. |
| 2840 WriteInt32ToHeapNumberStub stub(r2, r0, r3); |
| 2841 __ TailCallStub(&stub); |
| 2842 } |
| 2843 break; |
| 2844 } |
| 2845 default: |
| 2846 UNREACHABLE(); |
| 2847 } |
| 2848 } |
| 2849 |
| 2850 |
| 2851 // Generate the smi code. If the operation on smis is successful, a return is |
| 2852 // generated. If the result is not a smi and heap number allocation is not |
| 2853 // requested, the code falls through. If heap number allocation is requested but |
| 2854 // a heap number cannot be allocated, the code jumps to the label gc_required. |
| 2855 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, |
| 2856 Label* gc_required, |
| 2857 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) { |
| 2858 Label not_smis; |
| 2859 |
| 2860 Register left = r1; |
| 2861 Register right = r0; |
| 2862 Register scratch1 = r7; |
| 2863 Register scratch2 = r9; |
| 2864 |
| 2865 // Perform combined smi check on both operands. |
| 2866 __ orr(scratch1, left, Operand(right)); |
| 2867 STATIC_ASSERT(kSmiTag == 0); |
| 2868 __ tst(scratch1, Operand(kSmiTagMask)); |
| 2869 __ b(ne, &not_smis); |
| 2870 |
| 2871 // If the smi-smi operation results in a smi, a return is generated. |
| 2872 GenerateSmiSmiOperation(masm); |
| 2873 |
| 2874 // If heap number results are possible, generate the result in an allocated |
| 2875 // heap number. |
| 2876 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) { |
| 2877 GenerateFPOperation(masm, true, NULL, gc_required); |
| 2878 } |
| 2879 __ bind(&not_smis); |
| 2880 } |
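The combined smi check at the top of GenerateSmiCode works because the smi tag occupies the low bit and kSmiTag is 0: OR-ing the operands leaves the tag bit clear only if it was clear in both. A small sketch, with the 32-bit tagging constant assumed:

    #include <cstdint>

    static const intptr_t kSmiTagMask = 1;  // assumed 32-bit V8 tagging

    // Mirrors "orr(scratch1, left, right); tst(scratch1, kSmiTagMask); b(ne, &not_smis)".
    inline bool BothSmis(intptr_t left, intptr_t right) {
      return ((left | right) & kSmiTagMask) == 0;
    }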
| 2881 |
| 2882 |
| 2883 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) { |
| 2884 Label not_smis, call_runtime; |
| 2885 |
| 2886 if (result_type_ == TRBinaryOpIC::UNINITIALIZED || |
| 2887 result_type_ == TRBinaryOpIC::SMI) { |
| 2888 // Only allow smi results. |
| 2889 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS); |
| 2890 } else { |
| 2891 // Allow heap number result and don't make a transition if a heap number |
| 2892 // cannot be allocated. |
| 2893 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
| 2894 } |
| 2895 |
| 2896 // Code falls through if the result is not returned as either a smi or heap |
| 2897 // number. |
| 2898 GenerateTypeTransition(masm); |
| 2899 |
| 2900 __ bind(&call_runtime); |
| 2901 GenerateCallRuntime(masm); |
| 2902 } |
| 2903 |
| 2904 |
| 2905 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) { |
| 2906 ASSERT(operands_type_ == TRBinaryOpIC::STRING); |
| 2907 ASSERT(op_ == Token::ADD); |
| 2908 // Try to add arguments as strings; otherwise, transition to the generic |
| 2909 // TRBinaryOpIC type. |
| 2910 GenerateAddStrings(masm); |
| 2911 GenerateTypeTransition(masm); |
| 2912 } |
| 2913 |
| 2914 |
| 2915 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) { |
| 2916 ASSERT(operands_type_ == TRBinaryOpIC::INT32); |
| 2917 |
| 2918 GenerateTypeTransition(masm); |
| 2919 } |
| 2920 |
| 2921 |
| 2922 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) { |
| 2923 Label not_numbers, call_runtime; |
| 2924 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER); |
| 2925 |
| 2926 GenerateFPOperation(masm, false, &not_numbers, &call_runtime); |
| 2927 |
| 2928 __ bind(&not_numbers); |
| 2929 GenerateTypeTransition(masm); |
| 2930 |
| 2931 __ bind(&call_runtime); |
| 2932 GenerateCallRuntime(masm); |
| 2933 } |
| 2934 |
| 2935 |
| 2936 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) { |
| 2937 Label call_runtime; |
| 2938 |
| 2939 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS); |
| 2940 |
| 2941 // If all else fails, use the runtime system to get the correct |
| 2942 // result. |
| 2943 __ bind(&call_runtime); |
| 2944 |
| 2945 // Try to add strings before calling runtime. |
| 2946 if (op_ == Token::ADD) { |
| 2947 GenerateAddStrings(masm); |
| 2948 } |
| 2949 |
| 2950 GenericBinaryOpStub stub(op_, mode_, r1, r0); |
| 2951 __ TailCallStub(&stub); |
| 2952 } |
| 2953 |
| 2954 |
| 2955 void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) { |
| 2956 ASSERT(op_ == Token::ADD); |
| 2957 |
| 2958 Register left = r1; |
| 2959 Register right = r0; |
| 2960 Label call_runtime; |
| 2961 |
| 2962 // Check if first argument is a string. |
| 2963 __ JumpIfSmi(left, &call_runtime); |
| 2964 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE); |
| 2965 __ b(ge, &call_runtime); |
| 2966 |
| 2967 // First argument is a string, test second. |
| 2968 __ JumpIfSmi(right, &call_runtime); |
| 2969 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE); |
| 2970 __ b(ge, &call_runtime); |
| 2971 |
| 2972 // First and second argument are strings. |
| 2973 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |
| 2974 GenerateRegisterArgsPush(masm); |
| 2975 __ TailCallStub(&string_add_stub); |
| 2976 |
| 2977 // At least one argument is not a string. |
| 2978 __ bind(&call_runtime); |
| 2979 } |
| 2980 |
| 2981 |
| 2982 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) { |
| 2983 GenerateRegisterArgsPush(masm); |
| 2984 switch (op_) { |
| 2985 case Token::ADD: |
| 2986 __ InvokeBuiltin(Builtins::ADD, JUMP_JS); |
| 2987 break; |
| 2988 case Token::SUB: |
| 2989 __ InvokeBuiltin(Builtins::SUB, JUMP_JS); |
| 2990 break; |
| 2991 case Token::MUL: |
| 2992 __ InvokeBuiltin(Builtins::MUL, JUMP_JS); |
| 2993 break; |
| 2994 case Token::DIV: |
| 2995 __ InvokeBuiltin(Builtins::DIV, JUMP_JS); |
| 2996 break; |
| 2997 case Token::MOD: |
| 2998 __ InvokeBuiltin(Builtins::MOD, JUMP_JS); |
| 2999 break; |
| 3000 case Token::BIT_OR: |
| 3001 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_JS); |
| 3002 break; |
| 3003 case Token::BIT_AND: |
| 3004 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_JS); |
| 3005 break; |
| 3006 case Token::BIT_XOR: |
| 3007 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_JS); |
| 3008 break; |
| 3009 case Token::SAR: |
| 3010 __ InvokeBuiltin(Builtins::SAR, JUMP_JS); |
| 3011 break; |
| 3012 case Token::SHR: |
| 3013 __ InvokeBuiltin(Builtins::SHR, JUMP_JS); |
| 3014 break; |
| 3015 case Token::SHL: |
| 3016 __ InvokeBuiltin(Builtins::SHL, JUMP_JS); |
| 3017 break; |
| 3018 default: |
| 3019 UNREACHABLE(); |
| 3020 } |
| 3021 } |
| 3022 |
| 3023 |
| 3024 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation( |
| 3025 MacroAssembler* masm, |
| 3026 Register result, |
| 3027 Register heap_number_map, |
| 3028 Register scratch1, |
| 3029 Register scratch2, |
| 3030 Label* gc_required) { |
| 3031 |
| 3032 // The code below will clobber result if allocation fails. To keep both |
| 3033 // arguments intact for the runtime call, result cannot be r0 or r1. |
| 3034 ASSERT(!result.is(r0) && !result.is(r1)); |
| 3035 |
| 3036 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) { |
| 3037 Label skip_allocation, allocated; |
| 3038 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0; |
| 3039 // If the overwritable operand is already an object, we skip the |
| 3040 // allocation of a heap number. |
| 3041 __ JumpIfNotSmi(overwritable_operand, &skip_allocation); |
| 3042 // Allocate a heap number for the result. |
| 3043 __ AllocateHeapNumber( |
| 3044 result, scratch1, scratch2, heap_number_map, gc_required); |
| 3045 __ b(&allocated); |
| 3046 __ bind(&skip_allocation); |
| 3047 // Use object holding the overwritable operand for result. |
| 3048 __ mov(result, Operand(overwritable_operand)); |
| 3049 __ bind(&allocated); |
| 3050 } else { |
| 3051 ASSERT(mode_ == NO_OVERWRITE); |
| 3052 __ AllocateHeapNumber( |
| 3053 result, scratch1, scratch2, heap_number_map, gc_required); |
| 3054 } |
| 3055 } |
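To summarize the overwrite handling in GenerateHeapResultAllocation: for OVERWRITE_LEFT/RIGHT the stub reuses the chosen operand's heap number as the result object and only allocates when that operand is a smi (which has no heap storage to reuse); NO_OVERWRITE always allocates. A schematic C++ rendering, with IsSmi and AllocateNewHeapNumber as illustrative stand-ins for the macro-assembler work:

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };
    struct Object;                     // placeholder for a tagged value
    Object* AllocateNewHeapNumber();   // stand-in for AllocateHeapNumber(...)
    bool IsSmi(Object* value);         // stand-in for JumpIfNotSmi(...)

    // Sketch of the result-slot choice: reuse the overwritable operand unless
    // it is a smi, otherwise allocate a fresh heap number.
    Object* SelectResultSlot(OverwriteMode mode, Object* left, Object* right) {
      if (mode == OVERWRITE_LEFT && !IsSmi(left)) return left;     // reuse lhs
      if (mode == OVERWRITE_RIGHT && !IsSmi(right)) return right;  // reuse rhs
      return AllocateNewHeapNumber();  // NO_OVERWRITE, or operand is a smi
    }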
| 3056 |
| 3057 |
| 3058 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |
| 3059 __ Push(r1, r0); |
| 2215 } | 3060 } |
| 2216 | 3061 |
| 2217 | 3062 |
| 2218 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { | 3063 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { |
| 2219 // Argument is a number and is on stack and in r0. | 3064 // Argument is a number and is on stack and in r0. |
| 2220 Label runtime_call; | 3065 Label runtime_call; |
| 2221 Label input_not_smi; | 3066 Label input_not_smi; |
| 2222 Label loaded; | 3067 Label loaded; |
| 2223 | 3068 |
| 2224 if (CpuFeatures::IsSupported(VFP3)) { | 3069 if (CpuFeatures::IsSupported(VFP3)) { |
| 2225 // Load argument and check if it is a smi. | 3070 // Load argument and check if it is a smi. |
| 2226 __ BranchOnNotSmi(r0, &input_not_smi); | 3071 __ JumpIfNotSmi(r0, &input_not_smi); |
| 2227 | 3072 |
| 2228 CpuFeatures::Scope scope(VFP3); | 3073 CpuFeatures::Scope scope(VFP3); |
| 2229 // Input is a smi. Convert to double and load the low and high words | 3074 // Input is a smi. Convert to double and load the low and high words |
| 2230 // of the double into r2, r3. | 3075 // of the double into r2, r3. |
| 2231 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); | 3076 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); |
| 2232 __ b(&loaded); | 3077 __ b(&loaded); |
| 2233 | 3078 |
| 2234 __ bind(&input_not_smi); | 3079 __ bind(&input_not_smi); |
| 2235 // Check if input is a HeapNumber. | 3080 // Check if input is a HeapNumber. |
| 2236 __ CheckMap(r0, | 3081 __ CheckMap(r0, |
| (...skipping 133 matching lines...) |
| 2370 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 3215 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
| 2371 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 3216 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
| 2372 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); | 3217 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); |
| 2373 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. | 3218 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. |
| 2374 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); | 3219 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); |
| 2375 __ mov(r0, Operand(r1)); | 3220 __ mov(r0, Operand(r1)); |
| 2376 } | 3221 } |
| 2377 } else if (op_ == Token::BIT_NOT) { | 3222 } else if (op_ == Token::BIT_NOT) { |
| 2378 if (include_smi_code_) { | 3223 if (include_smi_code_) { |
| 2379 Label non_smi; | 3224 Label non_smi; |
| 2380 __ BranchOnNotSmi(r0, &non_smi); | 3225 __ JumpIfNotSmi(r0, &non_smi); |
| 2381 __ mvn(r0, Operand(r0)); | 3226 __ mvn(r0, Operand(r0)); |
| 2382 // Bit-clear inverted smi-tag. | 3227 // Bit-clear inverted smi-tag. |
| 2383 __ bic(r0, r0, Operand(kSmiTagMask)); | 3228 __ bic(r0, r0, Operand(kSmiTagMask)); |
| 2384 __ Ret(); | 3229 __ Ret(); |
| 2385 __ bind(&non_smi); | 3230 __ bind(&non_smi); |
| 2386 } else if (FLAG_debug_code) { | 3231 } else if (FLAG_debug_code) { |
| 2387 __ tst(r0, Operand(kSmiTagMask)); | 3232 __ tst(r0, Operand(kSmiTagMask)); |
| 2388 __ Assert(ne, "Unexpected smi operand."); | 3233 __ Assert(ne, "Unexpected smi operand."); |
| 2389 } | 3234 } |
| 2390 | 3235 |
| 2391 // Check if the operand is a heap number. | 3236 // Check if the operand is a heap number. |
| 2392 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); | 3237 __ ldr(r1, FieldMemOperand(r0, HeapObject::kMapOffset)); |
| 2393 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 3238 __ AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 2394 __ cmp(r1, heap_number_map); | 3239 __ cmp(r1, heap_number_map); |
| 2395 __ b(ne, &slow); | 3240 __ b(ne, &slow); |
| 2396 | 3241 |
| 2397 // Convert the heap number in r0 to an untagged integer in r1. | 3242 // Convert the heap number in r0 to an untagged integer in r1. |
| 2398 __ ConvertToInt32(r0, r1, r2, r3, &slow); | 3243 __ ConvertToInt32(r0, r1, r2, r3, d0, &slow); |
| 2399 | 3244 |
| 2400 // Do the bitwise operation (move negated) and check if the result | 3245 // Do the bitwise operation (move negated) and check if the result |
| 2401 // fits in a smi. | 3246 // fits in a smi. |
| 2402 Label try_float; | 3247 Label try_float; |
| 2403 __ mvn(r1, Operand(r1)); | 3248 __ mvn(r1, Operand(r1)); |
| 2404 __ add(r2, r1, Operand(0x40000000), SetCC); | 3249 __ add(r2, r1, Operand(0x40000000), SetCC); |
| 2405 __ b(mi, &try_float); | 3250 __ b(mi, &try_float); |
| 2406 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); | 3251 __ mov(r0, Operand(r1, LSL, kSmiTagSize)); |
| 2407 __ b(&done); | 3252 __ b(&done); |
| 2408 | 3253 |
| (...skipping 38 matching lines...) |
| 2447 case Token::BIT_NOT: | 3292 case Token::BIT_NOT: |
| 2448 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); | 3293 __ InvokeBuiltin(Builtins::BIT_NOT, JUMP_JS); |
| 2449 break; | 3294 break; |
| 2450 default: | 3295 default: |
| 2451 UNREACHABLE(); | 3296 UNREACHABLE(); |
| 2452 } | 3297 } |
| 2453 } | 3298 } |
| 2454 | 3299 |
| 2455 | 3300 |
| 2456 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { | 3301 void CEntryStub::GenerateThrowTOS(MacroAssembler* masm) { |
| 2457 // r0 holds the exception. | 3302 __ Throw(r0); |
| 2458 | |
| 2459 // Adjust this code if not the case. | |
| 2460 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | |
| 2461 | |
| 2462 // Drop the sp to the top of the handler. | |
| 2463 __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | |
| 2464 __ ldr(sp, MemOperand(r3)); | |
| 2465 | |
| 2466 // Restore the next handler and frame pointer, discard handler state. | |
| 2467 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 2468 __ pop(r2); | |
| 2469 __ str(r2, MemOperand(r3)); | |
| 2470 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); | |
| 2471 __ ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state. | |
| 2472 | |
| 2473 // Before returning we restore the context from the frame pointer if | |
| 2474 // not NULL. The frame pointer is NULL in the exception handler of a | |
| 2475 // JS entry frame. | |
| 2476 __ cmp(fp, Operand(0, RelocInfo::NONE)); | |
| 2477 // Set cp to NULL if fp is NULL. | |
| 2478 __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); | |
| 2479 // Restore cp otherwise. | |
| 2480 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | |
| 2481 #ifdef DEBUG | |
| 2482 if (FLAG_debug_code) { | |
| 2483 __ mov(lr, Operand(pc)); | |
| 2484 } | |
| 2485 #endif | |
| 2486 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); | |
| 2487 __ pop(pc); | |
| 2488 } | 3303 } |
| 2489 | 3304 |
| 2490 | 3305 |
| 2491 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, | 3306 void CEntryStub::GenerateThrowUncatchable(MacroAssembler* masm, |
| 2492 UncatchableExceptionType type) { | 3307 UncatchableExceptionType type) { |
| 2493 // Adjust this code if not the case. | 3308 __ ThrowUncatchable(type, r0); |
| 2494 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize); | |
| 2495 | |
| 2496 // Drop sp to the top stack handler. | |
| 2497 __ mov(r3, Operand(ExternalReference(Top::k_handler_address))); | |
| 2498 __ ldr(sp, MemOperand(r3)); | |
| 2499 | |
| 2500 // Unwind the handlers until the ENTRY handler is found. | |
| 2501 Label loop, done; | |
| 2502 __ bind(&loop); | |
| 2503 // Load the type of the current stack handler. | |
| 2504 const int kStateOffset = StackHandlerConstants::kStateOffset; | |
| 2505 __ ldr(r2, MemOperand(sp, kStateOffset)); | |
| 2506 __ cmp(r2, Operand(StackHandler::ENTRY)); | |
| 2507 __ b(eq, &done); | |
| 2508 // Fetch the next handler in the list. | |
| 2509 const int kNextOffset = StackHandlerConstants::kNextOffset; | |
| 2510 __ ldr(sp, MemOperand(sp, kNextOffset)); | |
| 2511 __ jmp(&loop); | |
| 2512 __ bind(&done); | |
| 2513 | |
| 2514 // Set the top handler address to next handler past the current ENTRY handler. | |
| 2515 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | |
| 2516 __ pop(r2); | |
| 2517 __ str(r2, MemOperand(r3)); | |
| 2518 | |
| 2519 if (type == OUT_OF_MEMORY) { | |
| 2520 // Set external caught exception to false. | |
| 2521 ExternalReference external_caught(Top::k_external_caught_exception_address); | |
| 2522 __ mov(r0, Operand(false)); | |
| 2523 __ mov(r2, Operand(external_caught)); | |
| 2524 __ str(r0, MemOperand(r2)); | |
| 2525 | |
| 2526 // Set pending exception and r0 to out of memory exception. | |
| 2527 Failure* out_of_memory = Failure::OutOfMemoryException(); | |
| 2528 __ mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory))); | |
| 2529 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 2530 __ str(r0, MemOperand(r2)); | |
| 2531 } | |
| 2532 | |
| 2533 // Stack layout at this point. See also StackHandlerConstants. | |
| 2534 // sp -> state (ENTRY) | |
| 2535 // fp | |
| 2536 // lr | |
| 2537 | |
| 2538 // Discard handler state (r2 is not used) and restore frame pointer. | |
| 2539 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize); | |
| 2540 __ ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state. | |
| 2541 // Before returning we restore the context from the frame pointer if | |
| 2542 // not NULL. The frame pointer is NULL in the exception handler of a | |
| 2543 // JS entry frame. | |
| 2544 __ cmp(fp, Operand(0, RelocInfo::NONE)); | |
| 2545 // Set cp to NULL if fp is NULL. | |
| 2546 __ mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq); | |
| 2547 // Restore cp otherwise. | |
| 2548 __ ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne); | |
| 2549 #ifdef DEBUG | |
| 2550 if (FLAG_debug_code) { | |
| 2551 __ mov(lr, Operand(pc)); | |
| 2552 } | |
| 2553 #endif | |
| 2554 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); | |
| 2555 __ pop(pc); | |
| 2556 } | 3309 } |
| 2557 | 3310 |
| 2558 | 3311 |
| 2559 void CEntryStub::GenerateCore(MacroAssembler* masm, | 3312 void CEntryStub::GenerateCore(MacroAssembler* masm, |
| 2560 Label* throw_normal_exception, | 3313 Label* throw_normal_exception, |
| 2561 Label* throw_termination_exception, | 3314 Label* throw_termination_exception, |
| 2562 Label* throw_out_of_memory_exception, | 3315 Label* throw_out_of_memory_exception, |
| 2563 bool do_gc, | 3316 bool do_gc, |
| 2564 bool always_allocate, | 3317 bool always_allocate) { |
| 2565 int frame_alignment_skew) { | |
| 2566 // r0: result parameter for PerformGC, if any | 3318 // r0: result parameter for PerformGC, if any |
| 2567 // r4: number of arguments including receiver (C callee-saved) | 3319 // r4: number of arguments including receiver (C callee-saved) |
| 2568 // r5: pointer to builtin function (C callee-saved) | 3320 // r5: pointer to builtin function (C callee-saved) |
| 2569 // r6: pointer to the first argument (C callee-saved) | 3321 // r6: pointer to the first argument (C callee-saved) |
| 2570 | 3322 |
| 2571 if (do_gc) { | 3323 if (do_gc) { |
| 2572 // Passing r0. | 3324 // Passing r0. |
| 2573 __ PrepareCallCFunction(1, r1); | 3325 __ PrepareCallCFunction(1, r1); |
| 2574 __ CallCFunction(ExternalReference::perform_gc_function(), 1); | 3326 __ CallCFunction(ExternalReference::perform_gc_function(), 1); |
| 2575 } | 3327 } |
| 2576 | 3328 |
| 2577 ExternalReference scope_depth = | 3329 ExternalReference scope_depth = |
| 2578 ExternalReference::heap_always_allocate_scope_depth(); | 3330 ExternalReference::heap_always_allocate_scope_depth(); |
| 2579 if (always_allocate) { | 3331 if (always_allocate) { |
| 2580 __ mov(r0, Operand(scope_depth)); | 3332 __ mov(r0, Operand(scope_depth)); |
| 2581 __ ldr(r1, MemOperand(r0)); | 3333 __ ldr(r1, MemOperand(r0)); |
| 2582 __ add(r1, r1, Operand(1)); | 3334 __ add(r1, r1, Operand(1)); |
| 2583 __ str(r1, MemOperand(r0)); | 3335 __ str(r1, MemOperand(r0)); |
| 2584 } | 3336 } |
| 2585 | 3337 |
| 2586 // Call C built-in. | 3338 // Call C built-in. |
| 2587 // r0 = argc, r1 = argv | 3339 // r0 = argc, r1 = argv |
| 2588 __ mov(r0, Operand(r4)); | 3340 __ mov(r0, Operand(r4)); |
| 2589 __ mov(r1, Operand(r6)); | 3341 __ mov(r1, Operand(r6)); |
| 2590 | 3342 |
| 3343 #if defined(V8_HOST_ARCH_ARM) |
| 2591 int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 3344 int frame_alignment = MacroAssembler::ActivationFrameAlignment(); |
| 2592 int frame_alignment_mask = frame_alignment - 1; | 3345 int frame_alignment_mask = frame_alignment - 1; |
| 2593 #if defined(V8_HOST_ARCH_ARM) | |
| 2594 if (FLAG_debug_code) { | 3346 if (FLAG_debug_code) { |
| 2595 if (frame_alignment > kPointerSize) { | 3347 if (frame_alignment > kPointerSize) { |
| 2596 Label alignment_as_expected; | 3348 Label alignment_as_expected; |
| 2597 ASSERT(IsPowerOf2(frame_alignment)); | 3349 ASSERT(IsPowerOf2(frame_alignment)); |
| 2598 __ sub(r2, sp, Operand(frame_alignment_skew)); | 3350 __ tst(sp, Operand(frame_alignment_mask)); |
| 2599 __ tst(r2, Operand(frame_alignment_mask)); | |
| 2600 __ b(eq, &alignment_as_expected); | 3351 __ b(eq, &alignment_as_expected); |
| 2601 // Don't use Check here, as it will call Runtime_Abort re-entering here. | 3352 // Don't use Check here, as it will call Runtime_Abort re-entering here. |
| 2602 __ stop("Unexpected alignment"); | 3353 __ stop("Unexpected alignment"); |
| 2603 __ bind(&alignment_as_expected); | 3354 __ bind(&alignment_as_expected); |
| 2604 } | 3355 } |
| 2605 } | 3356 } |
| 2606 #endif | 3357 #endif |
| 2607 | 3358 |
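The debug check above relies on the activation frame alignment being a power of two, so a single mask test of sp suffices. The same predicate, as a sketch:

    #include <cstdint>

    // Mirrors "tst(sp, frame_alignment_mask)" with mask = alignment - 1;
    // valid only because the alignment is a power of two.
    inline bool IsFrameAligned(uintptr_t sp, uintptr_t frame_alignment) {
      return (sp & (frame_alignment - 1)) == 0;
    }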
| 2608 // Just before the call (jump) below lr is pushed, so the actual alignment is | |
| 2609 // adding one to the current skew. | |
| 2610 int alignment_before_call = | |
| 2611 (frame_alignment_skew + kPointerSize) & frame_alignment_mask; | |
| 2612 if (alignment_before_call > 0) { | |
| 2613 // Push until the alignment before the call is met. | |
| 2614 __ mov(r2, Operand(0, RelocInfo::NONE)); | |
| 2615 for (int i = alignment_before_call; | |
| 2616 (i & frame_alignment_mask) != 0; | |
| 2617 i += kPointerSize) { | |
| 2618 __ push(r2); | |
| 2619 } | |
| 2620 } | |
| 2621 | |
| 2622 // TODO(1242173): To let the GC traverse the return address of the exit | 3359 // TODO(1242173): To let the GC traverse the return address of the exit |
| 2623 // frames, we need to know where the return address is. Right now, | 3360 // frames, we need to know where the return address is. Right now, |
| 2624 // we push it on the stack to be able to find it again, but we never | 3361 // we store it on the stack to be able to find it again, but we never |
| 2625 // restore from it in case of changes, which makes it impossible to | 3362 // restore from it in case of changes, which makes it impossible to |
| 2626 // support moving the C entry code stub. This should be fixed, but currently | 3363 // support moving the C entry code stub. This should be fixed, but currently |
| 2627 // this is OK because the CEntryStub gets generated so early in the V8 boot | 3364 // this is OK because the CEntryStub gets generated so early in the V8 boot |
| 2628 // sequence that it is not moving ever. | 3365 // sequence that it is not moving ever. |
| 2629 masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4 | 3366 |
| 2630 masm->push(lr); | 3367 // Compute the return address in lr to return to after the jump below. Pc is |
| 3368 // already at '+ 8' from the current instruction but return is after three |
| 3369 // instructions so add another 4 to pc to get the return address. |
| 3370 masm->add(lr, pc, Operand(4)); |
| 3371 __ str(lr, MemOperand(sp, 0)); |
| 2631 masm->Jump(r5); | 3372 masm->Jump(r5); |
| 2632 | 3373 |
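The return-address computation above uses the ARM convention that reading pc yields the address of the current instruction plus 8; adding 4 more makes lr point three 4-byte instructions past the add, i.e. just after the add/str/jump sequence. The arithmetic, spelled out as a sketch:

    #include <cstdint>

    // lr = (addr_of_add + 8) + 4 = addr_of_add + 12: three instructions past
    // the "add lr, pc, #4", which is the slot right after the jump.
    constexpr uint32_t ReturnAddressAfterJump(uint32_t addr_of_add) {
      return addr_of_add + 8 + 4;
    }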
| 2633 // Restore sp back to before aligning the stack. | |
| 2634 if (alignment_before_call > 0) { | |
| 2635 __ add(sp, sp, Operand(alignment_before_call)); | |
| 2636 } | |
| 2637 | |
| 2638 if (always_allocate) { | 3374 if (always_allocate) { |
| 2639 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 | 3375 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 |
| 2640 // though (contain the result). | 3376 // though (contain the result). |
| 2641 __ mov(r2, Operand(scope_depth)); | 3377 __ mov(r2, Operand(scope_depth)); |
| 2642 __ ldr(r3, MemOperand(r2)); | 3378 __ ldr(r3, MemOperand(r2)); |
| 2643 __ sub(r3, r3, Operand(1)); | 3379 __ sub(r3, r3, Operand(1)); |
| 2644 __ str(r3, MemOperand(r2)); | 3380 __ str(r3, MemOperand(r2)); |
| 2645 } | 3381 } |
| 2646 | 3382 |
| 2647 // check for failure result | 3383 // check for failure result |
| 2648 Label failure_returned; | 3384 Label failure_returned; |
| 2649 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); | 3385 STATIC_ASSERT(((kFailureTag + 1) & kFailureTagMask) == 0); |
| 2650 // Lower 2 bits of r2 are 0 iff r0 has failure tag. | 3386 // Lower 2 bits of r2 are 0 iff r0 has failure tag. |
| 2651 __ add(r2, r0, Operand(1)); | 3387 __ add(r2, r0, Operand(1)); |
| 2652 __ tst(r2, Operand(kFailureTagMask)); | 3388 __ tst(r2, Operand(kFailureTagMask)); |
| 2653 __ b(eq, &failure_returned); | 3389 __ b(eq, &failure_returned); |
| 2654 | 3390 |
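The failure check above depends on Failure pointers having both low tag bits set: adding one clears those two bits, so a single tst against the tag mask detects them, while smis (low bit 0) and ordinary heap objects (low bits 01) fall through. A sketch, with the tag constant assumed:

    #include <cstdint>

    static const intptr_t kFailureTagMask = 3;  // assumed: failure tag is 0b11

    // Mirrors "add(r2, r0, Operand(1)); tst(r2, kFailureTagMask); b(eq, ...)".
    inline bool IsFailureResult(intptr_t result) {
      return ((result + 1) & kFailureTagMask) == 0;
    }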
| 2655 // Exit C frame and return. | 3391 // Exit C frame and return. |
| 2656 // r0:r1: result | 3392 // r0:r1: result |
| 2657 // sp: stack pointer | 3393 // sp: stack pointer |
| 2658 // fp: frame pointer | 3394 // fp: frame pointer |
| 2659 __ LeaveExitFrame(save_doubles_); | 3395 // Callee-saved register r4 still holds argc. |
| 3396 __ LeaveExitFrame(save_doubles_, r4); |
| 3397 __ mov(pc, lr); |
| 2660 | 3398 |
| 2661 // check if we should retry or throw exception | 3399 // check if we should retry or throw exception |
| 2662 Label retry; | 3400 Label retry; |
| 2663 __ bind(&failure_returned); | 3401 __ bind(&failure_returned); |
| 2664 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); | 3402 STATIC_ASSERT(Failure::RETRY_AFTER_GC == 0); |
| 2665 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); | 3403 __ tst(r0, Operand(((1 << kFailureTypeTagSize) - 1) << kFailureTagSize)); |
| 2666 __ b(eq, &retry); | 3404 __ b(eq, &retry); |
| 2667 | 3405 |
| 2668 // Special handling of out of memory exceptions. | 3406 // Special handling of out of memory exceptions. |
| 2669 Failure* out_of_memory = Failure::OutOfMemoryException(); | 3407 Failure* out_of_memory = Failure::OutOfMemoryException(); |
| (...skipping 27 matching lines...) |
| 2697 // sp: stack pointer (restored as callee's sp after C call) | 3435 // sp: stack pointer (restored as callee's sp after C call) |
| 2698 // cp: current context (C callee-saved) | 3436 // cp: current context (C callee-saved) |
| 2699 | 3437 |
| 2700 // Result returned in r0 or r0+r1 by default. | 3438 // Result returned in r0 or r0+r1 by default. |
| 2701 | 3439 |
| 2702 // NOTE: Invocations of builtins may return failure objects | 3440 // NOTE: Invocations of builtins may return failure objects |
| 2703 // instead of a proper result. The builtin entry handles | 3441 // instead of a proper result. The builtin entry handles |
| 2704 // this by performing a garbage collection and retrying the | 3442 // this by performing a garbage collection and retrying the |
| 2705 // builtin once. | 3443 // builtin once. |
| 2706 | 3444 |
| 3445 // Compute the argv pointer in a callee-saved register. |
| 3446 __ add(r6, sp, Operand(r0, LSL, kPointerSizeLog2)); |
| 3447 __ sub(r6, r6, Operand(kPointerSize)); |
| 3448 |
| 2707 // Enter the exit frame that transitions from JavaScript to C++. | 3449 // Enter the exit frame that transitions from JavaScript to C++. |
| 2708 __ EnterExitFrame(save_doubles_); | 3450 __ EnterExitFrame(save_doubles_); |
| 2709 | 3451 |
| 3452 // Setup argc and the builtin function in callee-saved registers. |
| 3453 __ mov(r4, Operand(r0)); |
| 3454 __ mov(r5, Operand(r1)); |
| 3455 |
| 2710 // r4: number of arguments (C callee-saved) | 3456 // r4: number of arguments (C callee-saved) |
| 2711 // r5: pointer to builtin function (C callee-saved) | 3457 // r5: pointer to builtin function (C callee-saved) |
| 2712 // r6: pointer to first argument (C callee-saved) | 3458 // r6: pointer to first argument (C callee-saved) |
| 2713 | 3459 |
| 2714 Label throw_normal_exception; | 3460 Label throw_normal_exception; |
| 2715 Label throw_termination_exception; | 3461 Label throw_termination_exception; |
| 2716 Label throw_out_of_memory_exception; | 3462 Label throw_out_of_memory_exception; |
| 2717 | 3463 |
| 2718 // Call into the runtime system. | 3464 // Call into the runtime system. |
| 2719 GenerateCore(masm, | 3465 GenerateCore(masm, |
| 2720 &throw_normal_exception, | 3466 &throw_normal_exception, |
| 2721 &throw_termination_exception, | 3467 &throw_termination_exception, |
| 2722 &throw_out_of_memory_exception, | 3468 &throw_out_of_memory_exception, |
| 2723 false, | 3469 false, |
| 2724 false, | 3470 false); |
| 2725 -kPointerSize); | |
| 2726 | 3471 |
| 2727 // Do space-specific GC and retry runtime call. | 3472 // Do space-specific GC and retry runtime call. |
| 2728 GenerateCore(masm, | 3473 GenerateCore(masm, |
| 2729 &throw_normal_exception, | 3474 &throw_normal_exception, |
| 2730 &throw_termination_exception, | 3475 &throw_termination_exception, |
| 2731 &throw_out_of_memory_exception, | 3476 &throw_out_of_memory_exception, |
| 2732 true, | 3477 true, |
| 2733 false, | 3478 false); |
| 2734 0); | |
| 2735 | 3479 |
| 2736 // Do full GC and retry runtime call one final time. | 3480 // Do full GC and retry runtime call one final time. |
| 2737 Failure* failure = Failure::InternalError(); | 3481 Failure* failure = Failure::InternalError(); |
| 2738 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure))); | 3482 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure))); |
| 2739 GenerateCore(masm, | 3483 GenerateCore(masm, |
| 2740 &throw_normal_exception, | 3484 &throw_normal_exception, |
| 2741 &throw_termination_exception, | 3485 &throw_termination_exception, |
| 2742 &throw_out_of_memory_exception, | 3486 &throw_out_of_memory_exception, |
| 2743 true, | 3487 true, |
| 2744 true, | 3488 true); |
| 2745 kPointerSize); | |
| 2746 | 3489 |
| 2747 __ bind(&throw_out_of_memory_exception); | 3490 __ bind(&throw_out_of_memory_exception); |
| 2748 GenerateThrowUncatchable(masm, OUT_OF_MEMORY); | 3491 GenerateThrowUncatchable(masm, OUT_OF_MEMORY); |
| 2749 | 3492 |
| 2750 __ bind(&throw_termination_exception); | 3493 __ bind(&throw_termination_exception); |
| 2751 GenerateThrowUncatchable(masm, TERMINATION); | 3494 GenerateThrowUncatchable(masm, TERMINATION); |
| 2752 | 3495 |
| 2753 __ bind(&throw_normal_exception); | 3496 __ bind(&throw_normal_exception); |
| 2754 GenerateThrowTOS(masm); | 3497 GenerateThrowTOS(masm); |
| 2755 } | 3498 } |
| (...skipping 130 matching lines...) |
| 2886 // Restore callee-saved registers and return. | 3629 // Restore callee-saved registers and return. |
| 2887 #ifdef DEBUG | 3630 #ifdef DEBUG |
| 2888 if (FLAG_debug_code) { | 3631 if (FLAG_debug_code) { |
| 2889 __ mov(lr, Operand(pc)); | 3632 __ mov(lr, Operand(pc)); |
| 2890 } | 3633 } |
| 2891 #endif | 3634 #endif |
| 2892 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); | 3635 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); |
| 2893 } | 3636 } |
| 2894 | 3637 |
| 2895 | 3638 |
| 2896 // Uses registers r0 to r4. Expected input is | 3639 // Uses registers r0 to r4. |
| 2897 // object in r0 (or at sp+1*kPointerSize) and function in | 3640 // Expected input (depending on whether args are in registers or on the stack): |
| 2898 // r1 (or at sp), depending on whether or not | 3641 // * object: r0 or at sp + 1 * kPointerSize. |
| 2899 // args_in_registers() is true. | 3642 // * function: r1 or at sp. |
| 3643 // |
| 3644 // An inlined call site may have been generated before calling this stub. |
| 3645 // In this case the offset to the inline site to patch is passed on the stack, |
| 3646 // in the safepoint slot for register r4. |
| 3647 // (See LCodeGen::DoInstanceOfKnownGlobal) |
| 2900 void InstanceofStub::Generate(MacroAssembler* masm) { | 3648 void InstanceofStub::Generate(MacroAssembler* masm) { |
| 3649 // Call site inlining and patching implies arguments in registers. |
| 3650 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck()); |
| 3651 // ReturnTrueFalse is only implemented for inlined call sites. |
| 3652 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck()); |
| 3653 |
| 2901 // Fixed register usage throughout the stub: | 3654 // Fixed register usage throughout the stub: |
| 2902 const Register object = r0; // Object (lhs). | 3655 const Register object = r0; // Object (lhs). |
| 2903 const Register map = r3; // Map of the object. | 3656 Register map = r3; // Map of the object. |
| 2904 const Register function = r1; // Function (rhs). | 3657 const Register function = r1; // Function (rhs). |
| 2905 const Register prototype = r4; // Prototype of the function. | 3658 const Register prototype = r4; // Prototype of the function. |
| 3659 const Register inline_site = r9; |
| 2906 const Register scratch = r2; | 3660 const Register scratch = r2; |
| 3661 |
| 3662 const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize; |
| 3663 |
| 2907 Label slow, loop, is_instance, is_not_instance, not_js_object; | 3664 Label slow, loop, is_instance, is_not_instance, not_js_object; |
| 2908 if (!args_in_registers()) { | 3665 |
| 3666 if (!HasArgsInRegisters()) { |
| 2909 __ ldr(object, MemOperand(sp, 1 * kPointerSize)); | 3667 __ ldr(object, MemOperand(sp, 1 * kPointerSize)); |
| 2910 __ ldr(function, MemOperand(sp, 0)); | 3668 __ ldr(function, MemOperand(sp, 0)); |
| 2911 } | 3669 } |
| 2912 | 3670 |
| 2913 // Check that the left hand is a JS object and load map. | 3671 // Check that the left hand is a JS object and load map. |
| 2914 __ BranchOnSmi(object, &not_js_object); | 3672 __ JumpIfSmi(object, &not_js_object); |
| 2915 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); | 3673 __ IsObjectJSObjectType(object, map, scratch, ¬_js_object); |
| 2916 | 3674 |
| 2917 // Look up the function and the map in the instanceof cache. | 3675 // If there is a call site cache don't look in the global cache, but do the |
| 2918 Label miss; | 3676 // real lookup and update the call site cache. |
| 2919 __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); | 3677 if (!HasCallSiteInlineCheck()) { |
| 2920 __ cmp(function, ip); | 3678 Label miss; |
| 2921 __ b(ne, &miss); | 3679 __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); |
| 2922 __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); | 3680 __ cmp(function, ip); |
| 2923 __ cmp(map, ip); | 3681 __ b(ne, &miss); |
| 2924 __ b(ne, &miss); | 3682 __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); |
| 2925 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 3683 __ cmp(map, ip); |
| 2926 __ Ret(args_in_registers() ? 0 : 2); | 3684 __ b(ne, &miss); |
| 3685 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3686 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 2927 | 3687 |
| 2928 __ bind(&miss); | 3688 __ bind(&miss); |
| 3689 } |
| 3690 |
| 3691 // Get the prototype of the function. |
| 2929 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); | 3692 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); |
| 2930 | 3693 |
| 2931 // Check that the function prototype is a JS object. | 3694 // Check that the function prototype is a JS object. |
| 2932 __ BranchOnSmi(prototype, &slow); | 3695 __ JumpIfSmi(prototype, &slow); |
| 2933 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); | 3696 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); |
| 2934 | 3697 |
| 2935 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 3698 // Update the global instanceof or call site inlined cache with the current |
| 2936 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); | 3699 // map and function. The cached answer will be set when it is known below. |
| 3700 if (!HasCallSiteInlineCheck()) { |
| 3701 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); |
| 3702 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); |
| 3703 } else { |
| 3704 ASSERT(HasArgsInRegisters()); |
| 3705 // Patch the (relocated) inlined map check. |
| 3706 |
| 3707 // The offset was stored in the r4 safepoint slot. |
| 3708 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal) |
| 3709 __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4)); |
| 3710 __ sub(inline_site, lr, scratch); |
| 3711 // Get the map location in scratch and patch it. |
| 3712 __ GetRelocatedValueLocation(inline_site, scratch); |
| 3713 __ str(map, MemOperand(scratch)); |
| 3714 } |
| 2937 | 3715 |
| 2938 // Register mapping: r3 is object map and r4 is function prototype. | 3716 // Register mapping: r3 is object map and r4 is function prototype. |
| 2939 // Get prototype of object into r2. | 3717 // Get prototype of object into r2. |
| 2940 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 3718 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); |
| 2941 | 3719 |
| 3720 // We don't need map any more. Use it as a scratch register. |
| 3721 Register scratch2 = map; |
| 3722 map = no_reg; |
| 3723 |
| 2942 // Loop through the prototype chain looking for the function prototype. | 3724 // Loop through the prototype chain looking for the function prototype. |
| 3725 __ LoadRoot(scratch2, Heap::kNullValueRootIndex); |
| 2943 __ bind(&loop); | 3726 __ bind(&loop); |
| 2944 __ cmp(scratch, Operand(prototype)); | 3727 __ cmp(scratch, Operand(prototype)); |
| 2945 __ b(eq, &is_instance); | 3728 __ b(eq, &is_instance); |
| 2946 __ LoadRoot(ip, Heap::kNullValueRootIndex); | 3729 __ cmp(scratch, scratch2); |
| 2947 __ cmp(scratch, ip); | |
| 2948 __ b(eq, &is_not_instance); | 3730 __ b(eq, &is_not_instance); |
| 2949 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 3731 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); |
| 2950 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 3732 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); |
| 2951 __ jmp(&loop); | 3733 __ jmp(&loop); |
| 2952 | 3734 |
| 2953 __ bind(&is_instance); | 3735 __ bind(&is_instance); |
| 2954 __ mov(r0, Operand(Smi::FromInt(0))); | 3736 if (!HasCallSiteInlineCheck()) { |
| 2955 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 3737 __ mov(r0, Operand(Smi::FromInt(0))); |
| 2956 __ Ret(args_in_registers() ? 0 : 2); | 3738 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3739 } else { |
| 3740 // Patch the call site to return true. |
| 3741 __ LoadRoot(r0, Heap::kTrueValueRootIndex); |
| 3742 __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
| 3743 // Get the boolean result location in scratch and patch it. |
| 3744 __ GetRelocatedValueLocation(inline_site, scratch); |
| 3745 __ str(r0, MemOperand(scratch)); |
| 3746 |
| 3747 if (!ReturnTrueFalseObject()) { |
| 3748 __ mov(r0, Operand(Smi::FromInt(0))); |
| 3749 } |
| 3750 } |
| 3751 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 2957 | 3752 |
| 2958 __ bind(&is_not_instance); | 3753 __ bind(&is_not_instance); |
| 2959 __ mov(r0, Operand(Smi::FromInt(1))); | 3754 if (!HasCallSiteInlineCheck()) { |
| 2960 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 3755 __ mov(r0, Operand(Smi::FromInt(1))); |
| 2961 __ Ret(args_in_registers() ? 0 : 2); | 3756 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); |
| 3757 } else { |
| 3758 // Patch the call site to return false. |
| 3759 __ LoadRoot(r0, Heap::kFalseValueRootIndex); |
| 3760 __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); |
| 3761 // Get the boolean result location in scratch and patch it. |
| 3762 __ GetRelocatedValueLocation(inline_site, scratch); |
| 3763 __ str(r0, MemOperand(scratch)); |
| 3764 |
| 3765 if (!ReturnTrueFalseObject()) { |
| 3766 __ mov(r0, Operand(Smi::FromInt(1))); |
| 3767 } |
| 3768 } |
| 3769 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 2962 | 3770 |
| 2963 Label object_not_null, object_not_null_or_smi; | 3771 Label object_not_null, object_not_null_or_smi; |
| 2964 __ bind(&not_js_object); | 3772 __ bind(&not_js_object); |
| 2965 // Before the null, smi and string value checks, check that the rhs is a function, | 3773 // Before the null, smi and string value checks, check that the rhs is a function, |
| 2966 // as an exception needs to be thrown for a non-function rhs. | 3774 // as an exception needs to be thrown for a non-function rhs. |
| 2967 __ BranchOnSmi(function, &slow); | 3775 __ JumpIfSmi(function, &slow); |
| 2968 __ CompareObjectType(function, map, scratch, JS_FUNCTION_TYPE); | 3776 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE); |
| 2969 __ b(ne, &slow); | 3777 __ b(ne, &slow); |
| 2970 | 3778 |
| 2971 // Null is not instance of anything. | 3779 // Null is not instance of anything. |
| 2972 __ cmp(scratch, Operand(Factory::null_value())); | 3780 __ cmp(scratch, Operand(Factory::null_value())); |
| 2973 __ b(ne, &object_not_null); | 3781 __ b(ne, &object_not_null); |
| 2974 __ mov(r0, Operand(Smi::FromInt(1))); | 3782 __ mov(r0, Operand(Smi::FromInt(1))); |
| 2975 __ Ret(args_in_registers() ? 0 : 2); | 3783 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 2976 | 3784 |
| 2977 __ bind(&object_not_null); | 3785 __ bind(&object_not_null); |
| 2978 // Smi values are not instances of anything. | 3786 // Smi values are not instances of anything. |
| 2979 __ BranchOnNotSmi(object, &object_not_null_or_smi); | 3787 __ JumpIfNotSmi(object, &object_not_null_or_smi); |
| 2980 __ mov(r0, Operand(Smi::FromInt(1))); | 3788 __ mov(r0, Operand(Smi::FromInt(1))); |
| 2981 __ Ret(args_in_registers() ? 0 : 2); | 3789 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 2982 | 3790 |
| 2983 __ bind(&object_not_null_or_smi); | 3791 __ bind(&object_not_null_or_smi); |
| 2984 // String values are not instances of anything. | 3792 // String values are not instances of anything. |
| 2985 __ IsObjectJSStringType(object, scratch, &slow); | 3793 __ IsObjectJSStringType(object, scratch, &slow); |
| 2986 __ mov(r0, Operand(Smi::FromInt(1))); | 3794 __ mov(r0, Operand(Smi::FromInt(1))); |
| 2987 __ Ret(args_in_registers() ? 0 : 2); | 3795 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 2988 | 3796 |
| 2989 // Slow-case. Tail call builtin. | 3797 // Slow-case. Tail call builtin. |
| 2990 if (args_in_registers()) { | 3798 __ bind(&slow); |
| 3799 if (!ReturnTrueFalseObject()) { |
| 3800 if (HasArgsInRegisters()) { |
| 3801 __ Push(r0, r1); |
| 3802 } |
| 3803 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); |
| 3804 } else { |
| 3805 __ EnterInternalFrame(); |
| 2991 __ Push(r0, r1); | 3806 __ Push(r0, r1); |
| 3807 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS); |
| 3808 __ LeaveInternalFrame(); |
| 3809 __ cmp(r0, Operand(0)); |
| 3810 __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); |
| 3811 __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); |
| 3812 __ Ret(HasArgsInRegisters() ? 0 : 2); |
| 2992 } | 3813 } |
| 2993 __ bind(&slow); | |
| 2994 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS); | |
| 2995 } | 3814 } |
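For reference, the loop in the middle of this stub is the usual instanceof walk: follow the object's prototype chain until it reaches either the function's prototype (true) or null (false). A schematic rendering, with GetPrototype standing in for the map/prototype loads:

    // Schematic of the prototype-chain walk; GetPrototype is a placeholder for
    // loading object->map()->prototype().
    template <typename Object>
    bool IsInstance(Object* object_prototype, Object* function_prototype,
                    Object* null_value, Object* (*GetPrototype)(Object*)) {
      for (Object* p = object_prototype; ; p = GetPrototype(p)) {
        if (p == function_prototype) return true;   // is_instance
        if (p == null_value) return false;          // is_not_instance
      }
    }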
| 2996 | 3815 |
| 2997 | 3816 |
| 3817 Register InstanceofStub::left() { return r0; } |
| 3818 |
| 3819 |
| 3820 Register InstanceofStub::right() { return r1; } |
| 3821 |
| 3822 |
| 2998 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 3823 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { |
| 2999 // The displacement is the offset of the last parameter (if any) | 3824 // The displacement is the offset of the last parameter (if any) |
| 3000 // relative to the frame pointer. | 3825 // relative to the frame pointer. |
| 3001 static const int kDisplacement = | 3826 static const int kDisplacement = |
| 3002 StandardFrameConstants::kCallerSPOffset - kPointerSize; | 3827 StandardFrameConstants::kCallerSPOffset - kPointerSize; |
| 3003 | 3828 |
| 3004 // Check that the key is a smi. | 3829 // Check that the key is a smi. |
| 3005 Label slow; | 3830 Label slow; |
| 3006 __ BranchOnNotSmi(r1, &slow); | 3831 __ JumpIfNotSmi(r1, &slow); |
| 3007 | 3832 |
| 3008 // Check if the calling frame is an arguments adaptor frame. | 3833 // Check if the calling frame is an arguments adaptor frame. |
| 3009 Label adaptor; | 3834 Label adaptor; |
| 3010 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 3835 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
| 3011 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | 3836 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); |
| 3012 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 3837 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); |
| 3013 __ b(eq, &adaptor); | 3838 __ b(eq, &adaptor); |
| 3014 | 3839 |
| 3015 // Check index against formal parameters count limit passed in | 3840 // Check index against formal parameters count limit passed in |
| 3016 // through register r0. Use unsigned comparison to get negative | 3841 // through register r0. Use unsigned comparison to get negative |
| 3017 // check for free. | 3842 // check for free. |
| 3018 __ cmp(r1, r0); | 3843 __ cmp(r1, r0); |
| 3019 __ b(cs, &slow); | 3844 __ b(hs, &slow); |
| 3020 | 3845 |
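The "negative check for free" mentioned above comes from comparing the tagged values as unsigned numbers: a negative key has its top bit set and so compares above any valid argument count, letting one unsigned branch cover both bounds. Sketch:

    #include <cstdint>

    // Mirrors "cmp(r1, r0); b(hs, &slow)": in range iff key < count when both
    // are viewed as unsigned; negative keys wrap to huge values and fail.
    inline bool KeyInRange(int32_t key, int32_t count) {
      return static_cast<uint32_t>(key) < static_cast<uint32_t>(count);
    }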
| 3021 // Read the argument from the stack and return it. | 3846 // Read the argument from the stack and return it. |
| 3022 __ sub(r3, r0, r1); | 3847 __ sub(r3, r0, r1); |
| 3023 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); | 3848 __ add(r3, fp, Operand(r3, LSL, kPointerSizeLog2 - kSmiTagSize)); |
| 3024 __ ldr(r0, MemOperand(r3, kDisplacement)); | 3849 __ ldr(r0, MemOperand(r3, kDisplacement)); |
| 3025 __ Jump(lr); | 3850 __ Jump(lr); |
| 3026 | 3851 |
| 3027 // Arguments adaptor case: Check index against actual arguments | 3852 // Arguments adaptor case: Check index against actual arguments |
| 3028 // limit found in the arguments adaptor frame. Use unsigned | 3853 // limit found in the arguments adaptor frame. Use unsigned |
| 3029 // comparison to get negative check for free. | 3854 // comparison to get negative check for free. |
| (...skipping 170 matching lines...) |
| 3200 STATIC_ASSERT(kSmiTag == 0); | 4025 STATIC_ASSERT(kSmiTag == 0); |
| 3201 __ tst(r0, Operand(kSmiTagMask)); | 4026 __ tst(r0, Operand(kSmiTagMask)); |
| 3202 __ b(eq, &runtime); | 4027 __ b(eq, &runtime); |
| 3203 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); | 4028 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); |
| 3204 __ b(ne, &runtime); | 4029 __ b(ne, &runtime); |
| 3205 | 4030 |
| 3206 // Check that the RegExp has been compiled (data contains a fixed array). | 4031 // Check that the RegExp has been compiled (data contains a fixed array). |
| 3207 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); | 4032 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); |
| 3208 if (FLAG_debug_code) { | 4033 if (FLAG_debug_code) { |
| 3209 __ tst(regexp_data, Operand(kSmiTagMask)); | 4034 __ tst(regexp_data, Operand(kSmiTagMask)); |
| 3210 __ Check(nz, "Unexpected type for RegExp data, FixedArray expected"); | 4035 __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); |
| 3211 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); | 4036 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); |
| 3212 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); | 4037 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); |
| 3213 } | 4038 } |
| 3214 | 4039 |
| 3215 // regexp_data: RegExp data (FixedArray) | 4040 // regexp_data: RegExp data (FixedArray) |
| 3216 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 4041 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
| 3217 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 4042 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); |
| 3218 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | 4043 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); |
| 3219 __ b(ne, &runtime); | 4044 __ b(ne, &runtime); |
| 3220 | 4045 |
| (...skipping 82 matching lines...) |
| 3303 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); | 4128 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); |
| 3304 __ LoadRoot(r1, Heap::kEmptyStringRootIndex); | 4129 __ LoadRoot(r1, Heap::kEmptyStringRootIndex); |
| 3305 __ cmp(r0, r1); | 4130 __ cmp(r0, r1); |
| 3306 __ b(ne, &runtime); | 4131 __ b(ne, &runtime); |
| 3307 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 4132 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); |
| 3308 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 4133 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
| 3309 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 4134 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); |
| 3310 // Is first part a flat string? | 4135 // Is first part a flat string? |
| 3311 STATIC_ASSERT(kSeqStringTag == 0); | 4136 STATIC_ASSERT(kSeqStringTag == 0); |
| 3312 __ tst(r0, Operand(kStringRepresentationMask)); | 4137 __ tst(r0, Operand(kStringRepresentationMask)); |
| 3313 __ b(nz, &runtime); | 4138 __ b(ne, &runtime); |
| 3314 | 4139 |
| 3315 __ bind(&seq_string); | 4140 __ bind(&seq_string); |
| 3316 // subject: Subject string | 4141 // subject: Subject string |
| 3317 // regexp_data: RegExp data (FixedArray) | 4142 // regexp_data: RegExp data (FixedArray) |
| 3318 // r0: Instance type of subject string | 4143 // r0: Instance type of subject string |
| 3319 STATIC_ASSERT(4 == kAsciiStringTag); | 4144 STATIC_ASSERT(4 == kAsciiStringTag); |
| 3320 STATIC_ASSERT(kTwoByteStringTag == 0); | 4145 STATIC_ASSERT(kTwoByteStringTag == 0); |
| 3321 // Find the code object based on the assumptions above. | 4146 // Find the code object based on the assumptions above. |
| 3322 __ and_(r0, r0, Operand(kStringEncodingMask)); | 4147 __ and_(r0, r0, Operand(kStringEncodingMask)); |
| 3323 __ mov(r3, Operand(r0, ASR, 2), SetCC); | 4148 __ mov(r3, Operand(r0, ASR, 2), SetCC); |
| (...skipping 17 matching lines...) |
| 3341 | 4166 |
| 3342 // r1: previous index | 4167 // r1: previous index |
| 3343 // r3: encoding of subject string (1 if ascii, 0 if two_byte); | 4168 // r3: encoding of subject string (1 if ascii, 0 if two_byte); |
| 3344 // r7: code | 4169 // r7: code |
| 3345 // subject: Subject string | 4170 // subject: Subject string |
| 3346 // regexp_data: RegExp data (FixedArray) | 4171 // regexp_data: RegExp data (FixedArray) |
| 3347 // All checks done. Now push arguments for native regexp code. | 4172 // All checks done. Now push arguments for native regexp code. |
| 3348 __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); | 4173 __ IncrementCounter(&Counters::regexp_entry_native, 1, r0, r2); |
| 3349 | 4174 |
| 3350 static const int kRegExpExecuteArguments = 7; | 4175 static const int kRegExpExecuteArguments = 7; |
| 3351 __ push(lr); | 4176 static const int kParameterRegisters = 4; |
| 3352 __ PrepareCallCFunction(kRegExpExecuteArguments, r0); | 4177 __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); |
| 3353 | 4178 |
| 3354 // Argument 7 (sp[8]): Indicate that this is a direct call from JavaScript. | 4179 // Stack pointer now points to cell where return address is to be written. |
| 4180 // Arguments are before that on the stack or in registers. |
| 4181 |
| 4182 // Argument 7 (sp[12]): Indicate that this is a direct call from JavaScript. |
| 3355 __ mov(r0, Operand(1)); | 4183 __ mov(r0, Operand(1)); |
| 3356 __ str(r0, MemOperand(sp, 2 * kPointerSize)); | 4184 __ str(r0, MemOperand(sp, 3 * kPointerSize)); |
| 3357 | 4185 |
| 3358 // Argument 6 (sp[4]): Start (high end) of backtracking stack memory area. | 4186 // Argument 6 (sp[8]): Start (high end) of backtracking stack memory area. |
| 3359 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); | 4187 __ mov(r0, Operand(address_of_regexp_stack_memory_address)); |
| 3360 __ ldr(r0, MemOperand(r0, 0)); | 4188 __ ldr(r0, MemOperand(r0, 0)); |
| 3361 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); | 4189 __ mov(r2, Operand(address_of_regexp_stack_memory_size)); |
| 3362 __ ldr(r2, MemOperand(r2, 0)); | 4190 __ ldr(r2, MemOperand(r2, 0)); |
| 3363 __ add(r0, r0, Operand(r2)); | 4191 __ add(r0, r0, Operand(r2)); |
| 4192 __ str(r0, MemOperand(sp, 2 * kPointerSize)); |
| 4193 |
| 4194 // Argument 5 (sp[4]): static offsets vector buffer. |
| 4195 __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); |
| 3364 __ str(r0, MemOperand(sp, 1 * kPointerSize)); | 4196 __ str(r0, MemOperand(sp, 1 * kPointerSize)); |
| 3365 | 4197 |
| 3366 // Argument 5 (sp[0]): static offsets vector buffer. | |
| 3367 __ mov(r0, Operand(ExternalReference::address_of_static_offsets_vector())); | |
| 3368 __ str(r0, MemOperand(sp, 0 * kPointerSize)); | |
| 3369 | |
| 3370 // For arguments 4 and 3 get string length, calculate start of string data and | 4198 // For arguments 4 and 3 get string length, calculate start of string data and |
| 3371 // calculate the shift of the index (0 for ASCII and 1 for two byte). | 4199 // calculate the shift of the index (0 for ASCII and 1 for two byte). |
| 3372 __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); | 4200 __ ldr(r0, FieldMemOperand(subject, String::kLengthOffset)); |
| 3373 __ mov(r0, Operand(r0, ASR, kSmiTagSize)); | 4201 __ mov(r0, Operand(r0, ASR, kSmiTagSize)); |
| 3374 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); | 4202 STATIC_ASSERT(SeqAsciiString::kHeaderSize == SeqTwoByteString::kHeaderSize); |
| 3375 __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); | 4203 __ add(r9, subject, Operand(SeqAsciiString::kHeaderSize - kHeapObjectTag)); |
| 3376 __ eor(r3, r3, Operand(1)); | 4204 __ eor(r3, r3, Operand(1)); |
| 3377 // Argument 4 (r3): End of string data | 4205 // Argument 4 (r3): End of string data |
| 3378 // Argument 3 (r2): Start of string data | 4206 // Argument 3 (r2): Start of string data |
| 3379 __ add(r2, r9, Operand(r1, LSL, r3)); | 4207 __ add(r2, r9, Operand(r1, LSL, r3)); |
| 3380 __ add(r3, r9, Operand(r0, LSL, r3)); | 4208 __ add(r3, r9, Operand(r0, LSL, r3)); |
| 3381 | 4209 |
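The two adds above compute arguments 3 and 4 from the untagged previous index (r1), the untagged length (r0) and the encoding-derived shift (r3). The same address arithmetic in plain C++, as a sketch with illustrative names:

```cpp
#include <cstddef>
#include <cstdint>

struct StringSlice {
  const uint8_t* start;  // Argument 3: first character to start matching from
  const uint8_t* end;    // Argument 4: one past the last character
};

StringSlice ComputeSlice(const uint8_t* chars,  // data just past the header
                         size_t previous_index,
                         size_t length,
                         bool is_two_byte) {
  const int shift = is_two_byte ? 1 : 0;  // log2 of the character size
  return StringSlice{chars + (previous_index << shift),
                     chars + (length << shift)};
}
```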
| 3382 // Argument 2 (r1): Previous index. | 4210 // Argument 2 (r1): Previous index. |
| 3383 // Already there | 4211 // Already there |
| 3384 | 4212 |
| 3385 // Argument 1 (r0): Subject string. | 4213 // Argument 1 (r0): Subject string. |
| 3386 __ mov(r0, subject); | 4214 __ mov(r0, subject); |
| 3387 | 4215 |
| 3388 // Locate the code entry and call it. | 4216 // Locate the code entry and call it. |
| 3389 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4217 __ add(r7, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 3390 __ CallCFunction(r7, kRegExpExecuteArguments); | 4218 DirectCEntryStub stub; |
| 3391 __ pop(lr); | 4219 stub.GenerateCall(masm, r7); |
| 4220 |
| 4221 __ LeaveExitFrame(false, no_reg); |
| 3392 | 4222 |
| 3393 // r0: result | 4223 // r0: result |
| 3394 // subject: subject string (callee saved) | 4224 // subject: subject string (callee saved) |
| 3395 // regexp_data: RegExp data (callee saved) | 4225 // regexp_data: RegExp data (callee saved) |
| 3396 // last_match_info_elements: Last match info elements (callee saved) | 4226 // last_match_info_elements: Last match info elements (callee saved) |
| 3397 | 4227 |
| 3398 // Check the result. | 4228 // Check the result. |
| 3399 Label success; | 4229 Label success; |
| 4230 |
| 3400 __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); | 4231 __ cmp(r0, Operand(NativeRegExpMacroAssembler::SUCCESS)); |
| 3401 __ b(eq, &success); | 4232 __ b(eq, &success); |
| 3402 Label failure; | 4233 Label failure; |
| 3403 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); | 4234 __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); |
| 3404 __ b(eq, &failure); | 4235 __ b(eq, &failure); |
| 3405 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | 4236 __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); |
| 3406 // If it is not an exception, it can only be a retry. Handle that in the runtime system. | 4237 // If it is not an exception, it can only be a retry. Handle that in the runtime system. |
| 3407 __ b(ne, &runtime); | 4238 __ b(ne, &runtime); |
| 3408 // Result must now be exception. If there is no pending exception already, a | 4239 // Result must now be exception. If there is no pending exception already, a |
| 3409 // stack overflow (on the backtrack stack) was detected in RegExp code, but | 4240 // stack overflow (on the backtrack stack) was detected in RegExp code, but |
| 3410 // the exception has not been created yet. Handle that in the runtime system. | 4241 // the exception has not been created yet. Handle that in the runtime system. |
| 3411 // TODO(592): Rerunning the RegExp to get the stack overflow exception. | 4242 // TODO(592): Rerunning the RegExp to get the stack overflow exception. |
| 3412 __ mov(r0, Operand(ExternalReference::the_hole_value_location())); | 4243 __ mov(r1, Operand(ExternalReference::the_hole_value_location())); |
| 3413 __ ldr(r0, MemOperand(r0, 0)); | |
| 3414 __ mov(r1, Operand(ExternalReference(Top::k_pending_exception_address))); | |
| 3415 __ ldr(r1, MemOperand(r1, 0)); | 4244 __ ldr(r1, MemOperand(r1, 0)); |
| 4245 __ mov(r2, Operand(ExternalReference(Top::k_pending_exception_address))); |
| 4246 __ ldr(r0, MemOperand(r2, 0)); |
| 3416 __ cmp(r0, r1); | 4247 __ cmp(r0, r1); |
| 3417 __ b(eq, &runtime); | 4248 __ b(eq, &runtime); |
| 4249 |
| 4250 __ str(r1, MemOperand(r2, 0)); // Clear pending exception. |
| 4251 |
| 4252 // Check if the exception is a termination. If so, throw as uncatchable. |
| 4253 __ LoadRoot(ip, Heap::kTerminationExceptionRootIndex); |
| 4254 __ cmp(r0, ip); |
| 4255 Label termination_exception; |
| 4256 __ b(eq, &termination_exception); |
| 4257 |
| 4258 __ Throw(r0); // Expects thrown value in r0. |
| 4259 |
| 4260 __ bind(&termination_exception); |
| 4261 __ ThrowUncatchable(TERMINATION, r0); // Expects thrown value in r0. |
| 4262 |
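Taken together, the checks above and the labels that follow implement a four-way dispatch on the native code's return value plus a pending-exception protocol: the hole in the pending-exception slot means the exception has not been materialized yet. A hedged sketch of that control flow (only the enumerator names come from NativeRegExpMacroAssembler; the values and helper are illustrative):

```cpp
enum class RegExpResult { SUCCESS, FAILURE, EXCEPTION, RETRY };

enum class NextStep {
  BuildMatchInfo,      // &success: fill in the last match info
  ReturnNull,          // &failure: return the null value
  Rethrow,             // __ Throw(r0)
  RethrowUncatchable,  // __ ThrowUncatchable(TERMINATION, r0)
  CallRuntime          // &runtime: let the runtime redo the work
};

NextStep HandleRegExpResult(RegExpResult r,
                            bool exception_pending,
                            bool exception_is_termination) {
  switch (r) {
    case RegExpResult::SUCCESS: return NextStep::BuildMatchInfo;
    case RegExpResult::FAILURE: return NextStep::ReturnNull;
    case RegExpResult::EXCEPTION:
      if (!exception_pending) return NextStep::CallRuntime;  // overflow not created yet
      return exception_is_termination ? NextStep::RethrowUncatchable
                                      : NextStep::Rethrow;
    default:  // RETRY
      return NextStep::CallRuntime;
  }
}
```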
| 3418 __ bind(&failure); | 4263 __ bind(&failure); |
| 3419 // For failure and exception return null. | 4264 // For failure and exception return null. |
| 3420 __ mov(r0, Operand(Factory::null_value())); | 4265 __ mov(r0, Operand(Factory::null_value())); |
| 3421 __ add(sp, sp, Operand(4 * kPointerSize)); | 4266 __ add(sp, sp, Operand(4 * kPointerSize)); |
| 3422 __ Ret(); | 4267 __ Ret(); |
| 3423 | 4268 |
| 3424 // Process the result from the native regexp code. | 4269 // Process the result from the native regexp code. |
| 3425 __ bind(&success); | 4270 __ bind(&success); |
| 3426 __ ldr(r1, | 4271 __ ldr(r1, |
| 3427 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 4272 FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); |
| (...skipping 154 matching lines...) |
| 3582 | 4427 |
| 3583 // If the receiver might be a value (string, number or boolean) check for this | 4428 // If the receiver might be a value (string, number or boolean) check for this |
| 3584 // and box it if it is. | 4429 // and box it if it is. |
| 3585 if (ReceiverMightBeValue()) { | 4430 if (ReceiverMightBeValue()) { |
| 3586 // Get the receiver from the stack. | 4431 // Get the receiver from the stack. |
| 3587 // function, receiver [, arguments] | 4432 // function, receiver [, arguments] |
| 3588 Label receiver_is_value, receiver_is_js_object; | 4433 Label receiver_is_value, receiver_is_js_object; |
| 3589 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize)); | 4434 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize)); |
| 3590 | 4435 |
| 3591 // Check if receiver is a smi (which is a number value). | 4436 // Check if receiver is a smi (which is a number value). |
| 3592 __ BranchOnSmi(r1, &receiver_is_value); | 4437 __ JumpIfSmi(r1, &receiver_is_value); |
| 3593 | 4438 |
| 3594 // Check if the receiver is a valid JS object. | 4439 // Check if the receiver is a valid JS object. |
| 3595 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE); | 4440 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE); |
| 3596 __ b(ge, &receiver_is_js_object); | 4441 __ b(ge, &receiver_is_js_object); |
| 3597 | 4442 |
| 3598 // Call the runtime to box the value. | 4443 // Call the runtime to box the value. |
| 3599 __ bind(&receiver_is_value); | 4444 __ bind(&receiver_is_value); |
| 3600 __ EnterInternalFrame(); | 4445 __ EnterInternalFrame(); |
| 3601 __ push(r1); | 4446 __ push(r1); |
| 3602 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); | 4447 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); |
| 3603 __ LeaveInternalFrame(); | 4448 __ LeaveInternalFrame(); |
| 3604 __ str(r0, MemOperand(sp, argc_ * kPointerSize)); | 4449 __ str(r0, MemOperand(sp, argc_ * kPointerSize)); |
| 3605 | 4450 |
| 3606 __ bind(&receiver_is_js_object); | 4451 __ bind(&receiver_is_js_object); |
| 3607 } | 4452 } |
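For a non-strict call the stub has to box primitive receivers before invoking the function; only genuine JS objects pass through untouched. A minimal sketch of that decision, assuming hypothetical Value/ToObject stand-ins for the tagged value and Builtins::TO_OBJECT:

```cpp
struct Value {
  bool is_js_object;  // true once the receiver is a real JSObject
};

// Stands in for the Builtins::TO_OBJECT call made under an internal frame.
Value ToObject(Value primitive) {
  primitive.is_js_object = true;
  return primitive;
}

Value PrepareReceiver(Value receiver) {
  if (receiver.is_js_object) return receiver;  // fast path: nothing to do
  return ToObject(receiver);                   // smi, number, string, boolean: box it
}
```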
| 3608 | 4453 |
| 3609 // Get the function to call from the stack. | 4454 // Get the function to call from the stack. |
| 3610 // function, receiver [, arguments] | 4455 // function, receiver [, arguments] |
| 3611 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); | 4456 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); |
| 3612 | 4457 |
| 3613 // Check that the function is really a JavaScript function. | 4458 // Check that the function is really a JavaScript function. |
| 3614 // r1: pushed function (to be verified) | 4459 // r1: pushed function (to be verified) |
| 3615 __ BranchOnSmi(r1, &slow); | 4460 __ JumpIfSmi(r1, &slow); |
| 3616 // Get the map of the function object. | 4461 // Get the map of the function object. |
| 3617 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); | 4462 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); |
| 3618 __ b(ne, &slow); | 4463 __ b(ne, &slow); |
| 3619 | 4464 |
| 3620 // Fast-case: Invoke the function now. | 4465 // Fast-case: Invoke the function now. |
| 3621 // r1: pushed function | 4466 // r1: pushed function |
| 3622 ParameterCount actual(argc_); | 4467 ParameterCount actual(argc_); |
| 3623 __ InvokeFunction(r1, actual, JUMP_FUNCTION); | 4468 __ InvokeFunction(r1, actual, JUMP_FUNCTION); |
| 3624 | 4469 |
| 3625 // Slow-case: Non-function called. | 4470 // Slow-case: Non-function called. |
| (...skipping 77 matching lines...) |
| 3703 return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) | 4548 return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) |
| 3704 | RegisterField::encode(lhs_.is(r0)) | 4549 | RegisterField::encode(lhs_.is(r0)) |
| 3705 | StrictField::encode(strict_) | 4550 | StrictField::encode(strict_) |
| 3706 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) | 4551 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) |
| 3707 | IncludeNumberCompareField::encode(include_number_compare_) | 4552 | IncludeNumberCompareField::encode(include_number_compare_) |
| 3708 | IncludeSmiCompareField::encode(include_smi_compare_); | 4553 | IncludeSmiCompareField::encode(include_smi_compare_); |
| 3709 } | 4554 } |
| 3710 | 4555 |
| 3711 | 4556 |
| 3712 // StringCharCodeAtGenerator | 4557 // StringCharCodeAtGenerator |
| 3713 | |
| 3714 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 4558 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { |
| 3715 Label flat_string; | 4559 Label flat_string; |
| 3716 Label ascii_string; | 4560 Label ascii_string; |
| 3717 Label got_char_code; | 4561 Label got_char_code; |
| 3718 | 4562 |
| 3719 // If the receiver is a smi trigger the non-string case. | 4563 // If the receiver is a smi trigger the non-string case. |
| 3720 __ BranchOnSmi(object_, receiver_not_string_); | 4564 __ JumpIfSmi(object_, receiver_not_string_); |
| 3721 | 4565 |
| 3722 // Fetch the instance type of the receiver into result register. | 4566 // Fetch the instance type of the receiver into result register. |
| 3723 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 4567 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 3724 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 4568 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 3725 // If the receiver is not a string trigger the non-string case. | 4569 // If the receiver is not a string trigger the non-string case. |
| 3726 __ tst(result_, Operand(kIsNotStringMask)); | 4570 __ tst(result_, Operand(kIsNotStringMask)); |
| 3727 __ b(ne, receiver_not_string_); | 4571 __ b(ne, receiver_not_string_); |
| 3728 | 4572 |
| 3729 // If the index is non-smi trigger the non-smi case. | 4573 // If the index is non-smi trigger the non-smi case. |
| 3730 __ BranchOnNotSmi(index_, &index_not_smi_); | 4574 __ JumpIfNotSmi(index_, &index_not_smi_); |
| 3731 | 4575 |
| 3732 // Put smi-tagged index into scratch register. | 4576 // Put smi-tagged index into scratch register. |
| 3733 __ mov(scratch_, index_); | 4577 __ mov(scratch_, index_); |
| 3734 __ bind(&got_smi_index_); | 4578 __ bind(&got_smi_index_); |
| 3735 | 4579 |
| 3736 // Check for index out of range. | 4580 // Check for index out of range. |
| 3737 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); | 4581 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); |
| 3738 __ cmp(ip, Operand(scratch_)); | 4582 __ cmp(ip, Operand(scratch_)); |
| 3739 __ b(ls, index_out_of_range_); | 4583 __ b(ls, index_out_of_range_); |
| 3740 | 4584 |
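Both the length and the index are still smi-tagged at this point, and smi tagging is a plain left shift, so the tagged words compare in the same order as the raw values; the unsigned comparison ("ls") also rejects negative indices, which wrap to huge unsigned values. A sketch of the check, with illustrative names:

```cpp
#include <cstdint>

bool SmiIndexOutOfRange(int32_t tagged_length, int32_t tagged_index) {
  // b(ls, ...): branch when length <= index, compared as unsigned words.
  return static_cast<uint32_t>(tagged_length) <=
         static_cast<uint32_t>(tagged_index);
}
```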
| (...skipping 15 matching lines...) |
| 3756 __ LoadRoot(ip, Heap::kEmptyStringRootIndex); | 4600 __ LoadRoot(ip, Heap::kEmptyStringRootIndex); |
| 3757 __ cmp(result_, Operand(ip)); | 4601 __ cmp(result_, Operand(ip)); |
| 3758 __ b(ne, &call_runtime_); | 4602 __ b(ne, &call_runtime_); |
| 3759 // Get the first of the two strings and load its instance type. | 4603 // Get the first of the two strings and load its instance type. |
| 3760 __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); | 4604 __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); |
| 3761 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 4605 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 3762 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 4606 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 3763 // If the first cons component is also non-flat, then go to runtime. | 4607 // If the first cons component is also non-flat, then go to runtime. |
| 3764 STATIC_ASSERT(kSeqStringTag == 0); | 4608 STATIC_ASSERT(kSeqStringTag == 0); |
| 3765 __ tst(result_, Operand(kStringRepresentationMask)); | 4609 __ tst(result_, Operand(kStringRepresentationMask)); |
| 3766 __ b(nz, &call_runtime_); | 4610 __ b(ne, &call_runtime_); |
| 3767 | 4611 |
| 3768 // Check for 1-byte or 2-byte string. | 4612 // Check for 1-byte or 2-byte string. |
| 3769 __ bind(&flat_string); | 4613 __ bind(&flat_string); |
| 3770 STATIC_ASSERT(kAsciiStringTag != 0); | 4614 STATIC_ASSERT(kAsciiStringTag != 0); |
| 3771 __ tst(result_, Operand(kStringEncodingMask)); | 4615 __ tst(result_, Operand(kStringEncodingMask)); |
| 3772 __ b(nz, &ascii_string); | 4616 __ b(ne, &ascii_string); |
| 3773 | 4617 |
| 3774 // 2-byte string. | 4618 // 2-byte string. |
| 3775 // Load the 2-byte character code into the result register. We can | 4619 // Load the 2-byte character code into the result register. We can |
| 3776 // add without shifting since the smi tag size is the log2 of the | 4620 // add without shifting since the smi tag size is the log2 of the |
| 3777 // number of bytes in a two-byte character. | 4621 // number of bytes in a two-byte character. |
| 3778 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); | 4622 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); |
| 3779 __ add(scratch_, object_, Operand(scratch_)); | 4623 __ add(scratch_, object_, Operand(scratch_)); |
| 3780 __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); | 4624 __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); |
| 3781 __ jmp(&got_char_code); | 4625 __ jmp(&got_char_code); |
| 3782 | 4626 |
| (...skipping 34 matching lines...) |
| 3817 // Save the conversion result before the pop instructions below | 4661 // Save the conversion result before the pop instructions below |
| 3818 // have a chance to overwrite it. | 4662 // have a chance to overwrite it. |
| 3819 __ Move(scratch_, r0); | 4663 __ Move(scratch_, r0); |
| 3820 __ pop(index_); | 4664 __ pop(index_); |
| 3821 __ pop(object_); | 4665 __ pop(object_); |
| 3822 // Reload the instance type. | 4666 // Reload the instance type. |
| 3823 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 4667 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
| 3824 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 4668 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
| 3825 call_helper.AfterCall(masm); | 4669 call_helper.AfterCall(masm); |
| 3826 // If index is still not a smi, it must be out of range. | 4670 // If index is still not a smi, it must be out of range. |
| 3827 __ BranchOnNotSmi(scratch_, index_out_of_range_); | 4671 __ JumpIfNotSmi(scratch_, index_out_of_range_); |
| 3828 // Otherwise, return to the fast path. | 4672 // Otherwise, return to the fast path. |
| 3829 __ jmp(&got_smi_index_); | 4673 __ jmp(&got_smi_index_); |
| 3830 | 4674 |
| 3831 // Call runtime. We get here when the receiver is a string and the | 4675 // Call runtime. We get here when the receiver is a string and the |
| 3832 // index is a number, but the code for getting the actual character | 4676 // index is a number, but the code for getting the actual character |
| 3833 // is too complex (e.g., when the string needs to be flattened). | 4677 // is too complex (e.g., when the string needs to be flattened). |
| 3834 __ bind(&call_runtime_); | 4678 __ bind(&call_runtime_); |
| 3835 call_helper.BeforeCall(masm); | 4679 call_helper.BeforeCall(masm); |
| 3836 __ Push(object_, index_); | 4680 __ Push(object_, index_); |
| 3837 __ CallRuntime(Runtime::kStringCharCodeAt, 2); | 4681 __ CallRuntime(Runtime::kStringCharCodeAt, 2); |
| 3838 __ Move(result_, r0); | 4682 __ Move(result_, r0); |
| 3839 call_helper.AfterCall(masm); | 4683 call_helper.AfterCall(masm); |
| 3840 __ jmp(&exit_); | 4684 __ jmp(&exit_); |
| 3841 | 4685 |
| 3842 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); | 4686 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); |
| 3843 } | 4687 } |
| 3844 | 4688 |
| 3845 | 4689 |
| 3846 // ------------------------------------------------------------------------- | 4690 // ------------------------------------------------------------------------- |
| 3847 // StringCharFromCodeGenerator | 4691 // StringCharFromCodeGenerator |
| 3848 | 4692 |
| 3849 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 4693 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
| 3850 // Fast case of Heap::LookupSingleCharacterStringFromCode. | 4694 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
| 3851 STATIC_ASSERT(kSmiTag == 0); | 4695 STATIC_ASSERT(kSmiTag == 0); |
| 3852 STATIC_ASSERT(kSmiShiftSize == 0); | 4696 STATIC_ASSERT(kSmiShiftSize == 0); |
| 3853 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); | 4697 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); |
| 3854 __ tst(code_, | 4698 __ tst(code_, |
| 3855 Operand(kSmiTagMask | | 4699 Operand(kSmiTagMask | |
| 3856 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); | 4700 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); |
| 3857 __ b(nz, &slow_case_); | 4701 __ b(ne, &slow_case_); |
| 3858 | 4702 |
| 3859 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 4703 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
| 3860 // At this point code register contains smi tagged ascii char code. | 4704 // At this point code register contains smi tagged ascii char code. |
| 3861 STATIC_ASSERT(kSmiTag == 0); | 4705 STATIC_ASSERT(kSmiTag == 0); |
| 3862 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); | 4706 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); |
| 3863 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 4707 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
| 3864 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 4708 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
| 3865 __ cmp(result_, Operand(ip)); | 4709 __ cmp(result_, Operand(ip)); |
| 3866 __ b(eq, &slow_case_); | 4710 __ b(eq, &slow_case_); |
| 3867 __ bind(&exit_); | 4711 __ bind(&exit_); |
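The tst at the top of this fast path folds two guards into one mask: the value must be a smi and its untagged char code must fit the single-character cache (at most kMaxAsciiCharCode). A sketch of the same predicate, assuming the constants the stub asserts (kSmiTag == 0, kSmiTagSize == 1):

```cpp
#include <cstdint>

constexpr uint32_t kSmiTagSize = 1;
constexpr uint32_t kSmiTagMask = (1u << kSmiTagSize) - 1;  // low bit set on heap objects
constexpr uint32_t kMaxAsciiCharCode = 0x7F;

bool FitsSingleCharCache(uint32_t tagged_code) {
  // Zero iff the value is a smi AND the untagged char code is <= 0x7F.
  const uint32_t mask = kSmiTagMask | (~kMaxAsciiCharCode << kSmiTagSize);
  return (tagged_code & mask) == 0;
}
```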
| (...skipping 426 matching lines...) |
| 4294 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | 5138 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
| 4295 Register hash) { | 5139 Register hash) { |
| 4296 // hash += hash << 3; | 5140 // hash += hash << 3; |
| 4297 __ add(hash, hash, Operand(hash, LSL, 3)); | 5141 __ add(hash, hash, Operand(hash, LSL, 3)); |
| 4298 // hash ^= hash >> 11; | 5142 // hash ^= hash >> 11; |
| 4299 __ eor(hash, hash, Operand(hash, ASR, 11)); | 5143 __ eor(hash, hash, Operand(hash, ASR, 11)); |
| 4300 // hash += hash << 15; | 5144 // hash += hash << 15; |
| 4301 __ add(hash, hash, Operand(hash, LSL, 15), SetCC); | 5145 __ add(hash, hash, Operand(hash, LSL, 15), SetCC); |
| 4302 | 5146 |
| 4303 // if (hash == 0) hash = 27; | 5147 // if (hash == 0) hash = 27; |
| 4304 __ mov(hash, Operand(27), LeaveCC, nz); | 5148 __ mov(hash, Operand(27), LeaveCC, ne); |
| 4305 } | 5149 } |
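A plain C++ rendering of the finalization steps named in the comments above (a sketch of the intent, using unsigned arithmetic; the accumulation phase of the string hash is not shown here):

```cpp
#include <cstdint>

uint32_t FinalizeStringHash(uint32_t hash) {
  hash += hash << 3;
  hash ^= hash >> 11;
  hash += hash << 15;
  // A zero hash is replaced by 27 so a computed hash can never look like an
  // empty hash field.
  if (hash == 0) hash = 27;
  return hash;
}
```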
| 4306 | 5150 |
| 4307 | 5151 |
| 4308 void SubStringStub::Generate(MacroAssembler* masm) { | 5152 void SubStringStub::Generate(MacroAssembler* masm) { |
| 4309 Label runtime; | 5153 Label runtime; |
| 4310 | 5154 |
| 4311 // Stack frame on entry. | 5155 // Stack frame on entry. |
| 4312 // lr: return address | 5156 // lr: return address |
| 4313 // sp[0]: to | 5157 // sp[0]: to |
| 4314 // sp[4]: from | 5158 // sp[4]: from |
| (...skipping 547 matching lines...) |
| 4862 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); | 5706 __ IncrementCounter(&Counters::string_add_native, 1, r2, r3); |
| 4863 __ add(sp, sp, Operand(2 * kPointerSize)); | 5707 __ add(sp, sp, Operand(2 * kPointerSize)); |
| 4864 __ Ret(); | 5708 __ Ret(); |
| 4865 | 5709 |
| 4866 // Just jump to runtime to add the two strings. | 5710 // Just jump to runtime to add the two strings. |
| 4867 __ bind(&string_add_runtime); | 5711 __ bind(&string_add_runtime); |
| 4868 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); | 5712 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); |
| 4869 } | 5713 } |
| 4870 | 5714 |
| 4871 | 5715 |
| 5716 void StringCharAtStub::Generate(MacroAssembler* masm) { |
| 5717 // Expects two arguments (object, index) on the stack: |
| 5718 // lr: return address |
| 5719 // sp[0]: index |
| 5720 // sp[4]: object |
| 5721 Register object = r1; |
| 5722 Register index = r0; |
| 5723 Register scratch1 = r2; |
| 5724 Register scratch2 = r3; |
| 5725 Register result = r0; |
| 5726 |
| 5727 // Get object and index from the stack. |
| 5728 __ pop(index); |
| 5729 __ pop(object); |
| 5730 |
| 5731 Label need_conversion; |
| 5732 Label index_out_of_range; |
| 5733 Label done; |
| 5734 StringCharAtGenerator generator(object, |
| 5735 index, |
| 5736 scratch1, |
| 5737 scratch2, |
| 5738 result, |
| 5739 &need_conversion, |
| 5740 &need_conversion, |
| 5741 &index_out_of_range, |
| 5742 STRING_INDEX_IS_NUMBER); |
| 5743 generator.GenerateFast(masm); |
| 5744 __ b(&done); |
| 5745 |
| 5746 __ bind(&index_out_of_range); |
| 5747 // When the index is out of range, the spec requires us to return |
| 5748 // the empty string. |
| 5749 __ LoadRoot(result, Heap::kEmptyStringRootIndex); |
| 5750 __ jmp(&done); |
| 5751 |
| 5752 __ bind(&need_conversion); |
| 5753 // Move smi zero into the result register, which will trigger |
| 5754 // conversion. |
| 5755 __ mov(result, Operand(Smi::FromInt(0))); |
| 5756 __ b(&done); |
| 5757 |
| 5758 StubRuntimeCallHelper call_helper; |
| 5759 generator.GenerateSlow(masm, call_helper); |
| 5760 |
| 5761 __ bind(&done); |
| 5762 __ Ret(); |
| 5763 } |
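Observable behavior the stub implements for String.prototype.charAt: a non-smi index goes through the conversion slow case, and an out-of-range index yields the empty string rather than undefined. A sketch of that contract for one-byte strings only (the conversion path is ignored):

```cpp
#include <string>

std::string CharAt(const std::string& subject, long index) {
  if (index < 0 || static_cast<unsigned long>(index) >= subject.size()) {
    return "";  // out of range: the spec requires the empty string here
  }
  return std::string(1, subject[index]);
}
```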
| 5764 |
| 5765 |
| 4872 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 5766 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { |
| 4873 ASSERT(state_ == CompareIC::SMIS); | 5767 ASSERT(state_ == CompareIC::SMIS); |
| 4874 Label miss; | 5768 Label miss; |
| 4875 __ orr(r2, r1, r0); | 5769 __ orr(r2, r1, r0); |
| 4876 __ tst(r2, Operand(kSmiTagMask)); | 5770 __ tst(r2, Operand(kSmiTagMask)); |
| 4877 __ b(ne, &miss); | 5771 __ b(ne, &miss); |
| 4878 | 5772 |
| 4879 if (GetCondition() == eq) { | 5773 if (GetCondition() == eq) { |
| 4880 // For equality we do not care about the sign of the result. | 5774 // For equality we do not care about the sign of the result. |
| 4881 __ sub(r0, r0, r1, SetCC); | 5775 __ sub(r0, r0, r1, SetCC); |
| 4882 } else { | 5776 } else { |
| 4883 __ sub(r1, r1, r0, SetCC); | 5777 // Untag before subtracting to avoid handling overflow. |
| 4884 // Correct sign of result in case of overflow. | 5778 __ SmiUntag(r1); |
| 4885 __ rsb(r1, r1, Operand(0), SetCC, vs); | 5779 __ sub(r0, r1, SmiUntagOperand(r0)); |
| 4886 __ mov(r0, r1); | |
| 4887 } | 5780 } |
| 4888 __ Ret(); | 5781 __ Ret(); |
| 4889 | 5782 |
| 4890 __ bind(&miss); | 5783 __ bind(&miss); |
| 4891 GenerateMiss(masm); | 5784 GenerateMiss(masm); |
| 4892 } | 5785 } |
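For the ordered comparison the rewritten stub untags both operands before subtracting: smi payloads are 31-bit, so the 32-bit difference of the untagged values cannot overflow and its sign is already the answer, which is what lets the old overflow fix-up (the conditional rsb) go away. A sketch of the idea, with illustrative helpers:

```cpp
#include <cstdint>

int32_t SmiUntag(int32_t tagged) { return tagged >> 1; }  // kSmiTagSize == 1

// < 0: lhs < rhs, 0: equal, > 0: lhs > rhs. No overflow is possible because
// both untagged values fit in 31 bits.
int32_t CompareSmis(int32_t lhs_tagged, int32_t rhs_tagged) {
  return SmiUntag(lhs_tagged) - SmiUntag(rhs_tagged);
}
```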
| 4893 | 5786 |
| 4894 | 5787 |
| 4895 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { | 5788 void ICCompareStub::GenerateHeapNumbers(MacroAssembler* masm) { |
| 4896 ASSERT(state_ == CompareIC::HEAP_NUMBERS); | 5789 ASSERT(state_ == CompareIC::HEAP_NUMBERS); |
| (...skipping 15 matching lines...) |
| 4912 if (CpuFeatures::IsSupported(VFP3)) { | 5805 if (CpuFeatures::IsSupported(VFP3)) { |
| 4913 CpuFeatures::Scope scope(VFP3); | 5806 CpuFeatures::Scope scope(VFP3); |
| 4914 | 5807 |
| 4915 // Load left and right operand | 5808 // Load left and right operand |
| 4916 __ sub(r2, r1, Operand(kHeapObjectTag)); | 5809 __ sub(r2, r1, Operand(kHeapObjectTag)); |
| 4917 __ vldr(d0, r2, HeapNumber::kValueOffset); | 5810 __ vldr(d0, r2, HeapNumber::kValueOffset); |
| 4918 __ sub(r2, r0, Operand(kHeapObjectTag)); | 5811 __ sub(r2, r0, Operand(kHeapObjectTag)); |
| 4919 __ vldr(d1, r2, HeapNumber::kValueOffset); | 5812 __ vldr(d1, r2, HeapNumber::kValueOffset); |
| 4920 | 5813 |
| 4921 // Compare operands | 5814 // Compare operands |
| 4922 __ vcmp(d0, d1); | 5815 __ VFPCompareAndSetFlags(d0, d1); |
| 4923 __ vmrs(pc); // Move vector status bits to normal status bits. | |
| 4924 | 5816 |
| 4925 // Don't base result on status bits when a NaN is involved. | 5817 // Don't base result on status bits when a NaN is involved. |
| 4926 __ b(vs, &unordered); | 5818 __ b(vs, &unordered); |
| 4927 | 5819 |
| 4928 // Return a result of -1, 0, or 1, based on status bits. | 5820 // Return a result of -1, 0, or 1, based on status bits. |
| 4929 __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 5821 __ mov(r0, Operand(EQUAL), LeaveCC, eq); |
| 4930 __ mov(r0, Operand(LESS), LeaveCC, lt); | 5822 __ mov(r0, Operand(LESS), LeaveCC, lt); |
| 4931 __ mov(r0, Operand(GREATER), LeaveCC, gt); | 5823 __ mov(r0, Operand(GREATER), LeaveCC, gt); |
| 4932 __ Ret(); | 5824 __ Ret(); |
| 4933 | 5825 |
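The vs branch above is what keeps NaN out of the fast path: a VFP comparison involving NaN is unordered, so no ordering is derived from the flags. A sketch of the mapping the VFP path computes, using the -1/0/1 convention the comment mentions:

```cpp
#include <cmath>

// Returns -1 (LESS), 0 (EQUAL) or 1 (GREATER); *unordered is set when either
// operand is NaN, in which case the caller falls through to the generic stub.
int CompareHeapNumbers(double lhs, double rhs, bool* unordered) {
  *unordered = std::isnan(lhs) || std::isnan(rhs);
  if (*unordered) return 0;
  if (lhs < rhs) return -1;
  if (lhs > rhs) return 1;
  return 0;
}
```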
| (...skipping 45 matching lines...) |
| 4979 // Compute the entry point of the rewritten stub. | 5871 // Compute the entry point of the rewritten stub. |
| 4980 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); | 5872 __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| 4981 // Restore registers. | 5873 // Restore registers. |
| 4982 __ pop(lr); | 5874 __ pop(lr); |
| 4983 __ pop(r0); | 5875 __ pop(r0); |
| 4984 __ pop(r1); | 5876 __ pop(r1); |
| 4985 __ Jump(r2); | 5877 __ Jump(r2); |
| 4986 } | 5878 } |
| 4987 | 5879 |
| 4988 | 5880 |
| 5881 void DirectCEntryStub::Generate(MacroAssembler* masm) { |
| 5882 __ ldr(pc, MemOperand(sp, 0)); |
| 5883 } |
| 5884 |
| 5885 |
| 5886 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| 5887 ApiFunction *function) { |
| 5888 __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), |
| 5889 RelocInfo::CODE_TARGET)); |
| 5890 __ mov(r2, |
| 5891 Operand(ExternalReference(function, ExternalReference::DIRECT_CALL))); |
| 5892 // Push return address (accessible to GC through exit frame pc). |
| 5893 __ str(pc, MemOperand(sp, 0)); |
| 5894 __ Jump(r2); // Call the api function. |
| 5895 } |
| 5896 |
| 5897 |
| 5898 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, |
| 5899 Register target) { |
| 5900 __ mov(lr, Operand(reinterpret_cast<intptr_t>(GetCode().location()), |
| 5901 RelocInfo::CODE_TARGET)); |
| 5902 // Push return address (accessible to GC through exit frame pc). |
| 5903 __ str(pc, MemOperand(sp, 0)); |
| 5904 __ Jump(target); // Call the C++ function. |
| 5905 } |
| 5906 |
| 5907 |
| 5908 void GenerateFastPixelArrayLoad(MacroAssembler* masm, |
| 5909 Register receiver, |
| 5910 Register key, |
| 5911 Register elements_map, |
| 5912 Register elements, |
| 5913 Register scratch1, |
| 5914 Register scratch2, |
| 5915 Register result, |
| 5916 Label* not_pixel_array, |
| 5917 Label* key_not_smi, |
| 5918 Label* out_of_range) { |
| 5919 // Register use: |
| 5920 // |
| 5921 // receiver - holds the receiver on entry. |
| 5922 // Unchanged unless 'result' is the same register. |
| 5923 // |
| 5924 // key - holds the smi key on entry. |
| 5925 // Unchanged unless 'result' is the same register. |
| 5926 // |
| 5927 // elements - set to be the receiver's elements on exit. |
| 5928 // |
| 5929 // elements_map - set to be the map of the receiver's elements |
| 5930 // on exit. |
| 5931 // |
| 5932 // result - holds the result of the pixel array load on exit, |
| 5933 // tagged as a smi if successful. |
| 5934 // |
| 5935 // Scratch registers: |
| 5936 // |
| 5937 // scratch1 - used as a scratch register in the map check; if the map |
| 5938 // check is successful, contains the length of the |
| 5939 // pixel array, the pointer to external elements and |
| 5940 // the untagged result. |
| 5941 // |
| 5942 // scratch2 - holds the untagged key. |
| 5943 |
| 5944 // Some callers already have verified that the key is a smi. key_not_smi is |
| 5945 // set to NULL as a sentinel for that case. Otherwise, an explicit check |
| 5946 // to ensure the key is a smi must be added. |
| 5947 if (key_not_smi != NULL) { |
| 5948 __ JumpIfNotSmi(key, key_not_smi); |
| 5949 } else { |
| 5950 if (FLAG_debug_code) { |
| 5951 __ AbortIfNotSmi(key); |
| 5952 } |
| 5953 } |
| 5954 __ SmiUntag(scratch2, key); |
| 5955 |
| 5956 // Verify that the receiver has pixel array elements. |
| 5957 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 5958 __ CheckMap(elements, scratch1, Heap::kPixelArrayMapRootIndex, |
| 5959 not_pixel_array, true); |
| 5960 |
| 5961 // Key must be in range of the pixel array. |
| 5962 __ ldr(scratch1, FieldMemOperand(elements, PixelArray::kLengthOffset)); |
| 5963 __ cmp(scratch2, scratch1); |
| 5964 __ b(hs, out_of_range); // unsigned check handles negative keys. |
| 5965 |
| 5966 // Perform the indexed load and tag the result as a smi. |
| 5967 __ ldr(scratch1, |
| 5968 FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); |
| 5969 __ ldrb(scratch1, MemOperand(scratch1, scratch2)); |
| 5970 __ SmiTag(r0, scratch1); |
| 5971 __ Ret(); |
| 5972 } |
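The "hs" comparison above is a deliberate trick: comparing the untagged key against the length as unsigned values lets one branch reject both negative and too-large keys, because a negative key reinterprets as a huge unsigned number. A sketch of the predicate:

```cpp
#include <cstdint>

bool PixelIndexInRange(int32_t untagged_key, uint32_t length) {
  // Equivalent to: key >= 0 && key < length, in a single unsigned compare.
  return static_cast<uint32_t>(untagged_key) < length;
}
```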
| 5973 |
| 5974 |
| 5975 void GenerateFastPixelArrayStore(MacroAssembler* masm, |
| 5976 Register receiver, |
| 5977 Register key, |
| 5978 Register value, |
| 5979 Register elements, |
| 5980 Register elements_map, |
| 5981 Register scratch1, |
| 5982 Register scratch2, |
| 5983 bool load_elements_from_receiver, |
| 5984 bool load_elements_map_from_elements, |
| 5985 Label* key_not_smi, |
| 5986 Label* value_not_smi, |
| 5987 Label* not_pixel_array, |
| 5988 Label* out_of_range) { |
| 5989 // Register use: |
| 5990 // receiver - holds the receiver and is unchanged unless the |
| 5991 // store succeeds. |
| 5992 // key - holds the key (must be a smi) and is unchanged. |
| 5993 // value - holds the value (must be a smi) and is unchanged. |
| 5994 // elements - holds the element object of the receiver on entry if |
| 5995 // load_elements_from_receiver is false, otherwise used |
| 5996 // internally to store the pixel arrays elements and |
| 5997 // external array pointer. |
| 5998 // elements_map - holds the map of the element object if |
| 5999 // load_elements_map_from_elements is false, otherwise |
| 6000 // loaded with the element map. |
| 6001 // |
| 6002 Register external_pointer = elements; |
| 6003 Register untagged_key = scratch1; |
| 6004 Register untagged_value = scratch2; |
| 6005 |
| 6006 if (load_elements_from_receiver) { |
| 6007 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 6008 } |
| 6009 |
| 6010 // By passing NULL as not_pixel_array, callers signal that they have already |
| 6011 // verified that the receiver has pixel array elements. |
| 6012 if (not_pixel_array != NULL) { |
| 6013 if (load_elements_map_from_elements) { |
| 6014 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 6015 } |
| 6016 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); |
| 6017 __ cmp(elements_map, ip); |
| 6018 __ b(ne, not_pixel_array); |
| 6019 } else { |
| 6020 if (FLAG_debug_code) { |
| 6021 // Map check should have already made sure that elements is a pixel array. |
| 6022 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 6023 __ LoadRoot(ip, Heap::kPixelArrayMapRootIndex); |
| 6024 __ cmp(elements_map, ip); |
| 6025 __ Assert(eq, "Elements isn't a pixel array"); |
| 6026 } |
| 6027 } |
| 6028 |
| 6029 // Some callers already have verified that the key is a smi. key_not_smi is |
| 6030 // set to NULL as a sentinel for that case. Otherwise, an explicit check |
| 6031 // to ensure the key is a smi must be added. |
| 6032 if (key_not_smi != NULL) { |
| 6033 __ JumpIfNotSmi(key, key_not_smi); |
| 6034 } else { |
| 6035 if (FLAG_debug_code) { |
| 6036 __ AbortIfNotSmi(key); |
| 6037 } |
| 6038 } |
| 6039 |
| 6040 __ SmiUntag(untagged_key, key); |
| 6041 |
| 6042 // Perform bounds check. |
| 6043 __ ldr(scratch2, FieldMemOperand(elements, PixelArray::kLengthOffset)); |
| 6044 __ cmp(untagged_key, scratch2); |
| 6045 __ b(hs, out_of_range); // unsigned check handles negative keys. |
| 6046 |
| 6047 __ JumpIfNotSmi(value, value_not_smi); |
| 6048 __ SmiUntag(untagged_value, value); |
| 6049 |
| 6050 // Clamp the value to [0..255]. |
| 6051 __ Usat(untagged_value, 8, Operand(untagged_value)); |
| 6052 // Get the pointer to the external array. This clobbers elements. |
| 6053 __ ldr(external_pointer, |
| 6054 FieldMemOperand(elements, PixelArray::kExternalPointerOffset)); |
| 6055 __ strb(untagged_value, MemOperand(external_pointer, untagged_key)); |
| 6056 __ Ret(); |
| 6057 } |
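The Usat instruction in the store path saturates the untagged value to 8 bits, i.e. clamps it into [0, 255] before it is written into the pixel array. The same operation spelled out in portable C++ (a sketch):

```cpp
#include <cstdint>

uint8_t ClampToPixel(int32_t value) {
  if (value < 0) return 0;      // saturate below
  if (value > 255) return 255;  // saturate above
  return static_cast<uint8_t>(value);
}
```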
| 6058 |
| 6059 |
| 4989 #undef __ | 6060 #undef __ |
| 4990 | 6061 |
| 4991 } } // namespace v8::internal | 6062 } } // namespace v8::internal |
| 4992 | 6063 |
| 4993 #endif // V8_TARGET_ARCH_ARM | 6064 #endif // V8_TARGET_ARCH_ARM |