OLD | NEW |
1 // Copyright 2010 the V8 project authors. All rights reserved. | 1 // Copyright 2010 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 23 matching lines...) |
34 #include "regexp-macro-assembler.h" | 34 #include "regexp-macro-assembler.h" |
35 | 35 |
36 namespace v8 { | 36 namespace v8 { |
37 namespace internal { | 37 namespace internal { |
38 | 38 |
39 | 39 |
40 #define __ ACCESS_MASM(masm) | 40 #define __ ACCESS_MASM(masm) |
41 | 41 |
42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
43 Label* slow, | 43 Label* slow, |
44 Condition cc, | 44 Condition cond, |
45 bool never_nan_nan); | 45 bool never_nan_nan); |
46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, | 46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, |
47 Register lhs, | 47 Register lhs, |
48 Register rhs, | 48 Register rhs, |
49 Label* lhs_not_nan, | 49 Label* lhs_not_nan, |
50 Label* slow, | 50 Label* slow, |
51 bool strict); | 51 bool strict); |
52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); | 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond); |
53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, | 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, |
54 Register lhs, | 54 Register lhs, |
55 Register rhs); | 55 Register rhs); |
56 | 56 |
57 | 57 |
58 void FastNewClosureStub::Generate(MacroAssembler* masm) { | 58 void FastNewClosureStub::Generate(MacroAssembler* masm) { |
59 // Create a new closure from the given function info in new | 59 // Create a new closure from the given function info in new |
60 // space. Set the context to the current context in cp. | 60 // space. Set the context to the current context in cp. |
61 Label gc; | 61 Label gc; |
62 | 62 |
(...skipping 325 matching lines...) |
388 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); | 388 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); |
389 __ Ret(); | 389 __ Ret(); |
390 } | 390 } |
391 | 391 |
392 | 392 |
393 // Handle the case where the lhs and rhs are the same object. | 393 // Handle the case where the lhs and rhs are the same object. |
394 // Equality is almost reflexive (everything but NaN), so this is a test | 394 // Equality is almost reflexive (everything but NaN), so this is a test |
395 // for "identity and not NaN". | 395 // for "identity and not NaN". |
396 static void EmitIdenticalObjectComparison(MacroAssembler* masm, | 396 static void EmitIdenticalObjectComparison(MacroAssembler* masm, |
397 Label* slow, | 397 Label* slow, |
398 Condition cc, | 398 Condition cond, |
399 bool never_nan_nan) { | 399 bool never_nan_nan) { |
400 Label not_identical; | 400 Label not_identical; |
401 Label heap_number, return_equal; | 401 Label heap_number, return_equal; |
402 __ cmp(r0, r1); | 402 __ cmp(r0, r1); |
403 __ b(ne, &not_identical); | 403 __ b(ne, &not_identical); |
404 | 404 |
405 // The two objects are identical. If we know that one of them isn't NaN then | 405 // The two objects are identical. If we know that one of them isn't NaN then |
406 // we now know they test equal. | 406 // we now know they test equal. |
407 if (cc != eq || !never_nan_nan) { | 407 if (cond != eq || !never_nan_nan) { |
408 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), | 408 // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), |
409 // so we do the second best thing - test it ourselves. | 409 // so we do the second best thing - test it ourselves. |
410 // They are both equal and they are not both Smis so both of them are not | 410 // They are both equal and they are not both Smis so both of them are not |
411 // Smis. If it's not a heap number, then return equal. | 411 // Smis. If it's not a heap number, then return equal. |
412 if (cc == lt || cc == gt) { | 412 if (cond == lt || cond == gt) { |
413 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); | 413 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); |
414 __ b(ge, slow); | 414 __ b(ge, slow); |
415 } else { | 415 } else { |
416 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); | 416 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); |
417 __ b(eq, &heap_number); | 417 __ b(eq, &heap_number); |
418 // Comparing JS objects with <=, >= is complicated. | 418 // Comparing JS objects with <=, >= is complicated. |
419 if (cc != eq) { | 419 if (cond != eq) { |
420 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); | 420 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); |
421 __ b(ge, slow); | 421 __ b(ge, slow); |
422 // Normally here we fall through to return_equal, but undefined is | 422 // Normally here we fall through to return_equal, but undefined is |
423 // special: (undefined == undefined) == true, but | 423 // special: (undefined == undefined) == true, but |
424 // (undefined <= undefined) == false! See ECMAScript 11.8.5. | 424 // (undefined <= undefined) == false! See ECMAScript 11.8.5. |
425 if (cc == le || cc == ge) { | 425 if (cond == le || cond == ge) { |
426 __ cmp(r4, Operand(ODDBALL_TYPE)); | 426 __ cmp(r4, Operand(ODDBALL_TYPE)); |
427 __ b(ne, &return_equal); | 427 __ b(ne, &return_equal); |
428 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 428 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); |
429 __ cmp(r0, r2); | 429 __ cmp(r0, r2); |
430 __ b(ne, &return_equal); | 430 __ b(ne, &return_equal); |
431 if (cc == le) { | 431 if (cond == le) { |
432 // undefined <= undefined should fail. | 432 // undefined <= undefined should fail. |
433 __ mov(r0, Operand(GREATER)); | 433 __ mov(r0, Operand(GREATER)); |
434 } else { | 434 } else { |
435 // undefined >= undefined should fail. | 435 // undefined >= undefined should fail. |
436 __ mov(r0, Operand(LESS)); | 436 __ mov(r0, Operand(LESS)); |
437 } | 437 } |
438 __ Ret(); | 438 __ Ret(); |
439 } | 439 } |
440 } | 440 } |
441 } | 441 } |
442 } | 442 } |
443 | 443 |
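Note on the GREATER/LESS values loaded above: the comparison stub reports its result as an integer whose sign the caller tests with the original condition. A minimal sketch, assuming V8's usual constants LESS = -1, EQUAL = 0, GREATER = 1:

    // Assumption: LESS/EQUAL/GREATER are -1/0/1. The caller applies
    // the original condition to the returned value against zero, so
    // returning GREATER makes "undefined <= undefined" evaluate to
    // false and returning LESS makes "undefined >= undefined" false,
    // matching ECMAScript 11.8.5.
    enum CompareResult { LESS = -1, EQUAL = 0, GREATER = 1 };

    CompareResult CompareUndefinedToItself(bool is_le) {
      return is_le ? GREATER : LESS;  // the relation fails either way
    }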
444 __ bind(&return_equal); | 444 __ bind(&return_equal); |
445 if (cc == lt) { | 445 if (cond == lt) { |
446 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. | 446 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. |
447 } else if (cc == gt) { | 447 } else if (cond == gt) { |
448 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. | 448 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. |
449 } else { | 449 } else { |
450 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. | 450 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. |
451 } | 451 } |
452 __ Ret(); | 452 __ Ret(); |
453 | 453 |
454 if (cc != eq || !never_nan_nan) { | 454 if (cond != eq || !never_nan_nan) { |
455 // For less and greater we don't have to check for NaN since the result of | 455 // For less and greater we don't have to check for NaN since the result of |
456 // x < x is false regardless. For the others here is some code to check | 456 // x < x is false regardless. For the others here is some code to check |
457 // for NaN. | 457 // for NaN. |
458 if (cc != lt && cc != gt) { | 458 if (cond != lt && cond != gt) { |
459 __ bind(&heap_number); | 459 __ bind(&heap_number); |
460 // It is a heap number, so return non-equal if it's NaN and equal if it's | 460 // It is a heap number, so return non-equal if it's NaN and equal if it's |
461 // not NaN. | 461 // not NaN. |
462 | 462 |
463 // The representation of NaN values has all exponent bits (52..62) set, | 463 // The representation of NaN values has all exponent bits (52..62) set, |
464 // and not all mantissa bits (0..51) clear. | 464 // and not all mantissa bits (0..51) clear. |
465 // Read top bits of double representation (second word of value). | 465 // Read top bits of double representation (second word of value). |
466 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 466 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
467 // Test that exponent bits are all set. | 467 // Test that exponent bits are all set. |
468 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); | 468 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); |
469 // NaNs have all-one exponents so they sign extend to -1. | 469 // NaNs have all-one exponents so they sign extend to -1. |
470 __ cmp(r3, Operand(-1)); | 470 __ cmp(r3, Operand(-1)); |
471 __ b(ne, &return_equal); | 471 __ b(ne, &return_equal); |
472 | 472 |
473 // Shift out flag and all exponent bits, retaining only mantissa. | 473 // Shift out flag and all exponent bits, retaining only mantissa. |
474 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); | 474 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); |
475 // Or with all low-bits of mantissa. | 475 // Or with all low-bits of mantissa. |
476 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 476 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
477 __ orr(r0, r3, Operand(r2), SetCC); | 477 __ orr(r0, r3, Operand(r2), SetCC); |
478 // For equal we already have the right value in r0: Return zero (equal) | 478 // For equal we already have the right value in r0: Return zero (equal) |
479 // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 479 // if all bits in mantissa are zero (it's an Infinity) and non-zero if |
480 // not (it's a NaN). For <= and >= we need to load r0 with the failing | 480 // not (it's a NaN). For <= and >= we need to load r0 with the failing |
481 // value if it's a NaN. | 481 // value if it's a NaN. |
482 if (cc != eq) { | 482 if (cond != eq) { |
483 // All-zero means Infinity means equal. | 483 // All-zero means Infinity means equal. |
484 __ Ret(eq); | 484 __ Ret(eq); |
485 if (cc == le) { | 485 if (cond == le) { |
486 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. | 486 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. |
487 } else { | 487 } else { |
488 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. | 488 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. |
489 } | 489 } |
490 } | 490 } |
491 __ Ret(); | 491 __ Ret(); |
492 } | 492 } |
493 // No fall through here. | 493 // No fall through here. |
494 } | 494 } |
495 | 495 |
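The NaN test above can be written directly against the IEEE-754 bit pattern. A minimal C++ sketch (not V8 code) of the same predicate the assembly implements with Sbfx and the mantissa OR:

    #include <cstdint>
    #include <cstring>

    // NaN iff all 11 exponent bits (52..62) are set and the 52
    // mantissa bits (0..51) are not all clear; an all-ones exponent
    // with a zero mantissa is an infinity, which compares equal.
    bool IsNaNBits(double d) {
      uint64_t bits;
      std::memcpy(&bits, &d, sizeof(bits));
      uint64_t exponent = (bits >> 52) & 0x7FF;
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      return exponent == 0x7FF && mantissa != 0;
    }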
(...skipping 86 matching lines...) |
582 // Convert rhs to a double in r0, r1. | 582 // Convert rhs to a double in r0, r1. |
583 __ mov(r7, Operand(rhs)); | 583 __ mov(r7, Operand(rhs)); |
584 ConvertToDoubleStub stub2(r1, r0, r7, r6); | 584 ConvertToDoubleStub stub2(r1, r0, r7, r6); |
585 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); | 585 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); |
586 __ pop(lr); | 586 __ pop(lr); |
587 } | 587 } |
588 // Fall through to both_loaded_as_doubles. | 588 // Fall through to both_loaded_as_doubles. |
589 } | 589 } |
590 | 590 |
591 | 591 |
592 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { | 592 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) { |
593 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 593 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
594 Register rhs_exponent = exp_first ? r0 : r1; | 594 Register rhs_exponent = exp_first ? r0 : r1; |
595 Register lhs_exponent = exp_first ? r2 : r3; | 595 Register lhs_exponent = exp_first ? r2 : r3; |
596 Register rhs_mantissa = exp_first ? r1 : r0; | 596 Register rhs_mantissa = exp_first ? r1 : r0; |
597 Register lhs_mantissa = exp_first ? r3 : r2; | 597 Register lhs_mantissa = exp_first ? r3 : r2; |
598 Label one_is_nan, neither_is_nan; | 598 Label one_is_nan, neither_is_nan; |
599 | 599 |
600 __ Sbfx(r4, | 600 __ Sbfx(r4, |
601 lhs_exponent, | 601 lhs_exponent, |
602 HeapNumber::kExponentShift, | 602 HeapNumber::kExponentShift, |
(...skipping 19 matching lines...) |
622 __ mov(r4, | 622 __ mov(r4, |
623 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), | 623 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), |
624 SetCC); | 624 SetCC); |
625 __ b(ne, &one_is_nan); | 625 __ b(ne, &one_is_nan); |
626 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); | 626 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); |
627 __ b(eq, &neither_is_nan); | 627 __ b(eq, &neither_is_nan); |
628 | 628 |
629 __ bind(&one_is_nan); | 629 __ bind(&one_is_nan); |
630 // NaN comparisons always fail. | 630 // NaN comparisons always fail. |
631 // Load whatever we need in r0 to make the comparison fail. | 631 // Load whatever we need in r0 to make the comparison fail. |
632 if (cc == lt || cc == le) { | 632 if (cond == lt || cond == le) { |
633 __ mov(r0, Operand(GREATER)); | 633 __ mov(r0, Operand(GREATER)); |
634 } else { | 634 } else { |
635 __ mov(r0, Operand(LESS)); | 635 __ mov(r0, Operand(LESS)); |
636 } | 636 } |
637 __ Ret(); | 637 __ Ret(); |
638 | 638 |
639 __ bind(&neither_is_nan); | 639 __ bind(&neither_is_nan); |
640 } | 640 } |
641 | 641 |
642 | 642 |
643 // See comment at call site. | 643 // See comment at call site. |
644 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { | 644 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, |
| 645 Condition cond) { |
645 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); | 646 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); |
646 Register rhs_exponent = exp_first ? r0 : r1; | 647 Register rhs_exponent = exp_first ? r0 : r1; |
647 Register lhs_exponent = exp_first ? r2 : r3; | 648 Register lhs_exponent = exp_first ? r2 : r3; |
648 Register rhs_mantissa = exp_first ? r1 : r0; | 649 Register rhs_mantissa = exp_first ? r1 : r0; |
649 Register lhs_mantissa = exp_first ? r3 : r2; | 650 Register lhs_mantissa = exp_first ? r3 : r2; |
650 | 651 |
651 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. | 652 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. |
652 if (cc == eq) { | 653 if (cond == eq) { |
653 // Doubles are not equal unless they have the same bit pattern. | 654 // Doubles are not equal unless they have the same bit pattern. |
654 // Exception: 0 and -0. | 655 // Exception: 0 and -0. |
655 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); | 656 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); |
656 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); | 657 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); |
657 // Return non-zero if the numbers are unequal. | 658 // Return non-zero if the numbers are unequal. |
658 __ Ret(ne); | 659 __ Ret(ne); |
659 | 660 |
660 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); | 661 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); |
661 // If exponents are equal then return 0. | 662 // If exponents are equal then return 0. |
662 __ Ret(eq); | 663 __ Ret(eq); |
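The eq path above treats two non-NaN doubles as equal only when their bit patterns match, with +0/-0 as the single exception. A hedged C++ sketch of that predicate (register-level details omitted):

    #include <cstdint>
    #include <cstring>

    // Bit-pattern equality, except that +0 and -0 compare equal.
    // NaN is excluded here; callers run EmitNanCheck first.
    bool NonNanDoubleEquals(double a, double b) {
      uint64_t ba, bb;
      std::memcpy(&ba, &a, sizeof(ba));
      std::memcpy(&bb, &b, sizeof(bb));
      if (ba == bb) return true;
      // Shifting out the sign bit leaves zero only for +0/-0.
      return (ba << 1) == 0 && (bb << 1) == 0;
    }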
(...skipping 268 matching lines...) |
931 __ orr(r2, r1, r0); | 932 __ orr(r2, r1, r0); |
932 __ tst(r2, Operand(kSmiTagMask)); | 933 __ tst(r2, Operand(kSmiTagMask)); |
933 __ b(ne, &not_two_smis); | 934 __ b(ne, &not_two_smis); |
934 __ mov(r1, Operand(r1, ASR, 1)); | 935 __ mov(r1, Operand(r1, ASR, 1)); |
935 __ sub(r0, r1, Operand(r0, ASR, 1)); | 936 __ sub(r0, r1, Operand(r0, ASR, 1)); |
936 __ Ret(); | 937 __ Ret(); |
937 __ bind(&not_two_smis); | 938 __ bind(&not_two_smis); |
938 } else if (FLAG_debug_code) { | 939 } else if (FLAG_debug_code) { |
939 __ orr(r2, r1, r0); | 940 __ orr(r2, r1, r0); |
940 __ tst(r2, Operand(kSmiTagMask)); | 941 __ tst(r2, Operand(kSmiTagMask)); |
941 __ Assert(nz, "CompareStub: unexpected smi operands."); | 942 __ Assert(ne, "CompareStub: unexpected smi operands."); |
942 } | 943 } |
943 | 944 |
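The two-smi fast path above leans on ARM smi tagging: a smi keeps its payload in the upper 31 bits over a 0 tag bit, so one OR plus a kSmiTagMask test checks both operands at once, and ASR 1 untags. A sketch assuming that 32-bit layout (kSmiTag == 0, kSmiTagSize == 1):

    #include <cstdint>

    const int32_t kSmiTagMask = 1;  // assumption: 32-bit smi layout

    // Both operands are smis iff the OR of their low tag bits is 0.
    bool BothSmis(int32_t a, int32_t b) {
      return ((a | b) & kSmiTagMask) == 0;
    }

    // Mirrors the fast path: untag with arithmetic shifts, then
    // subtract; the sign of the result encodes <, ==, or >.
    int32_t CompareSmis(int32_t lhs, int32_t rhs) {
      return (lhs >> 1) - (rhs >> 1);
    }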
944 // NOTICE! This code is only reached after a smi-fast-case check, so | 945 // NOTICE! This code is only reached after a smi-fast-case check, so |
945 // it is certain that at least one operand isn't a smi. | 946 // it is certain that at least one operand isn't a smi. |
946 | 947 |
947 // Handle the case where the objects are identical. Either returns the answer | 948 // Handle the case where the objects are identical. Either returns the answer |
948 // or goes to slow. Only falls through if the objects were not identical. | 949 // or goes to slow. Only falls through if the objects were not identical. |
949 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); | 950 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); |
950 | 951 |
951 // If either is a Smi (we know that not both are), then they can only | 952 // If either is a Smi (we know that not both are), then they can only |
(...skipping 2327 matching lines...) |
3279 STATIC_ASSERT(kSmiTag == 0); | 3280 STATIC_ASSERT(kSmiTag == 0); |
3280 __ tst(r0, Operand(kSmiTagMask)); | 3281 __ tst(r0, Operand(kSmiTagMask)); |
3281 __ b(eq, &runtime); | 3282 __ b(eq, &runtime); |
3282 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); | 3283 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); |
3283 __ b(ne, &runtime); | 3284 __ b(ne, &runtime); |
3284 | 3285 |
3285 // Check that the RegExp has been compiled (data contains a fixed array). | 3286 // Check that the RegExp has been compiled (data contains a fixed array). |
3286 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); | 3287 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); |
3287 if (FLAG_debug_code) { | 3288 if (FLAG_debug_code) { |
3288 __ tst(regexp_data, Operand(kSmiTagMask)); | 3289 __ tst(regexp_data, Operand(kSmiTagMask)); |
3289 __ Check(nz, "Unexpected type for RegExp data, FixedArray expected"); | 3290 __ Check(ne, "Unexpected type for RegExp data, FixedArray expected"); |
3290 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); | 3291 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); |
3291 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); | 3292 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); |
3292 } | 3293 } |
3293 | 3294 |
3294 // regexp_data: RegExp data (FixedArray) | 3295 // regexp_data: RegExp data (FixedArray) |
3295 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 3296 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. |
3296 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 3297 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); |
3297 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | 3298 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); |
3298 __ b(ne, &runtime); | 3299 __ b(ne, &runtime); |
3299 | 3300 |
(...skipping 82 matching lines...) |
3382 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); | 3383 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); |
3383 __ LoadRoot(r1, Heap::kEmptyStringRootIndex); | 3384 __ LoadRoot(r1, Heap::kEmptyStringRootIndex); |
3384 __ cmp(r0, r1); | 3385 __ cmp(r0, r1); |
3385 __ b(ne, &runtime); | 3386 __ b(ne, &runtime); |
3386 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 3387 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); |
3387 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 3388 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); |
3388 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 3389 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); |
3389 // Is first part a flat string? | 3390 // Is first part a flat string? |
3390 STATIC_ASSERT(kSeqStringTag == 0); | 3391 STATIC_ASSERT(kSeqStringTag == 0); |
3391 __ tst(r0, Operand(kStringRepresentationMask)); | 3392 __ tst(r0, Operand(kStringRepresentationMask)); |
3392 __ b(nz, &runtime); | 3393 __ b(ne, &runtime); |
3393 | 3394 |
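The cons-string case above only proceeds when the second half is the empty string, in which case the first half stands in for the whole subject; anything else goes to the runtime for real flattening. A sketch with hypothetical stand-in fields (is_cons, is_seq, first, second are illustrative, not V8's API):

    // Hypothetical sketch; the struct and its fields are stand-ins.
    struct Str {
      bool is_cons, is_seq, is_empty;
      Str* first;
      Str* second;
    };

    // Returns the sequential string to scan, or nullptr to signal
    // the slow runtime path.
    Str* TryUnwrapCons(Str* subject) {
      if (subject->is_cons) {
        if (!subject->second->is_empty) return nullptr;  // runtime
        subject = subject->first;
      }
      return subject->is_seq ? subject : nullptr;
    }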
3394 __ bind(&seq_string); | 3395 __ bind(&seq_string); |
3395 // subject: Subject string | 3396 // subject: Subject string |
3396 // regexp_data: RegExp data (FixedArray) | 3397 // regexp_data: RegExp data (FixedArray) |
3397 // r0: Instance type of subject string | 3398 // r0: Instance type of subject string |
3398 STATIC_ASSERT(4 == kAsciiStringTag); | 3399 STATIC_ASSERT(4 == kAsciiStringTag); |
3399 STATIC_ASSERT(kTwoByteStringTag == 0); | 3400 STATIC_ASSERT(kTwoByteStringTag == 0); |
3400 // Find the code object based on the assumptions above. | 3401 // Find the code object based on the assumptions above. |
3401 __ and_(r0, r0, Operand(kStringEncodingMask)); | 3402 __ and_(r0, r0, Operand(kStringEncodingMask)); |
3402 __ mov(r3, Operand(r0, ASR, 2), SetCC); | 3403 __ mov(r3, Operand(r0, ASR, 2), SetCC); |
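The and_/ASR pair above turns the instance type into a 0/1 selector for the code object: per the STATIC_ASSERTs, the encoding bit has value 4, so masking and shifting right by two yields 1 for ASCII and 0 for two-byte. Sketch under those assumptions:

    const int kStringEncodingMask = 4;  // per the STATIC_ASSERTs above

    // 1 selects the ASCII regexp code object, 0 the two-byte one.
    int EncodingSelector(int instance_type) {
      return (instance_type & kStringEncodingMask) >> 2;
    }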
(...skipping 427 matching lines...) |
3830 __ LoadRoot(ip, Heap::kEmptyStringRootIndex); | 3831 __ LoadRoot(ip, Heap::kEmptyStringRootIndex); |
3831 __ cmp(result_, Operand(ip)); | 3832 __ cmp(result_, Operand(ip)); |
3832 __ b(ne, &call_runtime_); | 3833 __ b(ne, &call_runtime_); |
3833 // Get the first of the two strings and load its instance type. | 3834 // Get the first of the two strings and load its instance type. |
3834 __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); | 3835 __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); |
3835 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 3836 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); |
3836 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 3837 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); |
3837 // If the first cons component is also non-flat, then go to runtime. | 3838 // If the first cons component is also non-flat, then go to runtime. |
3838 STATIC_ASSERT(kSeqStringTag == 0); | 3839 STATIC_ASSERT(kSeqStringTag == 0); |
3839 __ tst(result_, Operand(kStringRepresentationMask)); | 3840 __ tst(result_, Operand(kStringRepresentationMask)); |
3840 __ b(nz, &call_runtime_); | 3841 __ b(ne, &call_runtime_); |
3841 | 3842 |
3842 // Check for 1-byte or 2-byte string. | 3843 // Check for 1-byte or 2-byte string. |
3843 __ bind(&flat_string); | 3844 __ bind(&flat_string); |
3844 STATIC_ASSERT(kAsciiStringTag != 0); | 3845 STATIC_ASSERT(kAsciiStringTag != 0); |
3845 __ tst(result_, Operand(kStringEncodingMask)); | 3846 __ tst(result_, Operand(kStringEncodingMask)); |
3846 __ b(nz, &ascii_string); | 3847 __ b(ne, &ascii_string); |
3847 | 3848 |
3848 // 2-byte string. | 3849 // 2-byte string. |
3849 // Load the 2-byte character code into the result register. We can | 3850 // Load the 2-byte character code into the result register. We can |
3850 // add without shifting since the smi tag size is the log2 of the | 3851 // add without shifting since the smi tag size is the log2 of the |
3851 // number of bytes in a two-byte character. | 3852 // number of bytes in a two-byte character. |
3852 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); | 3853 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); |
3853 __ add(scratch_, object_, Operand(scratch_)); | 3854 __ add(scratch_, object_, Operand(scratch_)); |
3854 __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); | 3855 __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); |
3855 __ jmp(&got_char_code); | 3856 __ jmp(&got_char_code); |
3856 | 3857 |
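The add-without-shift above works because a smi-tagged index is the untagged index shifted left by one (kSmiTagSize == 1), which is exactly the factor-of-two byte scaling a two-byte character needs:

    // With kSmiTag == 0 and kSmiTagSize == 1, the smi encoding of an
    // index already equals its byte offset among 2-byte characters.
    int TwoByteCharOffset(int index) {
      int smi_tagged = index << 1;  // smi encoding
      return smi_tagged;            // == index * 2, no extra shift
    }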
(...skipping 64 matching lines...) |
3921 // StringCharFromCodeGenerator | 3922 // StringCharFromCodeGenerator |
3922 | 3923 |
3923 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 3924 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { |
3924 // Fast case of Heap::LookupSingleCharacterStringFromCode. | 3925 // Fast case of Heap::LookupSingleCharacterStringFromCode. |
3925 STATIC_ASSERT(kSmiTag == 0); | 3926 STATIC_ASSERT(kSmiTag == 0); |
3926 STATIC_ASSERT(kSmiShiftSize == 0); | 3927 STATIC_ASSERT(kSmiShiftSize == 0); |
3927 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); | 3928 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); |
3928 __ tst(code_, | 3929 __ tst(code_, |
3929 Operand(kSmiTagMask | | 3930 Operand(kSmiTagMask | |
3930 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); | 3931 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); |
3931 __ b(nz, &slow_case_); | 3932 __ b(ne, &slow_case_); |
3932 | 3933 |
3933 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 3934 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); |
3934 // At this point code register contains smi tagged ascii char code. | 3935 // At this point code register contains smi tagged ascii char code. |
3935 STATIC_ASSERT(kSmiTag == 0); | 3936 STATIC_ASSERT(kSmiTag == 0); |
3936 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); | 3937 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); |
3937 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 3938 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); |
3938 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); | 3939 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); |
3939 __ cmp(result_, Operand(ip)); | 3940 __ cmp(result_, Operand(ip)); |
3940 __ b(eq, &slow_case_); | 3941 __ b(eq, &slow_case_); |
3941 __ bind(&exit_); | 3942 __ bind(&exit_); |
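The single tst at the top of this fast case folds the smi check and the range check into one mask: the result is zero only when code_ is a smi whose payload fits the single-character cache. A sketch assuming the 32-bit smi layout and kMaxAsciiCharCode == 127:

    #include <cstdint>

    const int32_t kSmiTagMask = 1;         // assumptions: 32-bit smis,
    const int kSmiTagSize = 1;             // 7-bit ASCII char cache
    const int32_t kMaxAsciiCharCode = 127;

    // Zero iff 'code' is a smi with payload in [0, 127]: bit 0
    // catches non-smis, the high bits catch out-of-range codes.
    bool InSingleCharCache(int32_t code) {
      int32_t mask = kSmiTagMask | (~kMaxAsciiCharCode << kSmiTagSize);
      return (code & mask) == 0;
    }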
(...skipping 426 matching lines...) |
4368 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | 4369 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, |
4369 Register hash) { | 4370 Register hash) { |
4370 // hash += hash << 3; | 4371 // hash += hash << 3; |
4371 __ add(hash, hash, Operand(hash, LSL, 3)); | 4372 __ add(hash, hash, Operand(hash, LSL, 3)); |
4372 // hash ^= hash >> 11; | 4373 // hash ^= hash >> 11; |
4373 __ eor(hash, hash, Operand(hash, ASR, 11)); | 4374 __ eor(hash, hash, Operand(hash, ASR, 11)); |
4374 // hash += hash << 15; | 4375 // hash += hash << 15; |
4375 __ add(hash, hash, Operand(hash, LSL, 15), SetCC); | 4376 __ add(hash, hash, Operand(hash, LSL, 15), SetCC); |
4376 | 4377 |
4377 // if (hash == 0) hash = 27; | 4378 // if (hash == 0) hash = 27; |
4378 __ mov(hash, Operand(27), LeaveCC, nz); | 4379 __ mov(hash, Operand(27), LeaveCC, ne); |
4379 } | 4380 } |
4380 | 4381 |
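GenerateHashGetHash above is the finalization step of V8's running string hash; the SetCC/conditional-mov pair at the end replaces a zero hash without a branch. An equivalent plain C++ sketch (the ARM code uses an arithmetic shift where noted):

    #include <cstdint>

    uint32_t FinalizeStringHash(uint32_t hash) {
      hash += hash << 3;
      // ASR in the assembly; the cast mirrors the arithmetic shift.
      hash ^= static_cast<uint32_t>(static_cast<int32_t>(hash) >> 11);
      hash += hash << 15;
      if (hash == 0) hash = 27;  // zero presumably means "no hash yet"
      return hash;
    }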
4381 | 4382 |
4382 void SubStringStub::Generate(MacroAssembler* masm) { | 4383 void SubStringStub::Generate(MacroAssembler* masm) { |
4383 Label runtime; | 4384 Label runtime; |
4384 | 4385 |
4385 // Stack frame on entry. | 4386 // Stack frame on entry. |
4386 // lr: return address | 4387 // lr: return address |
4387 // sp[0]: to | 4388 // sp[0]: to |
4388 // sp[4]: from | 4389 // sp[4]: from |
(...skipping 718 matching lines...) |
5107 __ pop(r1); | 5108 __ pop(r1); |
5108 __ Jump(r2); | 5109 __ Jump(r2); |
5109 } | 5110 } |
5110 | 5111 |
5111 | 5112 |
5112 #undef __ | 5113 #undef __ |
5113 | 5114 |
5114 } } // namespace v8::internal | 5115 } } // namespace v8::internal |
5115 | 5116 |
5116 #endif // V8_TARGET_ARCH_ARM | 5117 #endif // V8_TARGET_ARCH_ARM |