Chromium Code Reviews

Side by Side Diff: src/arm/code-stubs-arm.cc

Issue 6597029: [Isolates] Merge r6300:6500 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: Created 9 years, 10 months ago
OLD | NEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 22 matching lines...)
34 #include "regexp-macro-assembler.h" 34 #include "regexp-macro-assembler.h"
35 35
36 namespace v8 { 36 namespace v8 {
37 namespace internal { 37 namespace internal {
38 38
39 39
40 #define __ ACCESS_MASM(masm) 40 #define __ ACCESS_MASM(masm)
41 41
42 static void EmitIdenticalObjectComparison(MacroAssembler* masm, 42 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
43 Label* slow, 43 Label* slow,
44 Condition cc, 44 Condition cond,
45 bool never_nan_nan); 45 bool never_nan_nan);
46 static void EmitSmiNonsmiComparison(MacroAssembler* masm, 46 static void EmitSmiNonsmiComparison(MacroAssembler* masm,
47 Register lhs, 47 Register lhs,
48 Register rhs, 48 Register rhs,
49 Label* lhs_not_nan, 49 Label* lhs_not_nan,
50 Label* slow, 50 Label* slow,
51 bool strict); 51 bool strict);
52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc); 52 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cond);
53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, 53 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
54 Register lhs, 54 Register lhs,
55 Register rhs); 55 Register rhs);
56 56
57 57
58 void FastNewClosureStub::Generate(MacroAssembler* masm) { 58 void FastNewClosureStub::Generate(MacroAssembler* masm) {
59 // Create a new closure from the given function info in new 59 // Create a new closure from the given function info in new
60 // space. Set the context to the current context in cp. 60 // space. Set the context to the current context in cp.
61 Label gc; 61 Label gc;
62 62
(...skipping 274 matching lines...)
337 // Compute lower part of fraction (last 12 bits). 337 // Compute lower part of fraction (last 12 bits).
338 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord)); 338 __ mov(mantissa, Operand(source_, LSL, HeapNumber::kMantissaBitsInTopWord));
339 // And the top (top 20 bits). 339 // And the top (top 20 bits).
340 __ orr(exponent, 340 __ orr(exponent,
341 exponent, 341 exponent,
342 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord)); 342 Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
343 __ Ret(); 343 __ Ret();
344 } 344 }
345 345
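A note on the bit layout the mov/orr pair above produces: an IEEE-754 double stores its mantissa across both 32-bit words of the heap number, with kMantissaBitsInTopWord (20) mantissa bits sharing the top word with the sign and exponent. A minimal host-side sketch of the same split, assuming the integer has already been shifted so its significant bits sit where the stub expects them:

    #include <cstdint>

    // Illustrative split of a 32-bit significand across the two words of
    // a heap number, mirroring the mov/orr pair above (not the stub).
    const int kMantissaBitsInTopWord = 20;

    void SplitSignificand(uint32_t source, uint32_t* exponent_word,
                          uint32_t* mantissa_word) {
      // Low 12 bits of source become the top of the lower mantissa word.
      *mantissa_word = source << kMantissaBitsInTopWord;
      // High 20 bits of source are or'ed into the exponent word.
      *exponent_word |= source >> (32 - kMantissaBitsInTopWord);
    }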
346 346
347 class FloatingPointHelper : public AllStatic {
348 public:
349
350 enum Destination {
351 kVFPRegisters,
352 kCoreRegisters
353 };
354
355
356 // Loads smis from r0 and r1 (right and left in binary operations) into
357 // floating point registers. Depending on the destination the values end up
358 // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination is
359 // floating point registers VFP3 must be supported. If core registers are
360 // requested when VFP3 is supported, d6 and d7 will be scratched.
361 static void LoadSmis(MacroAssembler* masm,
362 Destination destination,
363 Register scratch1,
364 Register scratch2);
365
366 // Loads objects from r0 and r1 (right and left in binary operations) into
367 // floating point registers. Depending on the destination the values end up
368 // either in d7 and d6 or in r2/r3 and r0/r1, respectively. If the destination is
369 // floating point registers VFP3 must be supported. If core registers are
370 // requested when VFP3 is supported, d6 and d7 will still be scratched. If
371 // either r0 or r1 is not a number (not a smi and not a heap number object) the
372 // not_number label is jumped to.
373 static void LoadOperands(MacroAssembler* masm,
374 FloatingPointHelper::Destination destination,
375 Register heap_number_map,
376 Register scratch1,
377 Register scratch2,
378 Label* not_number);
379 private:
380 static void LoadNumber(MacroAssembler* masm,
381 FloatingPointHelper::Destination destination,
382 Register object,
383 DwVfpRegister dst,
384 Register dst1,
385 Register dst2,
386 Register heap_number_map,
387 Register scratch1,
388 Register scratch2,
389 Label* not_number);
390 };
391
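For readers less familiar with V8's smi encoding: on 32-bit ARM a smi is the integer value shifted left by one tag bit, so loading a smi into a VFP register is an arithmetic shift right followed by an int-to-double conversion, which is exactly the mov/vmov/vcvt sequence LoadSmis emits below. A hedged host-side sketch (assumes kSmiTagSize == 1 and an arithmetic right shift):

    #include <cstdint>

    const int kSmiTagSize = 1;  // One low tag bit; kSmiTag == 0.

    // What LoadSmis computes for each operand, in plain C++ (illustrative):
    double SmiToDouble(int32_t tagged_smi) {
      int32_t untagged = tagged_smi >> kSmiTagSize;  // mov rX, rY, ASR #1
      return static_cast<double>(untagged);          // vmov sN; vcvt.f64.s32
    }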
392
393 void FloatingPointHelper::LoadSmis(MacroAssembler* masm,
394 FloatingPointHelper::Destination destination,
395 Register scratch1,
396 Register scratch2) {
397 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
398 CpuFeatures::Scope scope(VFP3);
399 __ mov(scratch1, Operand(r0, ASR, kSmiTagSize));
400 __ vmov(s15, scratch1);
401 __ vcvt_f64_s32(d7, s15);
402 __ mov(scratch1, Operand(r1, ASR, kSmiTagSize));
403 __ vmov(s13, scratch1);
404 __ vcvt_f64_s32(d6, s13);
405 if (destination == kCoreRegisters) {
406 __ vmov(r2, r3, d7);
407 __ vmov(r0, r1, d6);
408 }
409 } else {
410 ASSERT(destination == kCoreRegisters);
411 // Write Smi from r0 to r3 and r2 in double format.
412 __ mov(scratch1, Operand(r0));
413 ConvertToDoubleStub stub1(r3, r2, scratch1, scratch2);
414 __ push(lr);
415 __ Call(stub1.GetCode(), RelocInfo::CODE_TARGET);
416 // Write Smi from r1 to r1 and r0 in double format. r9 is scratch.
417 __ mov(scratch1, Operand(r1));
418 ConvertToDoubleStub stub2(r1, r0, scratch1, scratch2);
419 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
420 __ pop(lr);
421 }
422 }
423
424
425 void FloatingPointHelper::LoadOperands(
426 MacroAssembler* masm,
427 FloatingPointHelper::Destination destination,
428 Register heap_number_map,
429 Register scratch1,
430 Register scratch2,
431 Label* slow) {
432
433 // Load right operand (r0) to d7 or r2/r3.
434 LoadNumber(masm, destination,
435 r0, d7, r2, r3, heap_number_map, scratch1, scratch2, slow);
436
437 // Load left operand (r1) to d6 or r0/r1.
438 LoadNumber(masm, destination,
439 r1, d6, r0, r1, heap_number_map, scratch1, scratch2, slow);
440 }
441
442
443 void FloatingPointHelper::LoadNumber(MacroAssembler* masm,
444 Destination destination,
445 Register object,
446 DwVfpRegister dst,
447 Register dst1,
448 Register dst2,
449 Register heap_number_map,
450 Register scratch1,
451 Register scratch2,
452 Label* not_number) {
453 Label is_smi, done;
454
455 __ JumpIfSmi(object, &is_smi);
456 __ JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
457
458 // Handle loading a double from a heap number.
459 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
460 CpuFeatures::Scope scope(VFP3);
461 // Load the double from tagged HeapNumber to double register.
462 __ sub(scratch1, object, Operand(kHeapObjectTag));
463 __ vldr(dst, scratch1, HeapNumber::kValueOffset);
464 } else {
465 ASSERT(destination == kCoreRegisters);
466 // Load the double from heap number to dst1 and dst2 in double format.
467 __ Ldrd(dst1, dst2, FieldMemOperand(object, HeapNumber::kValueOffset));
468 }
469 __ jmp(&done);
470
471 // Handle loading a double from a smi.
472 __ bind(&is_smi);
473 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
474 CpuFeatures::Scope scope(VFP3);
475 // Convert smi to double.
476 __ SmiUntag(scratch1, object);
477 __ vmov(dst.high(), scratch1);
478 __ vcvt_f64_s32(dst, dst.high());
479 if (destination == kCoreRegisters) {
480 __ vmov(dst1, dst2, dst);
481 }
482 } else {
483 ASSERT(destination == kCoreRegisters);
484 // Write Smi to dst1 and dst2 double format.
485 __ mov(scratch1, Operand(object));
486 ConvertToDoubleStub stub(dst2, dst1, scratch1, scratch2);
487 __ push(lr);
488 __ Call(stub.GetCode(), RelocInfo::CODE_TARGET);
489 __ pop(lr);
490 }
491
492 __ bind(&done);
493 }
494
495
347 // See comment for class. 496 // See comment for class.
348 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) { 497 void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
349 Label max_negative_int; 498 Label max_negative_int;
350 // the_int_ has the answer which is a signed int32 but not a Smi. 499 // the_int_ has the answer which is a signed int32 but not a Smi.
351 // We test for the special value that has a different exponent. This test 500 // We test for the special value that has a different exponent. This test
352 // has the neat side effect of setting the flags according to the sign. 501 // has the neat side effect of setting the flags according to the sign.
353 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u); 502 STATIC_ASSERT(HeapNumber::kSignMask == 0x80000000u);
354 __ cmp(the_int_, Operand(0x80000000u)); 503 __ cmp(the_int_, Operand(0x80000000u));
355 __ b(eq, &max_negative_int); 504 __ b(eq, &max_negative_int);
356 // Set up the correct exponent in scratch_. All non-Smi int32s have the same. 505 // Set up the correct exponent in scratch_. All non-Smi int32s have the same.
(...skipping 31 matching lines...)
388 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset)); 537 __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
389 __ Ret(); 538 __ Ret();
390 } 539 }
391 540
392 541
393 // Handle the case where the lhs and rhs are the same object. 542 // Handle the case where the lhs and rhs are the same object.
394 // Equality is almost reflexive (everything but NaN), so this is a test 543 // Equality is almost reflexive (everything but NaN), so this is a test
395 // for "identity and not NaN". 544 // for "identity and not NaN".
396 static void EmitIdenticalObjectComparison(MacroAssembler* masm, 545 static void EmitIdenticalObjectComparison(MacroAssembler* masm,
397 Label* slow, 546 Label* slow,
398 Condition cc, 547 Condition cond,
399 bool never_nan_nan) { 548 bool never_nan_nan) {
400 Label not_identical; 549 Label not_identical;
401 Label heap_number, return_equal; 550 Label heap_number, return_equal;
402 __ cmp(r0, r1); 551 __ cmp(r0, r1);
403 __ b(ne, &not_identical); 552 __ b(ne, &not_identical);
404 553
405 // The two objects are identical. If we know that one of them isn't NaN then 554 // The two objects are identical. If we know that one of them isn't NaN then
406 // we now know they test equal. 555 // we now know they test equal.
407 if (cc != eq || !never_nan_nan) { 556 if (cond != eq || !never_nan_nan) {
408 // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(), 557 // Test for NaN. Sadly, we can't just compare to FACTORY->nan_value(),
409 // so we do the second best thing - test it ourselves. 558 // so we do the second best thing - test it ourselves.
410 // They are identical, and we know they are not both Smis, so neither of 559 // They are identical, and we know they are not both Smis, so neither of
411 // them is a Smi. If it's not a heap number, then return equal. 560 // them is a Smi. If it's not a heap number, then return equal.
412 if (cc == lt || cc == gt) { 561 if (cond == lt || cond == gt) {
413 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE); 562 __ CompareObjectType(r0, r4, r4, FIRST_JS_OBJECT_TYPE);
414 __ b(ge, slow); 563 __ b(ge, slow);
415 } else { 564 } else {
416 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE); 565 __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
417 __ b(eq, &heap_number); 566 __ b(eq, &heap_number);
418 // Comparing JS objects with <=, >= is complicated. 567 // Comparing JS objects with <=, >= is complicated.
419 if (cc != eq) { 568 if (cond != eq) {
420 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); 569 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE));
421 __ b(ge, slow); 570 __ b(ge, slow);
422 // Normally here we fall through to return_equal, but undefined is 571 // Normally here we fall through to return_equal, but undefined is
423 // special: (undefined == undefined) == true, but 572 // special: (undefined == undefined) == true, but
424 // (undefined <= undefined) == false! See ECMAScript 11.8.5. 573 // (undefined <= undefined) == false! See ECMAScript 11.8.5.
425 if (cc == le || cc == ge) { 574 if (cond == le || cond == ge) {
426 __ cmp(r4, Operand(ODDBALL_TYPE)); 575 __ cmp(r4, Operand(ODDBALL_TYPE));
427 __ b(ne, &return_equal); 576 __ b(ne, &return_equal);
428 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); 577 __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
429 __ cmp(r0, r2); 578 __ cmp(r0, r2);
430 __ b(ne, &return_equal); 579 __ b(ne, &return_equal);
431 if (cc == le) { 580 if (cond == le) {
432 // undefined <= undefined should fail. 581 // undefined <= undefined should fail.
433 __ mov(r0, Operand(GREATER)); 582 __ mov(r0, Operand(GREATER));
434 } else { 583 } else {
435 // undefined >= undefined should fail. 584 // undefined >= undefined should fail.
436 __ mov(r0, Operand(LESS)); 585 __ mov(r0, Operand(LESS));
437 } 586 }
438 __ Ret(); 587 __ Ret();
439 } 588 }
440 } 589 }
441 } 590 }
442 } 591 }
443 592
444 __ bind(&return_equal); 593 __ bind(&return_equal);
445 if (cc == lt) { 594 if (cond == lt) {
446 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves. 595 __ mov(r0, Operand(GREATER)); // Things aren't less than themselves.
447 } else if (cc == gt) { 596 } else if (cond == gt) {
448 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves. 597 __ mov(r0, Operand(LESS)); // Things aren't greater than themselves.
449 } else { 598 } else {
450 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves. 599 __ mov(r0, Operand(EQUAL)); // Things are <=, >=, ==, === themselves.
451 } 600 }
452 __ Ret(); 601 __ Ret();
453 602
454 if (cc != eq || !never_nan_nan) { 603 if (cond != eq || !never_nan_nan) {
455 // For less and greater we don't have to check for NaN since the result of 604 // For less and greater we don't have to check for NaN since the result of
456 // x < x is false regardless. For the others, here is some code to check 605 // x < x is false regardless. For the others, here is some code to check
457 // for NaN. 606 // for NaN.
458 if (cc != lt && cc != gt) { 607 if (cond != lt && cond != gt) {
459 __ bind(&heap_number); 608 __ bind(&heap_number);
460 // It is a heap number, so return non-equal if it's NaN and equal if it's 609 // It is a heap number, so return non-equal if it's NaN and equal if it's
461 // not NaN. 610 // not NaN.
462 611
463 // The representation of NaN values has all exponent bits (52..62) set, 612 // The representation of NaN values has all exponent bits (52..62) set,
464 // and not all mantissa bits (0..51) clear. 613 // and not all mantissa bits (0..51) clear.
465 // Read top bits of double representation (second word of value). 614 // Read top bits of double representation (second word of value).
466 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); 615 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
467 // Test that exponent bits are all set. 616 // Test that exponent bits are all set.
468 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits); 617 __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
469 // NaNs have all-one exponents so they sign extend to -1. 618 // NaNs have all-one exponents so they sign extend to -1.
470 __ cmp(r3, Operand(-1)); 619 __ cmp(r3, Operand(-1));
471 __ b(ne, &return_equal); 620 __ b(ne, &return_equal);
472 621
473 // Shift out flag and all exponent bits, retaining only mantissa. 622 // Shift out flag and all exponent bits, retaining only mantissa.
474 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord)); 623 __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
475 // Or with all low-bits of mantissa. 624 // Or with all low-bits of mantissa.
476 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); 625 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
477 __ orr(r0, r3, Operand(r2), SetCC); 626 __ orr(r0, r3, Operand(r2), SetCC);
478 // For equal we already have the right value in r0: Return zero (equal) 627 // For equal we already have the right value in r0: Return zero (equal)
479 // if all bits in mantissa are zero (it's an Infinity) and non-zero if 628 // if all bits in mantissa are zero (it's an Infinity) and non-zero if
480 // not (it's a NaN). For <= and >= we need to load r0 with the failing 629 // not (it's a NaN). For <= and >= we need to load r0 with the failing
481 // value if it's a NaN. 630 // value if it's a NaN.
482 if (cc != eq) { 631 if (cond != eq) {
483 // All-zero means Infinity means equal. 632 // All-zero means Infinity means equal.
484 __ Ret(eq); 633 __ Ret(eq);
485 if (cc == le) { 634 if (cond == le) {
486 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail. 635 __ mov(r0, Operand(GREATER)); // NaN <= NaN should fail.
487 } else { 636 } else {
488 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail. 637 __ mov(r0, Operand(LESS)); // NaN >= NaN should fail.
489 } 638 }
490 } 639 }
491 __ Ret(); 640 __ Ret();
492 } 641 }
493 // No fall through here. 642 // No fall through here.
494 } 643 }
495 644
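The NaN test above depends only on the IEEE-754 layout: a double is NaN exactly when its eleven exponent bits are all ones and its 52 mantissa bits are not all zero (an all-zero mantissa with that exponent is an infinity). A host-side sketch of the same predicate, assuming the usual 64-bit representation:

    #include <cstdint>
    #include <cstring>

    // Equivalent of the emitted check: exponent bits all set, mantissa
    // non-zero. The stub works on the two 32-bit halves instead.
    bool IsNaNBits(double value) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));
      uint64_t exponent = (bits >> 52) & 0x7FF;
      uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
      return exponent == 0x7FF && mantissa != 0;
    }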
(...skipping 86 matching lines...)
582 // Convert rhs to a double in r0, r1. 731 // Convert rhs to a double in r0, r1.
583 __ mov(r7, Operand(rhs)); 732 __ mov(r7, Operand(rhs));
584 ConvertToDoubleStub stub2(r1, r0, r7, r6); 733 ConvertToDoubleStub stub2(r1, r0, r7, r6);
585 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET); 734 __ Call(stub2.GetCode(), RelocInfo::CODE_TARGET);
586 __ pop(lr); 735 __ pop(lr);
587 } 736 }
588 // Fall through to both_loaded_as_doubles. 737 // Fall through to both_loaded_as_doubles.
589 } 738 }
590 739
591 740
592 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cc) { 741 void EmitNanCheck(MacroAssembler* masm, Label* lhs_not_nan, Condition cond) {
593 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); 742 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
594 Register rhs_exponent = exp_first ? r0 : r1; 743 Register rhs_exponent = exp_first ? r0 : r1;
595 Register lhs_exponent = exp_first ? r2 : r3; 744 Register lhs_exponent = exp_first ? r2 : r3;
596 Register rhs_mantissa = exp_first ? r1 : r0; 745 Register rhs_mantissa = exp_first ? r1 : r0;
597 Register lhs_mantissa = exp_first ? r3 : r2; 746 Register lhs_mantissa = exp_first ? r3 : r2;
598 Label one_is_nan, neither_is_nan; 747 Label one_is_nan, neither_is_nan;
599 748
600 __ Sbfx(r4, 749 __ Sbfx(r4,
601 lhs_exponent, 750 lhs_exponent,
602 HeapNumber::kExponentShift, 751 HeapNumber::kExponentShift,
(...skipping 19 matching lines...)
622 __ mov(r4, 771 __ mov(r4,
623 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord), 772 Operand(rhs_exponent, LSL, HeapNumber::kNonMantissaBitsInTopWord),
624 SetCC); 773 SetCC);
625 __ b(ne, &one_is_nan); 774 __ b(ne, &one_is_nan);
626 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE)); 775 __ cmp(rhs_mantissa, Operand(0, RelocInfo::NONE));
627 __ b(eq, &neither_is_nan); 776 __ b(eq, &neither_is_nan);
628 777
629 __ bind(&one_is_nan); 778 __ bind(&one_is_nan);
630 // NaN comparisons always fail. 779 // NaN comparisons always fail.
631 // Load whatever we need in r0 to make the comparison fail. 780 // Load whatever we need in r0 to make the comparison fail.
632 if (cc == lt || cc == le) { 781 if (cond == lt || cond == le) {
633 __ mov(r0, Operand(GREATER)); 782 __ mov(r0, Operand(GREATER));
634 } else { 783 } else {
635 __ mov(r0, Operand(LESS)); 784 __ mov(r0, Operand(LESS));
636 } 785 }
637 __ Ret(); 786 __ Ret();
638 787
639 __ bind(&neither_is_nan); 788 __ bind(&neither_is_nan);
640 } 789 }
641 790
642 791
643 // See comment at call site. 792 // See comment at call site.
644 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm, Condition cc) { 793 static void EmitTwoNonNanDoubleComparison(MacroAssembler* masm,
794 Condition cond) {
645 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset); 795 bool exp_first = (HeapNumber::kExponentOffset == HeapNumber::kValueOffset);
646 Register rhs_exponent = exp_first ? r0 : r1; 796 Register rhs_exponent = exp_first ? r0 : r1;
647 Register lhs_exponent = exp_first ? r2 : r3; 797 Register lhs_exponent = exp_first ? r2 : r3;
648 Register rhs_mantissa = exp_first ? r1 : r0; 798 Register rhs_mantissa = exp_first ? r1 : r0;
649 Register lhs_mantissa = exp_first ? r3 : r2; 799 Register lhs_mantissa = exp_first ? r3 : r2;
650 800
651 // r0, r1, r2, r3 have the two doubles. Neither is a NaN. 801 // r0, r1, r2, r3 have the two doubles. Neither is a NaN.
652 if (cc == eq) { 802 if (cond == eq) {
653 // Doubles are not equal unless they have the same bit pattern. 803 // Doubles are not equal unless they have the same bit pattern.
654 // Exception: 0 and -0. 804 // Exception: 0 and -0.
655 __ cmp(rhs_mantissa, Operand(lhs_mantissa)); 805 __ cmp(rhs_mantissa, Operand(lhs_mantissa));
656 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne); 806 __ orr(r0, rhs_mantissa, Operand(lhs_mantissa), LeaveCC, ne);
657 // Return non-zero if the numbers are unequal. 807 // Return non-zero if the numbers are unequal.
658 __ Ret(ne); 808 __ Ret(ne);
659 809
660 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC); 810 __ sub(r0, rhs_exponent, Operand(lhs_exponent), SetCC);
661 // If exponents are equal then return 0. 811 // If exponents are equal then return 0.
662 __ Ret(eq); 812 __ Ret(eq);
(...skipping 165 matching lines...)
828 __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1)); 978 __ mov(mask, Operand(mask, ASR, kSmiTagSize + 1));
829 __ sub(mask, mask, Operand(1)); // Make mask. 979 __ sub(mask, mask, Operand(1)); // Make mask.
830 980
831 // Calculate the entry in the number string cache. The hash value in the 981 // Calculate the entry in the number string cache. The hash value in the
832 // number string cache for smis is just the smi value, and the hash for 982 // number string cache for smis is just the smi value, and the hash for
833 // doubles is the xor of the upper and lower words. See 983 // doubles is the xor of the upper and lower words. See
834 // Heap::GetNumberStringCache. 984 // Heap::GetNumberStringCache.
835 Label is_smi; 985 Label is_smi;
836 Label load_result_from_cache; 986 Label load_result_from_cache;
837 if (!object_is_smi) { 987 if (!object_is_smi) {
838 __ BranchOnSmi(object, &is_smi); 988 __ JumpIfSmi(object, &is_smi);
839 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 989 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
840 CpuFeatures::Scope scope(VFP3); 990 CpuFeatures::Scope scope(VFP3);
841 __ CheckMap(object, 991 __ CheckMap(object,
842 scratch1, 992 scratch1,
843 Heap::kHeapNumberMapRootIndex, 993 Heap::kHeapNumberMapRootIndex,
844 not_found, 994 not_found,
845 true); 995 true);
846 996
847 STATIC_ASSERT(8 == kDoubleSize); 997 STATIC_ASSERT(8 == kDoubleSize);
848 __ add(scratch1, 998 __ add(scratch1,
849 object, 999 object,
850 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); 1000 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
851 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit()); 1001 __ ldm(ia, scratch1, scratch1.bit() | scratch2.bit());
852 __ eor(scratch1, scratch1, Operand(scratch2)); 1002 __ eor(scratch1, scratch1, Operand(scratch2));
853 __ and_(scratch1, scratch1, Operand(mask)); 1003 __ and_(scratch1, scratch1, Operand(mask));
854 1004
855 // Calculate address of entry in string cache: each entry consists 1005 // Calculate address of entry in string cache: each entry consists
856 // of two pointer sized fields. 1006 // of two pointer sized fields.
857 __ add(scratch1, 1007 __ add(scratch1,
858 number_string_cache, 1008 number_string_cache,
859 Operand(scratch1, LSL, kPointerSizeLog2 + 1)); 1009 Operand(scratch1, LSL, kPointerSizeLog2 + 1));
860 1010
861 Register probe = mask; 1011 Register probe = mask;
862 __ ldr(probe, 1012 __ ldr(probe,
863 FieldMemOperand(scratch1, FixedArray::kHeaderSize)); 1013 FieldMemOperand(scratch1, FixedArray::kHeaderSize));
864 __ BranchOnSmi(probe, not_found); 1014 __ JumpIfSmi(probe, not_found);
865 __ sub(scratch2, object, Operand(kHeapObjectTag)); 1015 __ sub(scratch2, object, Operand(kHeapObjectTag));
866 __ vldr(d0, scratch2, HeapNumber::kValueOffset); 1016 __ vldr(d0, scratch2, HeapNumber::kValueOffset);
867 __ sub(probe, probe, Operand(kHeapObjectTag)); 1017 __ sub(probe, probe, Operand(kHeapObjectTag));
868 __ vldr(d1, probe, HeapNumber::kValueOffset); 1018 __ vldr(d1, probe, HeapNumber::kValueOffset);
869 __ VFPCompareAndSetFlags(d0, d1); 1019 __ VFPCompareAndSetFlags(d0, d1);
870 __ b(ne, not_found); // The cache did not contain this value. 1020 __ b(ne, not_found); // The cache did not contain this value.
871 __ b(&load_result_from_cache); 1021 __ b(&load_result_from_cache);
872 } else { 1022 } else {
873 __ b(not_found); 1023 __ b(not_found);
874 } 1024 }
(...skipping 56 matching lines...)
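As the cache-lookup comment above notes, a heap number's cache index is the xor of the double's upper and lower 32-bit words, masked to the cache size. A hypothetical host-side version of that hash (mask is assumed to be the cache capacity minus one, a power of two):

    #include <cstdint>
    #include <cstring>

    // Illustrative number-string-cache hash for doubles: xor the two
    // 32-bit halves, then mask to the table size (mask == size - 1).
    uint32_t NumberStringCacheHash(double number, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &number, sizeof(bits));
      uint32_t lower = static_cast<uint32_t>(bits);
      uint32_t upper = static_cast<uint32_t>(bits >> 32);
      return (lower ^ upper) & mask;
    }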
931 __ orr(r2, r1, r0); 1081 __ orr(r2, r1, r0);
932 __ tst(r2, Operand(kSmiTagMask)); 1082 __ tst(r2, Operand(kSmiTagMask));
933 __ b(ne, &not_two_smis); 1083 __ b(ne, &not_two_smis);
934 __ mov(r1, Operand(r1, ASR, 1)); 1084 __ mov(r1, Operand(r1, ASR, 1));
935 __ sub(r0, r1, Operand(r0, ASR, 1)); 1085 __ sub(r0, r1, Operand(r0, ASR, 1));
936 __ Ret(); 1086 __ Ret();
937 __ bind(&not_two_smis); 1087 __ bind(&not_two_smis);
938 } else if (FLAG_debug_code) { 1088 } else if (FLAG_debug_code) {
939 __ orr(r2, r1, r0); 1089 __ orr(r2, r1, r0);
940 __ tst(r2, Operand(kSmiTagMask)); 1090 __ tst(r2, Operand(kSmiTagMask));
941 __ Assert(nz, "CompareStub: unexpected smi operands."); 1091 __ Assert(ne, "CompareStub: unexpected smi operands.");
942 } 1092 }
943 1093
944 // NOTICE! This code is only reached after a smi-fast-case check, so 1094 // NOTICE! This code is only reached after a smi-fast-case check, so
945 // it is certain that at least one operand isn't a smi. 1095 // it is certain that at least one operand isn't a smi.
946 1096
947 // Handle the case where the objects are identical. Either returns the answer 1097 // Handle the case where the objects are identical. Either returns the answer
948 // or goes to slow. Only falls through if the objects were not identical. 1098 // or goes to slow. Only falls through if the objects were not identical.
949 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_); 1099 EmitIdenticalObjectComparison(masm, &slow, cc_, never_nan_nan_);
950 1100
951 // If either is a Smi (we know that not both are), then they can only 1101 // If either is a Smi (we know that not both are), then they can only
(...skipping 418 matching lines...)
1370 } else if (Token::ADD == op_) { 1520 } else if (Token::ADD == op_) {
1371 __ vadd(d5, d6, d7); 1521 __ vadd(d5, d6, d7);
1372 } else if (Token::SUB == op_) { 1522 } else if (Token::SUB == op_) {
1373 __ vsub(d5, d6, d7); 1523 __ vsub(d5, d6, d7);
1374 } else { 1524 } else {
1375 UNREACHABLE(); 1525 UNREACHABLE();
1376 } 1526 }
1377 __ sub(r0, r5, Operand(kHeapObjectTag)); 1527 __ sub(r0, r5, Operand(kHeapObjectTag));
1378 __ vstr(d5, r0, HeapNumber::kValueOffset); 1528 __ vstr(d5, r0, HeapNumber::kValueOffset);
1379 __ add(r0, r0, Operand(kHeapObjectTag)); 1529 __ add(r0, r0, Operand(kHeapObjectTag));
1380 __ mov(pc, lr); 1530 __ Ret();
1381 } else { 1531 } else {
1382 // If we did not inline the operation, then the arguments are in: 1532 // If we did not inline the operation, then the arguments are in:
1383 // r0: Left value (least significant part of mantissa). 1533 // r0: Left value (least significant part of mantissa).
1384 // r1: Left value (sign, exponent, top of mantissa). 1534 // r1: Left value (sign, exponent, top of mantissa).
1385 // r2: Right value (least significant part of mantissa). 1535 // r2: Right value (least significant part of mantissa).
1386 // r3: Right value (sign, exponent, top of mantissa). 1536 // r3: Right value (sign, exponent, top of mantissa).
1387 // r5: Address of heap number for result. 1537 // r5: Address of heap number for result.
1388 1538
1389 __ push(lr); // For later. 1539 __ push(lr); // For later.
1390 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments. 1540 __ PrepareCallCFunction(4, r4); // Two doubles count as 4 arguments.
(...skipping 564 matching lines...)
1955 } 2105 }
1956 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL); 2106 HandleBinaryOpSlowCases(masm, &not_smi, lhs, rhs, Builtins::MUL);
1957 break; 2107 break;
1958 } 2108 }
1959 2109
1960 case Token::DIV: 2110 case Token::DIV:
1961 case Token::MOD: { 2111 case Token::MOD: {
1962 Label not_smi; 2112 Label not_smi;
1963 if (ShouldGenerateSmiCode() && specialized_on_rhs_) { 2113 if (ShouldGenerateSmiCode() && specialized_on_rhs_) {
1964 Label lhs_is_unsuitable; 2114 Label lhs_is_unsuitable;
1965 __ BranchOnNotSmi(lhs, &not_smi); 2115 __ JumpIfNotSmi(lhs, &not_smi);
1966 if (IsPowerOf2(constant_rhs_)) { 2116 if (IsPowerOf2(constant_rhs_)) {
1967 if (op_ == Token::MOD) { 2117 if (op_ == Token::MOD) {
1968 __ and_(rhs, 2118 __ and_(rhs,
1969 lhs, 2119 lhs,
1970 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)), 2120 Operand(0x80000000u | ((constant_rhs_ << kSmiTagSize) - 1)),
1971 SetCC); 2121 SetCC);
1972 // We now have the answer, but if the input was negative we also 2122 // We now have the answer, but if the input was negative we also
1973 // have the sign bit. Our work is done if the result is 2123 // have the sign bit. Our work is done if the result is
1974 // positive or zero: 2124 // positive or zero:
1975 if (!rhs.is(r0)) { 2125 if (!rhs.is(r0)) {
(...skipping 226 matching lines...)
2202 2352
2203 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { 2353 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
2204 GenericBinaryOpStub stub(key, type_info); 2354 GenericBinaryOpStub stub(key, type_info);
2205 return stub.GetCode(); 2355 return stub.GetCode();
2206 } 2356 }
2207 2357
2208 2358
2209 Handle<Code> GetTypeRecordingBinaryOpStub(int key, 2359 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
2210 TRBinaryOpIC::TypeInfo type_info, 2360 TRBinaryOpIC::TypeInfo type_info,
2211 TRBinaryOpIC::TypeInfo result_type_info) { 2361 TRBinaryOpIC::TypeInfo result_type_info) {
2362 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
2363 return stub.GetCode();
2364 }
2365
2366
2367 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
2368 Label get_result;
2369
2370 __ Push(r1, r0);
2371
2372 __ mov(r2, Operand(Smi::FromInt(MinorKey())));
2373 __ mov(r1, Operand(Smi::FromInt(op_)));
2374 __ mov(r0, Operand(Smi::FromInt(operands_type_)));
2375 __ Push(r2, r1, r0);
2376
2377 __ TailCallExternalReference(
2378 ExternalReference(IC_Utility(IC::kTypeRecordingBinaryOp_Patch)),
2379 5,
2380 1);
2381 }
2382
2383
2384 void TypeRecordingBinaryOpStub::GenerateTypeTransitionWithSavedArgs(
2385 MacroAssembler* masm) {
2212 UNIMPLEMENTED(); 2386 UNIMPLEMENTED();
2213 return Handle<Code>::null(); 2387 }
2388
2389
2390 void TypeRecordingBinaryOpStub::Generate(MacroAssembler* masm) {
2391 switch (operands_type_) {
2392 case TRBinaryOpIC::UNINITIALIZED:
2393 GenerateTypeTransition(masm);
2394 break;
2395 case TRBinaryOpIC::SMI:
2396 GenerateSmiStub(masm);
2397 break;
2398 case TRBinaryOpIC::INT32:
2399 GenerateInt32Stub(masm);
2400 break;
2401 case TRBinaryOpIC::HEAP_NUMBER:
2402 GenerateHeapNumberStub(masm);
2403 break;
2404 case TRBinaryOpIC::STRING:
2405 GenerateStringStub(masm);
2406 break;
2407 case TRBinaryOpIC::GENERIC:
2408 GenerateGeneric(masm);
2409 break;
2410 default:
2411 UNREACHABLE();
2412 }
2413 }
2414
2415
2416 const char* TypeRecordingBinaryOpStub::GetName() {
2417 if (name_ != NULL) return name_;
2418 const int kMaxNameLength = 100;
2419 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
2420 kMaxNameLength);
2421 if (name_ == NULL) return "OOM";
2422 const char* op_name = Token::Name(op_);
2423 const char* overwrite_name;
2424 switch (mode_) {
2425 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
2426 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
2427 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
2428 default: overwrite_name = "UnknownOverwrite"; break;
2429 }
2430
2431 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
2432 "TypeRecordingBinaryOpStub_%s_%s_%s",
2433 op_name,
2434 overwrite_name,
2435 TRBinaryOpIC::GetName(operands_type_));
2436 return name_;
2437 }
2438
2439
2440 void TypeRecordingBinaryOpStub::GenerateOptimisticSmiOperation(
2441 MacroAssembler* masm) {
2442 Register left = r1;
2443 Register right = r0;
2444
2445 ASSERT(right.is(r0));
2446
2447 switch (op_) {
2448 case Token::ADD:
2449 __ add(right, left, Operand(right), SetCC); // Add optimistically.
2450 __ Ret(vc);
2451 __ sub(right, right, Operand(left)); // Revert optimistic add.
2452 break;
2453 case Token::SUB:
2454 __ sub(right, left, Operand(right), SetCC); // Subtract optimistically.
2455 __ Ret(vc);
2456 __ sub(right, left, Operand(right)); // Revert optimistic subtract.
2457 break;
2458 default:
2459 UNREACHABLE();
2460 }
2461 }
2462
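The optimistic pattern above works because adding two tagged smis is the same word-level add as adding the untagged integers (the tag bit is zero), and the ARM V flag reports signed overflow, so Ret(vc) returns only when the result is still a valid smi. A rough C++ analogue of the ADD path, using a compiler overflow builtin (GCC/Clang) in place of the V flag:

    #include <cstdint>

    // Sketch of the optimistic smi ADD: attempt the add, keep the result
    // if it did not overflow, otherwise revert and take the slow path.
    bool OptimisticSmiAdd(int32_t left, int32_t right, int32_t* result) {
      int32_t sum;
      if (!__builtin_add_overflow(left, right, &sum)) {  // add ...; Ret(vc)
        *result = sum;  // Still a correctly tagged smi.
        return true;
      }
      return false;  // Reverted in the stub: sub right, right, left.
    }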
2463
2464 void TypeRecordingBinaryOpStub::GenerateVFPOperation(
2465 MacroAssembler* masm) {
2466 switch (op_) {
2467 case Token::ADD:
2468 __ vadd(d5, d6, d7);
2469 break;
2470 case Token::SUB:
2471 __ vsub(d5, d6, d7);
2472 break;
2473 default:
2474 UNREACHABLE();
2475 }
2476 }
2477
2478
2479 // Generate the smi code. If the operation on smis is successful, the result is
2480 // returned. If the result is not a smi and heap number allocation is not
2481 // requested, the code falls through. If number allocation is requested but a
2482 // heap number cannot be allocated, the code jumps to the label gc_required.
2483 void TypeRecordingBinaryOpStub::GenerateSmiCode(MacroAssembler* masm,
2484 Label* gc_required,
2485 SmiCodeGenerateHeapNumberResults allow_heapnumber_results) {
2486 Label not_smis;
2487
2488 ASSERT(op_ == Token::ADD || op_ == Token::SUB);
2489
2490 Register left = r1;
2491 Register right = r0;
2492 Register scratch1 = r7;
2493 Register scratch2 = r9;
2494
2495 // Perform combined smi check on both operands.
2496 __ orr(scratch1, left, Operand(right));
2497 STATIC_ASSERT(kSmiTag == 0);
2498 __ tst(scratch1, Operand(kSmiTagMask));
2499 __ b(ne, &not_smis);
2500
2501 GenerateOptimisticSmiOperation(masm);
2502
2503 // If heap number results are possible generate the result in an allocated
2504 // heap number.
2505 if (allow_heapnumber_results == ALLOW_HEAPNUMBER_RESULTS) {
2506 FloatingPointHelper::Destination destination =
2507 Isolate::Current()->cpu_features()->IsSupported(VFP3) &&
2508 Token::MOD != op_ ?
2509 FloatingPointHelper::kVFPRegisters :
2510 FloatingPointHelper::kCoreRegisters;
2511
2512 Register heap_number_map = r6;
2513 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2514
2515 // Allocate new heap number for result.
2516 Register heap_number = r5;
2517 __ AllocateHeapNumber(
2518 heap_number, scratch1, scratch2, heap_number_map, gc_required);
2519
2520 // Load the smis.
2521 FloatingPointHelper::LoadSmis(masm, destination, scratch1, scratch2);
2522
2523 // Calculate the result.
2524 if (destination == FloatingPointHelper::kVFPRegisters) {
2525 // Using VFP registers:
2526 // d6: Left value
2527 // d7: Right value
2528 CpuFeatures::Scope scope(VFP3);
2529 GenerateVFPOperation(masm);
2530
2531 __ sub(r0, heap_number, Operand(kHeapObjectTag));
2532 __ vstr(d5, r0, HeapNumber::kValueOffset);
2533 __ add(r0, r0, Operand(kHeapObjectTag));
2534 __ Ret();
2535 } else {
2536 // Using core registers:
2537 // r0: Left value (least significant part of mantissa).
2538 // r1: Left value (sign, exponent, top of mantissa).
2539 // r2: Right value (least significant part of mantissa).
2540 // r3: Right value (sign, exponent, top of mantissa).
2541
2542 __ push(lr); // For later.
2543 __ PrepareCallCFunction(4, scratch1); // Two doubles are 4 arguments.
2544 // Call C routine that may not cause GC or other trouble. r5 is callee
2545 // save.
2546 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2547 // Store answer in the overwritable heap number.
2548 #if !defined(USE_ARM_EABI)
2549 // Double returned in fp coprocessor registers 0 and 1, encoded as
2550 // register cr8. Offsets must be divisible by 4 for the coprocessor so we
2551 // need to subtract the tag from r5.
2552 __ sub(scratch1, heap_number, Operand(kHeapObjectTag));
2553 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2554 #else
2555 // Double returned in registers 0 and 1.
2556 __ Strd(r0, r1, FieldMemOperand(heap_number, HeapNumber::kValueOffset));
2557 #endif
2558 __ mov(r0, Operand(heap_number));
2559 // And we are done.
2560 __ pop(pc);
2561 }
2562 }
2563 __ bind(&not_smis);
2564 }
2565
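The combined check at the top of GenerateSmiCode relies on kSmiTag being zero: or'ing the two operands leaves the low tag bit clear only if both operands' tag bits were clear. A one-line sketch of the predicate, assuming the 32-bit encoding:

    #include <cstdint>

    const uint32_t kSmiTagMask = 1;  // Low bit; kSmiTag == 0.

    // Both values are smis iff the OR of their tag bits is still zero,
    // which is the orr/tst pair emitted above.
    bool BothSmis(uint32_t left, uint32_t right) {
      return ((left | right) & kSmiTagMask) == 0;
    }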
2566
2567 void TypeRecordingBinaryOpStub::GenerateSmiStub(MacroAssembler* masm) {
2568 Label not_smis, call_runtime;
2569
2570 ASSERT(op_ == Token::ADD || op_ == Token::SUB);
2571
2572 if (result_type_ == TRBinaryOpIC::UNINITIALIZED ||
2573 result_type_ == TRBinaryOpIC::SMI) {
2574 // Only allow smi results.
2575 GenerateSmiCode(masm, NULL, NO_HEAPNUMBER_RESULTS);
2576 } else {
2577 // Allow heap number result and don't make a transition if a heap number
2578 // cannot be allocated.
2579 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2580 }
2581
2582 // Code falls through if the result is not returned as either a smi or heap
2583 // number.
2584 GenerateTypeTransition(masm);
2585
2586 __ bind(&call_runtime);
2587 GenerateCallRuntime(masm);
2588 }
2589
2590
2591 void TypeRecordingBinaryOpStub::GenerateStringStub(MacroAssembler* masm) {
2592 ASSERT(operands_type_ == TRBinaryOpIC::STRING);
2593 ASSERT(op_ == Token::ADD);
2594 // Try to add arguments as strings, otherwise, transition to the generic
2595 // TRBinaryOpIC type.
2596 GenerateAddStrings(masm);
2597 GenerateTypeTransition(masm);
2598 }
2599
2600
2601 void TypeRecordingBinaryOpStub::GenerateInt32Stub(MacroAssembler* masm) {
2602 ASSERT(op_ == Token::ADD || op_ == Token::SUB);
2603
2604 ASSERT(operands_type_ == TRBinaryOpIC::INT32);
2605
2606 GenerateTypeTransition(masm);
2607 }
2608
2609
2610 void TypeRecordingBinaryOpStub::GenerateHeapNumberStub(MacroAssembler* masm) {
2611 ASSERT(op_ == Token::ADD || op_ == Token::SUB);
2612
2613 Register scratch1 = r7;
2614 Register scratch2 = r9;
2615
2616 Label not_number, call_runtime;
2617 ASSERT(operands_type_ == TRBinaryOpIC::HEAP_NUMBER);
2618
2619 Register heap_number_map = r6;
2620 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2621
2622 // Load left and right operands into d6 and d7 or r0/r1 and r2/r3 depending on
2623 // whether VFP3 is available.
2624 FloatingPointHelper::Destination destination =
2625 Isolate::Current()->cpu_features()->IsSupported(VFP3) ?
2626 FloatingPointHelper::kVFPRegisters :
2627 FloatingPointHelper::kCoreRegisters;
2628 FloatingPointHelper::LoadOperands(masm,
2629 destination,
2630 heap_number_map,
2631 scratch1,
2632 scratch2,
2633 &not_number);
2634 if (destination == FloatingPointHelper::kVFPRegisters) {
2635 // Use floating point instructions for the binary operation.
2636 CpuFeatures::Scope scope(VFP3);
2637 GenerateVFPOperation(masm);
2638
2639 // Get a heap number object for the result - might be left or right if one
2640 // of them is overwritable.
2641 GenerateHeapResultAllocation(
2642 masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
2643
2644 // Fill the result into the allocated heap number and return.
2645 __ sub(r0, r4, Operand(kHeapObjectTag));
2646 __ vstr(d5, r0, HeapNumber::kValueOffset);
2647 __ add(r0, r0, Operand(kHeapObjectTag));
2648 __ Ret();
2649
2650 } else {
2651 // Call a C function for the binary operation.
2652 // r0/r1: Left operand
2653 // r2/r3: Right operand
2654
2655 // Get a heap number object for the result - might be left or right if one
2656 // of them is overwritable. Uses a callee-saved register to keep the value
2657 // across the C call.
2658 GenerateHeapResultAllocation(
2659 masm, r4, heap_number_map, scratch1, scratch2, &call_runtime);
2660
2661 __ push(lr); // For returning later (no GC after this point).
2662 __ PrepareCallCFunction(4, scratch1); // Two doubles count as 4 arguments.
2663 // Call C routine that may not cause GC or other trouble. r4 is callee
2664 // saved.
2665 __ CallCFunction(ExternalReference::double_fp_operation(op_), 4);
2666
2667 // Fill the result into the allocated heap number.
2668 #if !defined(USE_ARM_EABI)
2669 // Double returned in fp coprocessor registers 0 and 1, encoded as
2670 // register cr8. Offsets must be divisible by 4 for the coprocessor so we
2671 // need to subtract the tag from r4.
2672 __ sub(scratch1, r4, Operand(kHeapObjectTag));
2673 __ stc(p1, cr8, MemOperand(scratch1, HeapNumber::kValueOffset));
2674 #else
2675 // Double returned in registers 0 and 1.
2676 __ Strd(r0, r1, FieldMemOperand(r4, HeapNumber::kValueOffset));
2677 #endif
2678 __ mov(r0, Operand(r4));
2679 __ pop(pc); // Return to the pushed lr.
2680 }
2681
2682 __ bind(&not_number);
2683 GenerateTypeTransition(masm);
2684
2685 __ bind(&call_runtime);
2686 GenerateCallRuntime(masm);
2687 }
2688
2689
2690 void TypeRecordingBinaryOpStub::GenerateGeneric(MacroAssembler* masm) {
2691 ASSERT(op_ == Token::ADD || op_ == Token::SUB);
2692
2693 Label call_runtime;
2694
2695 GenerateSmiCode(masm, &call_runtime, ALLOW_HEAPNUMBER_RESULTS);
2696
2697 // If all else fails, use the runtime system to get the correct
2698 // result.
2699 __ bind(&call_runtime);
2700
2701 // Try to add strings before calling runtime.
2702 if (op_ == Token::ADD) {
2703 GenerateAddStrings(masm);
2704 }
2705
2706 GenericBinaryOpStub stub(op_, mode_, r1, r0);
2707 __ TailCallStub(&stub);
2708 }
2709
2710
2711 void TypeRecordingBinaryOpStub::GenerateAddStrings(MacroAssembler* masm) {
2712 ASSERT(op_ == Token::ADD);
2713
2714 Register left = r1;
2715 Register right = r0;
2716 Label call_runtime;
2717
2718 // Check if first argument is a string.
2719 __ JumpIfSmi(left, &call_runtime);
2720 __ CompareObjectType(left, r2, r2, FIRST_NONSTRING_TYPE);
2721 __ b(ge, &call_runtime);
2722
2723 // First argument is a string, test second.
2724 __ JumpIfSmi(right, &call_runtime);
2725 __ CompareObjectType(right, r2, r2, FIRST_NONSTRING_TYPE);
2726 __ b(ge, &call_runtime);
2727
2728 // First and second argument are strings.
2729 StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB);
2730 GenerateRegisterArgsPush(masm);
2731 __ TailCallStub(&string_add_stub);
2732
2733 // At least one argument is not a string.
2734 __ bind(&call_runtime);
2735 }
2736
2737
2738 void TypeRecordingBinaryOpStub::GenerateCallRuntime(MacroAssembler* masm) {
2739 GenerateRegisterArgsPush(masm);
2740 switch (op_) {
2741 case Token::ADD:
2742 __ InvokeBuiltin(Builtins::ADD, JUMP_JS);
2743 break;
2744 case Token::SUB:
2745 __ InvokeBuiltin(Builtins::SUB, JUMP_JS);
2746 break;
2747 default:
2748 UNREACHABLE();
2749 }
2750 }
2751
2752
2753 void TypeRecordingBinaryOpStub::GenerateHeapResultAllocation(
2754 MacroAssembler* masm,
2755 Register result,
2756 Register heap_number_map,
2757 Register scratch1,
2758 Register scratch2,
2759 Label* gc_required) {
2760
2761 // Code below will scratch the result register if allocation fails. To keep
2762 // both arguments intact for the runtime call, result cannot be one of them.
2763 ASSERT(!result.is(r0) && !result.is(r1));
2764
2765 if (mode_ == OVERWRITE_LEFT || mode_ == OVERWRITE_RIGHT) {
2766 Label skip_allocation, allocated;
2767 Register overwritable_operand = mode_ == OVERWRITE_LEFT ? r1 : r0;
2768 // If the overwritable operand is already an object, we skip the
2769 // allocation of a heap number.
2770 __ JumpIfNotSmi(overwritable_operand, &skip_allocation);
2771 // Allocate a heap number for the result.
2772 __ AllocateHeapNumber(
2773 result, scratch1, scratch2, heap_number_map, gc_required);
2774 __ b(&allocated);
2775 __ bind(&skip_allocation);
2776 // Use object holding the overwritable operand for result.
2777 __ mov(result, Operand(overwritable_operand));
2778 __ bind(&allocated);
2779 } else {
2780 ASSERT(mode_ == NO_OVERWRITE);
2781 __ AllocateHeapNumber(
2782 result, scratch1, scratch2, heap_number_map, gc_required);
2783 }
2784 }
2785
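GenerateHeapResultAllocation's modes encode result reuse: when the stub owns one operand (OVERWRITE_LEFT or OVERWRITE_RIGHT) and that operand is already a heap object, it is recycled as the result; a smi operand cannot be overwritten in place, so a fresh heap number is allocated. A hypothetical sketch of the decision, with invented stand-in types:

    // Illustrative decision logic only; V8's real types differ.
    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    struct Value { bool is_smi; };  // Stand-in for a tagged value.

    Value* ResultObject(OverwriteMode mode, Value* left, Value* right,
                        Value* (*allocate_heap_number)()) {
      if (mode != NO_OVERWRITE) {
        Value* candidate = (mode == OVERWRITE_LEFT) ? left : right;
        if (!candidate->is_smi) return candidate;  // skip_allocation path.
      }
      return allocate_heap_number();  // May hit gc_required in the stub.
    }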
2786
2787 void TypeRecordingBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
2788 __ Push(r1, r0);
2214 } 2789 }
2215 2790
2216 2791
2217 void TranscendentalCacheStub::Generate(MacroAssembler* masm) { 2792 void TranscendentalCacheStub::Generate(MacroAssembler* masm) {
2218 // Argument is a number and is on stack and in r0. 2793 // Argument is a number and is on stack and in r0.
2219 Label runtime_call; 2794 Label runtime_call;
2220 Label input_not_smi; 2795 Label input_not_smi;
2221 Label loaded; 2796 Label loaded;
2222 2797
2223 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) { 2798 if (Isolate::Current()->cpu_features()->IsSupported(VFP3)) {
2224 // Load argument and check if it is a smi. 2799 // Load argument and check if it is a smi.
2225 __ BranchOnNotSmi(r0, &input_not_smi); 2800 __ JumpIfNotSmi(r0, &input_not_smi);
2226 2801
2227 CpuFeatures::Scope scope(VFP3); 2802 CpuFeatures::Scope scope(VFP3);
2228 // Input is a smi. Convert to double and load the low and high words 2803 // Input is a smi. Convert to double and load the low and high words
2229 // of the double into r2, r3. 2804 // of the double into r2, r3.
2230 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2); 2805 __ IntegerToDoubleConversionWithVFP3(r0, r3, r2);
2231 __ b(&loaded); 2806 __ b(&loaded);
2232 2807
2233 __ bind(&input_not_smi); 2808 __ bind(&input_not_smi);
2234 // Check if input is a HeapNumber. 2809 // Check if input is a HeapNumber.
2235 __ CheckMap(r0, 2810 __ CheckMap(r0,
(...skipping 134 matching lines...)
2370 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); 2945 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
2371 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); 2946 __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
2372 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset)); 2947 __ str(r3, FieldMemOperand(r1, HeapNumber::kMantissaOffset));
2373 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign. 2948 __ eor(r2, r2, Operand(HeapNumber::kSignMask)); // Flip sign.
2374 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset)); 2949 __ str(r2, FieldMemOperand(r1, HeapNumber::kExponentOffset));
2375 __ mov(r0, Operand(r1)); 2950 __ mov(r0, Operand(r1));
2376 } 2951 }
2377 } else if (op_ == Token::BIT_NOT) { 2952 } else if (op_ == Token::BIT_NOT) {
2378 if (include_smi_code_) { 2953 if (include_smi_code_) {
2379 Label non_smi; 2954 Label non_smi;
2380 __ BranchOnNotSmi(r0, &non_smi); 2955 __ JumpIfNotSmi(r0, &non_smi);
2381 __ mvn(r0, Operand(r0)); 2956 __ mvn(r0, Operand(r0));
2382 // Bit-clear inverted smi-tag. 2957 // Bit-clear inverted smi-tag.
2383 __ bic(r0, r0, Operand(kSmiTagMask)); 2958 __ bic(r0, r0, Operand(kSmiTagMask));
2384 __ Ret(); 2959 __ Ret();
2385 __ bind(&non_smi); 2960 __ bind(&non_smi);
2386 } else if (FLAG_debug_code) { 2961 } else if (FLAG_debug_code) {
2387 __ tst(r0, Operand(kSmiTagMask)); 2962 __ tst(r0, Operand(kSmiTagMask));
2388 __ Assert(ne, "Unexpected smi operand."); 2963 __ Assert(ne, "Unexpected smi operand.");
2389 } 2964 }
2390 2965
(...skipping 165 matching lines...)
2556 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize); 3131 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
2557 __ pop(pc); 3132 __ pop(pc);
2558 } 3133 }
2559 3134
2560 3135
2561 void CEntryStub::GenerateCore(MacroAssembler* masm, 3136 void CEntryStub::GenerateCore(MacroAssembler* masm,
2562 Label* throw_normal_exception, 3137 Label* throw_normal_exception,
2563 Label* throw_termination_exception, 3138 Label* throw_termination_exception,
2564 Label* throw_out_of_memory_exception, 3139 Label* throw_out_of_memory_exception,
2565 bool do_gc, 3140 bool do_gc,
2566 bool always_allocate, 3141 bool always_allocate) {
2567 int frame_alignment_skew) {
2568 // r0: result parameter for PerformGC, if any 3142 // r0: result parameter for PerformGC, if any
2569 // r4: number of arguments including receiver (C callee-saved) 3143 // r4: number of arguments including receiver (C callee-saved)
2570 // r5: pointer to builtin function (C callee-saved) 3144 // r5: pointer to builtin function (C callee-saved)
2571 // r6: pointer to the first argument (C callee-saved) 3145 // r6: pointer to the first argument (C callee-saved)
2572 3146
2573 if (do_gc) { 3147 if (do_gc) {
2574 // Passing r0. 3148 // Passing r0.
2575 __ PrepareCallCFunction(1, r1); 3149 __ PrepareCallCFunction(1, r1);
2576 __ CallCFunction(ExternalReference::perform_gc_function(), 1); 3150 __ CallCFunction(ExternalReference::perform_gc_function(), 1);
2577 } 3151 }
2578 3152
2579 ExternalReference scope_depth = 3153 ExternalReference scope_depth =
2580 ExternalReference::heap_always_allocate_scope_depth(); 3154 ExternalReference::heap_always_allocate_scope_depth();
2581 if (always_allocate) { 3155 if (always_allocate) {
2582 __ mov(r0, Operand(scope_depth)); 3156 __ mov(r0, Operand(scope_depth));
2583 __ ldr(r1, MemOperand(r0)); 3157 __ ldr(r1, MemOperand(r0));
2584 __ add(r1, r1, Operand(1)); 3158 __ add(r1, r1, Operand(1));
2585 __ str(r1, MemOperand(r0)); 3159 __ str(r1, MemOperand(r0));
2586 } 3160 }
2587 3161
2588 // Call C built-in. 3162 // Call C built-in.
2589 // r0 = argc, r1 = argv 3163 // r0 = argc, r1 = argv
2590 __ mov(r0, Operand(r4)); 3164 __ mov(r0, Operand(r4));
2591 __ mov(r1, Operand(r6)); 3165 __ mov(r1, Operand(r6));
2592 3166
3167 #if defined(V8_HOST_ARCH_ARM)
2593 int frame_alignment = MacroAssembler::ActivationFrameAlignment(); 3168 int frame_alignment = MacroAssembler::ActivationFrameAlignment();
2594 int frame_alignment_mask = frame_alignment - 1; 3169 int frame_alignment_mask = frame_alignment - 1;
2595 #if defined(V8_HOST_ARCH_ARM)
2596 if (FLAG_debug_code) { 3170 if (FLAG_debug_code) {
2597 if (frame_alignment > kPointerSize) { 3171 if (frame_alignment > kPointerSize) {
2598 Label alignment_as_expected; 3172 Label alignment_as_expected;
2599 ASSERT(IsPowerOf2(frame_alignment)); 3173 ASSERT(IsPowerOf2(frame_alignment));
2600 __ sub(r2, sp, Operand(frame_alignment_skew)); 3174 __ tst(sp, Operand(frame_alignment_mask));
2601 __ tst(r2, Operand(frame_alignment_mask));
2602 __ b(eq, &alignment_as_expected); 3175 __ b(eq, &alignment_as_expected);
2603 // Don't use Check here, as it will call Runtime_Abort re-entering here. 3176 // Don't use Check here, as it will call Runtime_Abort re-entering here.
2604 __ stop("Unexpected alignment"); 3177 __ stop("Unexpected alignment");
2605 __ bind(&alignment_as_expected); 3178 __ bind(&alignment_as_expected);
2606 } 3179 }
2607 } 3180 }
2608 #endif 3181 #endif
2609 3182
2610 // Just before the call (jump) below lr is pushed, so the actual alignment is
2611 // adding one to the current skew.
2612 int alignment_before_call =
2613 (frame_alignment_skew + kPointerSize) & frame_alignment_mask;
2614 if (alignment_before_call > 0) {
2615 // Push until the alignment before the call is met.
2616 __ mov(r2, Operand(0, RelocInfo::NONE));
2617 for (int i = alignment_before_call;
2618 (i & frame_alignment_mask) != 0;
2619 i += kPointerSize) {
2620 __ push(r2);
2621 }
2622 }
2623
2624 __ mov(r2, Operand(ExternalReference::isolate_address())); 3183 __ mov(r2, Operand(ExternalReference::isolate_address()));
2625 3184
2626 3185
2627 // TODO(1242173): To let the GC traverse the return address of the exit 3186 // TODO(1242173): To let the GC traverse the return address of the exit
2628 // frames, we need to know where the return address is. Right now, 3187 // frames, we need to know where the return address is. Right now,
2629 // we push it on the stack to be able to find it again, but we never 3188 // we store it on the stack to be able to find it again, but we never
2630 // restore from it in case of changes, which makes it impossible to 3189 // restore from it in case of changes, which makes it impossible to
2631 // support moving the C entry code stub. This should be fixed, but currently 3190 // support moving the C entry code stub. This should be fixed, but currently
2632 // this is OK because the CEntryStub gets generated so early in the V8 boot 3191 // this is OK because the CEntryStub gets generated so early in the V8 boot
2633 // sequence that it is not moving ever. 3192 // sequence that it is not moving ever.
2634 masm->add(lr, pc, Operand(4)); // Compute return address: (pc + 8) + 4 3193
2635 masm->push(lr); 3194 // Compute the return address in lr to return to after the jump below. Pc is
3195 // already at '+ 8' from the current instruction but return is after three
3196 // instructions so add another 4 to pc to get the return address.
3197 masm->add(lr, pc, Operand(4));
3198 __ str(lr, MemOperand(sp, 0));
2636 masm->Jump(r5); 3199 masm->Jump(r5);
2637 3200
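To make the "+ 8 + 4" arithmetic concrete: when the add executes, pc reads as the add's own address plus 8 (the ARM pipeline offset), and the desired return point is the instruction after the jump, three instructions (12 bytes) past the add. A worked sketch:

    #include <cstdint>

    // Illustrative only: lr must end up pointing just past the jump.
    uintptr_t ComputeReturnAddress(uintptr_t add_address) {
      uintptr_t pc_as_read = add_address + 8;  // ARM: pc reads as '+ 8'.
      return pc_as_read + 4;  // add_address + 12, i.e. after the jump.
    }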
2638 // Restore sp back to before aligning the stack.
2639 if (alignment_before_call > 0) {
2640 __ add(sp, sp, Operand(alignment_before_call));
2641 }
2642
2643 if (always_allocate) { 3201 if (always_allocate) {
2644 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1 3202 // It's okay to clobber r2 and r3 here. Don't mess with r0 and r1
2645 // though (contain the result). 3203 // though (contain the result).
2646 __ mov(r2, Operand(scope_depth)); 3204 __ mov(r2, Operand(scope_depth));
2647 __ ldr(r3, MemOperand(r2)); 3205 __ ldr(r3, MemOperand(r2));
2648 __ sub(r3, r3, Operand(1)); 3206 __ sub(r3, r3, Operand(1));
2649 __ str(r3, MemOperand(r2)); 3207 __ str(r3, MemOperand(r2));
2650 } 3208 }
2651 3209
2652 // check for failure result 3210 // check for failure result
(...skipping 66 matching lines...)
2719 Label throw_normal_exception; 3277 Label throw_normal_exception;
2720 Label throw_termination_exception; 3278 Label throw_termination_exception;
2721 Label throw_out_of_memory_exception; 3279 Label throw_out_of_memory_exception;
2722 3280
2723 // Call into the runtime system. 3281 // Call into the runtime system.
2724 GenerateCore(masm, 3282 GenerateCore(masm,
2725 &throw_normal_exception, 3283 &throw_normal_exception,
2726 &throw_termination_exception, 3284 &throw_termination_exception,
2727 &throw_out_of_memory_exception, 3285 &throw_out_of_memory_exception,
2728 false, 3286 false,
2729 false, 3287 false);
2730 -kPointerSize);
2731 3288
2732 // Do space-specific GC and retry runtime call. 3289 // Do space-specific GC and retry runtime call.
2733 GenerateCore(masm, 3290 GenerateCore(masm,
2734 &throw_normal_exception, 3291 &throw_normal_exception,
2735 &throw_termination_exception, 3292 &throw_termination_exception,
2736 &throw_out_of_memory_exception, 3293 &throw_out_of_memory_exception,
2737 true, 3294 true,
2738 false, 3295 false);
2739 0);
2740 3296
2741 // Do full GC and retry runtime call one final time. 3297 // Do full GC and retry runtime call one final time.
2742 Failure* failure = Failure::InternalError(); 3298 Failure* failure = Failure::InternalError();
2743 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure))); 3299 __ mov(r0, Operand(reinterpret_cast<int32_t>(failure)));
2744 GenerateCore(masm, 3300 GenerateCore(masm,
2745 &throw_normal_exception, 3301 &throw_normal_exception,
2746 &throw_termination_exception, 3302 &throw_termination_exception,
2747 &throw_out_of_memory_exception, 3303 &throw_out_of_memory_exception,
2748 true, 3304 true,
2749 true, 3305 true);
2750 kPointerSize);
2751 3306
2752 __ bind(&throw_out_of_memory_exception); 3307 __ bind(&throw_out_of_memory_exception);
2753 GenerateThrowUncatchable(masm, OUT_OF_MEMORY); 3308 GenerateThrowUncatchable(masm, OUT_OF_MEMORY);
2754 3309
2755 __ bind(&throw_termination_exception); 3310 __ bind(&throw_termination_exception);
2756 GenerateThrowUncatchable(masm, TERMINATION); 3311 GenerateThrowUncatchable(masm, TERMINATION);
2757 3312
2758 __ bind(&throw_normal_exception); 3313 __ bind(&throw_normal_exception);
2759 GenerateThrowTOS(masm); 3314 GenerateThrowTOS(masm);
2760 } 3315 }
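The three GenerateCore calls above form an escalating retry ladder: try the runtime call with no GC, retry after a space-specific GC, then retry once more after a full GC with allocation forced to succeed. A hedged sketch of that control flow in plain C++ (RunWithRetries, Gc, and the callable parameters are illustrative names, not V8 API):

    // Sketch of the retry ladder in CEntryStub::Generate.
    enum class Gc { kNone, kSpaceSpecific, kFull };

    template <typename CallRuntime, typename IsRetryFailure>
    int RunWithRetries(CallRuntime call_runtime, IsRetryFailure is_retry) {
      int result = call_runtime(Gc::kNone, /*always_allocate=*/false);
      if (is_retry(result)) {
        result = call_runtime(Gc::kSpaceSpecific, /*always_allocate=*/false);
      }
      if (is_retry(result)) {
        result = call_runtime(Gc::kFull, /*always_allocate=*/true);
      }
      return result;  // anything still failing is thrown by the stub
    }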
(...skipping 130 matching lines...)
2891 // Restore callee-saved registers and return. 3446 // Restore callee-saved registers and return.
2892 #ifdef DEBUG 3447 #ifdef DEBUG
2893 if (FLAG_debug_code) { 3448 if (FLAG_debug_code) {
2894 __ mov(lr, Operand(pc)); 3449 __ mov(lr, Operand(pc));
2895 } 3450 }
2896 #endif 3451 #endif
2897 __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); 3452 __ ldm(ia_w, sp, kCalleeSaved | pc.bit());
2898 } 3453 }
2899 3454
2900 3455
2901 // Uses registers r0 to r4. Expected input is 3456 // Uses registers r0 to r4.
2902 // object in r0 (or at sp+1*kPointerSize) and function in 3457 // Expected input (depending on whether args are in registers or on the stack):
2903 // r1 (or at sp), depending on whether or not 3458 // * object: r0 or at sp + 1 * kPointerSize.
2904 // args_in_registers() is true. 3459 // * function: r1 or at sp.
3460 //
3461 // An inlined call site may have been generated before calling this stub.
3462 // In this case the offset to the inline site to patch is passed on the stack,
3463 // in the safepoint slot for register r4.
3464 // (See LCodeGen::DoInstanceOfKnownGlobal)
2905 void InstanceofStub::Generate(MacroAssembler* masm) { 3465 void InstanceofStub::Generate(MacroAssembler* masm) {
3466 // Call site inlining and patching implies arguments in registers.
3467 ASSERT(HasArgsInRegisters() || !HasCallSiteInlineCheck());
3468 // ReturnTrueFalse is only implemented for inlined call sites.
3469 ASSERT(!ReturnTrueFalseObject() || HasCallSiteInlineCheck());
3470
2906 // Fixed register usage throughout the stub: 3471 // Fixed register usage throughout the stub:
2907 const Register object = r0; // Object (lhs). 3472 const Register object = r0; // Object (lhs).
2908 const Register map = r3; // Map of the object. 3473 Register map = r3; // Map of the object.
2909 const Register function = r1; // Function (rhs). 3474 const Register function = r1; // Function (rhs).
2910 const Register prototype = r4; // Prototype of the function. 3475 const Register prototype = r4; // Prototype of the function.
3476 const Register inline_site = r9;
2911 const Register scratch = r2; 3477 const Register scratch = r2;
3478
3479 const int32_t kDeltaToLoadBoolResult = 3 * kPointerSize;
3480
2912 Label slow, loop, is_instance, is_not_instance, not_js_object; 3481 Label slow, loop, is_instance, is_not_instance, not_js_object;
3482
2913 if (!HasArgsInRegisters()) { 3483 if (!HasArgsInRegisters()) {
2914 __ ldr(object, MemOperand(sp, 1 * kPointerSize)); 3484 __ ldr(object, MemOperand(sp, 1 * kPointerSize));
2915 __ ldr(function, MemOperand(sp, 0)); 3485 __ ldr(function, MemOperand(sp, 0));
2916 } 3486 }
2917 3487
2918 // Check that the left-hand side is a JS object and load its map. 3488 // Check that the left-hand side is a JS object and load its map.
2919 __ BranchOnSmi(object, &not_js_object); 3489 __ JumpIfSmi(object, &not_js_object);
2920 __ IsObjectJSObjectType(object, map, scratch, &not_js_object); 3490 __ IsObjectJSObjectType(object, map, scratch, &not_js_object);
2921 3491
2922 // Look up the function and the map in the instanceof cache. 3492 // If there is a call site cache, don't look in the global cache; do the
2923 Label miss; 3493 // real lookup and update the call site cache instead.
2924 __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex); 3494 if (!HasCallSiteInlineCheck()) {
2925 __ cmp(function, ip); 3495 Label miss;
2926 __ b(ne, &miss); 3496 __ LoadRoot(ip, Heap::kInstanceofCacheFunctionRootIndex);
2927 __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex); 3497 __ cmp(function, ip);
2928 __ cmp(map, ip); 3498 __ b(ne, &miss);
2929 __ b(ne, &miss); 3499 __ LoadRoot(ip, Heap::kInstanceofCacheMapRootIndex);
2930 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); 3500 __ cmp(map, ip);
2931 __ Ret(HasArgsInRegisters() ? 0 : 2); 3501 __ b(ne, &miss);
3502 __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
3503 __ Ret(HasArgsInRegisters() ? 0 : 2);
2932 3504
2933 __ bind(&miss); 3505 __ bind(&miss);
3506 }
3507
3508 // Get the prototype of the function.
2934 __ TryGetFunctionPrototype(function, prototype, scratch, &slow); 3509 __ TryGetFunctionPrototype(function, prototype, scratch, &slow);
2935 3510
2936 // Check that the function prototype is a JS object. 3511 // Check that the function prototype is a JS object.
2937 __ BranchOnSmi(prototype, &slow); 3512 __ JumpIfSmi(prototype, &slow);
2938 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow); 3513 __ IsObjectJSObjectType(prototype, scratch, scratch, &slow);
2939 3514
2940 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); 3515 // Update the global instanceof or call site inlined cache with the current
2941 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex); 3516 // map and function. The cached answer will be set when it is known below.
3517 if (!HasCallSiteInlineCheck()) {
3518 __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex);
3519 __ StoreRoot(map, Heap::kInstanceofCacheMapRootIndex);
3520 } else {
3521 ASSERT(HasArgsInRegisters());
3522 // Patch the (relocated) inlined map check.
3523
3524 // The offset was stored in the r4 safepoint slot.
3525 // (See LCodeGen::DoDeferredLInstanceOfKnownGlobal)
3526 __ ldr(scratch, MacroAssembler::SafepointRegisterSlot(r4));
3527 __ sub(inline_site, lr, scratch);
3528 // Get the map location in scratch and patch it.
3529 __ GetRelocatedValueLocation(inline_site, scratch);
3530 __ str(map, MemOperand(scratch));
3531 }
2942 3532
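The patching above works backwards from the return address: the offset read from r4's safepoint slot is subtracted from lr to find the start of the inlined check, and the embedded map word is overwritten there. A sketch of the address arithmetic only (the helper name is illustrative):

    #include <cstddef>
    #include <cstdint>

    // inline_site = lr - offset; the cell to patch lies at a fixed delta
    // from this address.
    inline uint8_t* InlineSiteFromReturnAddress(uint8_t* lr,
                                                std::ptrdiff_t offset) {
      return lr - offset;
    }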
2943 // Register mapping: r3 is object map and r4 is function prototype. 3533 // Register mapping: r3 is object map and r4 is function prototype.
2944 // Get prototype of object into r2. 3534 // Get prototype of object into r2.
2945 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); 3535 __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset));
2946 3536
3537 // We don't need map any more. Use it as a scratch register.
3538 Register scratch2 = map;
3539 map = no_reg;
3540
2947 // Loop through the prototype chain looking for the function prototype. 3541 // Loop through the prototype chain looking for the function prototype.
3542 __ LoadRoot(scratch2, Heap::kNullValueRootIndex);
2948 __ bind(&loop); 3543 __ bind(&loop);
2949 __ cmp(scratch, Operand(prototype)); 3544 __ cmp(scratch, Operand(prototype));
2950 __ b(eq, &is_instance); 3545 __ b(eq, &is_instance);
2951 __ LoadRoot(ip, Heap::kNullValueRootIndex); 3546 __ cmp(scratch, scratch2);
2952 __ cmp(scratch, ip);
2953 __ b(eq, &is_not_instance); 3547 __ b(eq, &is_not_instance);
2954 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); 3548 __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset));
2955 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); 3549 __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset));
2956 __ jmp(&loop); 3550 __ jmp(&loop);
2957 3551
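The loop above walks the object's prototype chain until it either finds the function's prototype (instance) or reaches null (not an instance). A self-contained sketch over plain structs rather than real V8 heap objects (all names are illustrative):

    struct Map;
    struct Object { Map* map; };
    struct Map { Object* prototype; };

    // Returns true iff `candidate` occurs on `object`'s prototype chain;
    // `null_value` plays the role of Heap::kNullValueRootIndex.
    bool IsOnPrototypeChain(Object* object, Object* candidate,
                            Object* null_value) {
      for (Object* p = object->map->prototype; ; p = p->map->prototype) {
        if (p == candidate) return true;    // &is_instance
        if (p == null_value) return false;  // &is_not_instance
      }
    }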
2958 __ bind(&is_instance); 3552 __ bind(&is_instance);
2959 __ mov(r0, Operand(Smi::FromInt(0))); 3553 if (!HasCallSiteInlineCheck()) {
2960 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); 3554 __ mov(r0, Operand(Smi::FromInt(0)));
3555 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
3556 } else {
3557 // Patch the call site to return true.
3558 __ LoadRoot(r0, Heap::kTrueValueRootIndex);
3559 __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
3560 // Get the boolean result location in scratch and patch it.
3561 __ GetRelocatedValueLocation(inline_site, scratch);
3562 __ str(r0, MemOperand(scratch));
3563
3564 if (!ReturnTrueFalseObject()) {
3565 __ mov(r0, Operand(Smi::FromInt(0)));
3566 }
3567 }
2961 __ Ret(HasArgsInRegisters() ? 0 : 2); 3568 __ Ret(HasArgsInRegisters() ? 0 : 2);
2962 3569
2963 __ bind(&is_not_instance); 3570 __ bind(&is_not_instance);
2964 __ mov(r0, Operand(Smi::FromInt(1))); 3571 if (!HasCallSiteInlineCheck()) {
2965 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); 3572 __ mov(r0, Operand(Smi::FromInt(1)));
3573 __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex);
3574 } else {
3575 // Patch the call site to return false.
3576 __ LoadRoot(r0, Heap::kFalseValueRootIndex);
3577 __ add(inline_site, inline_site, Operand(kDeltaToLoadBoolResult));
3578 // Get the boolean result location in scratch and patch it.
3579 __ GetRelocatedValueLocation(inline_site, scratch);
3580 __ str(r0, MemOperand(scratch));
3581
3582 if (!ReturnTrueFalseObject()) {
3583 __ mov(r0, Operand(Smi::FromInt(1)));
3584 }
3585 }
2966 __ Ret(HasArgsInRegisters() ? 0 : 2); 3586 __ Ret(HasArgsInRegisters() ? 0 : 2);
2967 3587
2968 Label object_not_null, object_not_null_or_smi; 3588 Label object_not_null, object_not_null_or_smi;
2969 __ bind(&not_js_object); 3589 __ bind(&not_js_object);
2970 // Before null, smi and string value checks, check that the rhs is a 3590 // Before null, smi and string value checks, check that the rhs is a
2971 // function, since an exception needs to be thrown for a non-function rhs. 3591 // function, since an exception needs to be thrown for a non-function rhs.
2972 __ BranchOnSmi(function, &slow); 3592 __ JumpIfSmi(function, &slow);
2973 __ CompareObjectType(function, map, scratch, JS_FUNCTION_TYPE); 3593 __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE);
2974 __ b(ne, &slow); 3594 __ b(ne, &slow);
2975 3595
2976 // Null is not an instance of anything. 3596 // Null is not an instance of anything.
2977 __ cmp(scratch, Operand(FACTORY->null_value())); 3597 __ cmp(scratch, Operand(FACTORY->null_value()));
2978 __ b(ne, &object_not_null); 3598 __ b(ne, &object_not_null);
2979 __ mov(r0, Operand(Smi::FromInt(1))); 3599 __ mov(r0, Operand(Smi::FromInt(1)));
2980 __ Ret(HasArgsInRegisters() ? 0 : 2); 3600 __ Ret(HasArgsInRegisters() ? 0 : 2);
2981 3601
2982 __ bind(&object_not_null); 3602 __ bind(&object_not_null);
2983 // Smi values are not instances of anything. 3603 // Smi values are not instances of anything.
2984 __ BranchOnNotSmi(object, &object_not_null_or_smi); 3604 __ JumpIfNotSmi(object, &object_not_null_or_smi);
2985 __ mov(r0, Operand(Smi::FromInt(1))); 3605 __ mov(r0, Operand(Smi::FromInt(1)));
2986 __ Ret(HasArgsInRegisters() ? 0 : 2); 3606 __ Ret(HasArgsInRegisters() ? 0 : 2);
2987 3607
2988 __ bind(&object_not_null_or_smi); 3608 __ bind(&object_not_null_or_smi);
2989 // String values are not instances of anything. 3609 // String values are not instances of anything.
2990 __ IsObjectJSStringType(object, scratch, &slow); 3610 __ IsObjectJSStringType(object, scratch, &slow);
2991 __ mov(r0, Operand(Smi::FromInt(1))); 3611 __ mov(r0, Operand(Smi::FromInt(1)));
2992 __ Ret(HasArgsInRegisters() ? 0 : 2); 3612 __ Ret(HasArgsInRegisters() ? 0 : 2);
2993 3613
2994 // Slow-case. Tail call builtin. 3614 // Slow-case. Tail call builtin.
2995 __ bind(&slow); 3615 __ bind(&slow);
2996 if (HasArgsInRegisters()) { 3616 if (!ReturnTrueFalseObject()) {
3617 if (HasArgsInRegisters()) {
3618 __ Push(r0, r1);
3619 }
3620 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
3621 } else {
3622 __ EnterInternalFrame();
2997 __ Push(r0, r1); 3623 __ Push(r0, r1);
3624 __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_JS);
3625 __ LeaveInternalFrame();
3626 __ cmp(r0, Operand(0));
3627 __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq);
3628 __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne);
3629 __ Ret(HasArgsInRegisters() ? 0 : 2);
2998 } 3630 }
2999 __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_JS);
3000 } 3631 }
3001 3632
3002 3633
3634 Register InstanceofStub::left() { return r0; }
3635
3636
3637 Register InstanceofStub::right() { return r1; }
3638
3639
3003 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { 3640 void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) {
3004 // The displacement is the offset of the last parameter (if any) 3641 // The displacement is the offset of the last parameter (if any)
3005 // relative to the frame pointer. 3642 // relative to the frame pointer.
3006 static const int kDisplacement = 3643 static const int kDisplacement =
3007 StandardFrameConstants::kCallerSPOffset - kPointerSize; 3644 StandardFrameConstants::kCallerSPOffset - kPointerSize;
3008 3645
3009 // Check that the key is a smi. 3646 // Check that the key is a smi.
3010 Label slow; 3647 Label slow;
3011 __ BranchOnNotSmi(r1, &slow); 3648 __ JumpIfNotSmi(r1, &slow);
3012 3649
3013 // Check if the calling frame is an arguments adaptor frame. 3650 // Check if the calling frame is an arguments adaptor frame.
3014 Label adaptor; 3651 Label adaptor;
3015 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); 3652 __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
3016 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); 3653 __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset));
3017 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); 3654 __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR)));
3018 __ b(eq, &adaptor); 3655 __ b(eq, &adaptor);
3019 3656
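The adaptor-frame test above reads the caller's frame pointer and compares that frame's context slot against the ARGUMENTS_ADAPTOR sentinel. A sketch in C++, where the slot indices and sentinel are placeholders for the real frame constants, not the V8 values:

    #include <cstdint>

    inline bool IsArgumentsAdaptorFrame(const std::intptr_t* fp,
                                        int caller_fp_slot,  // kCallerFPOffset
                                        int context_slot,    // kContextOffset
                                        std::intptr_t adaptor_sentinel) {
      const std::intptr_t* caller_fp =
          reinterpret_cast<const std::intptr_t*>(fp[caller_fp_slot]);
      return caller_fp[context_slot] == adaptor_sentinel;
    }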
3020 // Check index against formal parameters count limit passed in 3657 // Check index against formal parameters count limit passed in
3021 // through register r0. Use unsigned comparison to get negative 3658 // through register r0. Use unsigned comparison to get negative
(...skipping 183 matching lines...)
3205 STATIC_ASSERT(kSmiTag == 0); 3842 STATIC_ASSERT(kSmiTag == 0);
3206 __ tst(r0, Operand(kSmiTagMask)); 3843 __ tst(r0, Operand(kSmiTagMask));
3207 __ b(eq, &runtime); 3844 __ b(eq, &runtime);
3208 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); 3845 __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE);
3209 __ b(ne, &runtime); 3846 __ b(ne, &runtime);
3210 3847
3211 // Check that the RegExp has been compiled (data contains a fixed array). 3848 // Check that the RegExp has been compiled (data contains a fixed array).
3212 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); 3849 __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset));
3213 if (FLAG_debug_code) { 3850 if (FLAG_debug_code) {
3214 __ tst(regexp_data, Operand(kSmiTagMask)); 3851 __ tst(regexp_data, Operand(kSmiTagMask));
3215 __ Check(nz, "Unexpected type for RegExp data, FixedArray expected"); 3852 __ Check(ne, "Unexpected type for RegExp data, FixedArray expected");
3216 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); 3853 __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE);
3217 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected"); 3854 __ Check(eq, "Unexpected type for RegExp data, FixedArray expected");
3218 } 3855 }
3219 3856
3220 // regexp_data: RegExp data (FixedArray) 3857 // regexp_data: RegExp data (FixedArray)
3221 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. 3858 // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP.
3222 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); 3859 __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset));
3223 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); 3860 __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP)));
3224 __ b(ne, &runtime); 3861 __ b(ne, &runtime);
3225 3862
(...skipping 82 matching lines...)
3308 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); 3945 __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset));
3309 __ LoadRoot(r1, Heap::kEmptyStringRootIndex); 3946 __ LoadRoot(r1, Heap::kEmptyStringRootIndex);
3310 __ cmp(r0, r1); 3947 __ cmp(r0, r1);
3311 __ b(ne, &runtime); 3948 __ b(ne, &runtime);
3312 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); 3949 __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset));
3313 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); 3950 __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset));
3314 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); 3951 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset));
3315 // Is the first part a flat string? 3952 // Is the first part a flat string?
3316 STATIC_ASSERT(kSeqStringTag == 0); 3953 STATIC_ASSERT(kSeqStringTag == 0);
3317 __ tst(r0, Operand(kStringRepresentationMask)); 3954 __ tst(r0, Operand(kStringRepresentationMask));
3318 __ b(nz, &runtime); 3955 __ b(ne, &runtime);
3319 3956
3320 __ bind(&seq_string); 3957 __ bind(&seq_string);
3321 // subject: Subject string 3958 // subject: Subject string
3322 // regexp_data: RegExp data (FixedArray) 3959 // regexp_data: RegExp data (FixedArray)
3323 // r0: Instance type of subject string 3960 // r0: Instance type of subject string
3324 STATIC_ASSERT(4 == kAsciiStringTag); 3961 STATIC_ASSERT(4 == kAsciiStringTag);
3325 STATIC_ASSERT(kTwoByteStringTag == 0); 3962 STATIC_ASSERT(kTwoByteStringTag == 0);
3326 // Find the code object based on the assumptions above. 3963 // Find the code object based on the assumptions above.
3327 __ and_(r0, r0, Operand(kStringEncodingMask)); 3964 __ and_(r0, r0, Operand(kStringEncodingMask));
3328 __ mov(r3, Operand(r0, ASR, 2), SetCC); 3965 __ mov(r3, Operand(r0, ASR, 2), SetCC);
(...skipping 254 matching lines...)
3583 4220
3584 // If the receiver might be a value (string, number or boolean), check for this 4221 // If the receiver might be a value (string, number or boolean), check for this
3585 // and box it if it is. 4222 // and box it if it is.
3586 if (ReceiverMightBeValue()) { 4223 if (ReceiverMightBeValue()) {
3587 // Get the receiver from the stack. 4224 // Get the receiver from the stack.
3588 // function, receiver [, arguments] 4225 // function, receiver [, arguments]
3589 Label receiver_is_value, receiver_is_js_object; 4226 Label receiver_is_value, receiver_is_js_object;
3590 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize)); 4227 __ ldr(r1, MemOperand(sp, argc_ * kPointerSize));
3591 4228
3592 // Check if receiver is a smi (which is a number value). 4229 // Check if receiver is a smi (which is a number value).
3593 __ BranchOnSmi(r1, &receiver_is_value); 4230 __ JumpIfSmi(r1, &receiver_is_value);
3594 4231
3595 // Check if the receiver is a valid JS object. 4232 // Check if the receiver is a valid JS object.
3596 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE); 4233 __ CompareObjectType(r1, r2, r2, FIRST_JS_OBJECT_TYPE);
3597 __ b(ge, &receiver_is_js_object); 4234 __ b(ge, &receiver_is_js_object);
3598 4235
3599 // Call the runtime to box the value. 4236 // Call the runtime to box the value.
3600 __ bind(&receiver_is_value); 4237 __ bind(&receiver_is_value);
3601 __ EnterInternalFrame(); 4238 __ EnterInternalFrame();
3602 __ push(r1); 4239 __ push(r1);
3603 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS); 4240 __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_JS);
3604 __ LeaveInternalFrame(); 4241 __ LeaveInternalFrame();
3605 __ str(r0, MemOperand(sp, argc_ * kPointerSize)); 4242 __ str(r0, MemOperand(sp, argc_ * kPointerSize));
3606 4243
3607 __ bind(&receiver_is_js_object); 4244 __ bind(&receiver_is_js_object);
3608 } 4245 }
3609 4246
3610 // Get the function to call from the stack. 4247 // Get the function to call from the stack.
3611 // function, receiver [, arguments] 4248 // function, receiver [, arguments]
3612 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize)); 4249 __ ldr(r1, MemOperand(sp, (argc_ + 1) * kPointerSize));
3613 4250
3614 // Check that the function is really a JavaScript function. 4251 // Check that the function is really a JavaScript function.
3615 // r1: pushed function (to be verified) 4252 // r1: pushed function (to be verified)
3616 __ BranchOnSmi(r1, &slow); 4253 __ JumpIfSmi(r1, &slow);
3617 // Get the map of the function object. 4254 // Get the map of the function object.
3618 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE); 4255 __ CompareObjectType(r1, r2, r2, JS_FUNCTION_TYPE);
3619 __ b(ne, &slow); 4256 __ b(ne, &slow);
3620 4257
3621 // Fast-case: Invoke the function now. 4258 // Fast-case: Invoke the function now.
3622 // r1: pushed function 4259 // r1: pushed function
3623 ParameterCount actual(argc_); 4260 ParameterCount actual(argc_);
3624 __ InvokeFunction(r1, actual, JUMP_FUNCTION); 4261 __ InvokeFunction(r1, actual, JUMP_FUNCTION);
3625 4262
3626 // Slow-case: Non-function called. 4263 // Slow-case: Non-function called.
(...skipping 79 matching lines...)
3706 return ConditionField::encode(static_cast<unsigned>(cc_) >> 28) 4343 return ConditionField::encode(static_cast<unsigned>(cc_) >> 28)
3707 | RegisterField::encode(lhs_.is(r0)) 4344 | RegisterField::encode(lhs_.is(r0))
3708 | StrictField::encode(strict_) 4345 | StrictField::encode(strict_)
3709 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false) 4346 | NeverNanNanField::encode(cc_ == eq ? never_nan_nan_ : false)
3710 | IncludeNumberCompareField::encode(include_number_compare_) 4347 | IncludeNumberCompareField::encode(include_number_compare_)
3711 | IncludeSmiCompareField::encode(include_smi_compare_); 4348 | IncludeSmiCompareField::encode(include_smi_compare_);
3712 } 4349 }
3713 4350
3714 4351
3715 // StringCharCodeAtGenerator 4352 // StringCharCodeAtGenerator
3716
3717 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { 4353 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) {
3718 Label flat_string; 4354 Label flat_string;
3719 Label ascii_string; 4355 Label ascii_string;
3720 Label got_char_code; 4356 Label got_char_code;
3721 4357
3722 // If the receiver is a smi, trigger the non-string case. 4358 // If the receiver is a smi, trigger the non-string case.
3723 __ BranchOnSmi(object_, receiver_not_string_); 4359 __ JumpIfSmi(object_, receiver_not_string_);
3724 4360
3725 // Fetch the instance type of the receiver into result register. 4361 // Fetch the instance type of the receiver into result register.
3726 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); 4362 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3727 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); 4363 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3728 // If the receiver is not a string, trigger the non-string case. 4364 // If the receiver is not a string, trigger the non-string case.
3729 __ tst(result_, Operand(kIsNotStringMask)); 4365 __ tst(result_, Operand(kIsNotStringMask));
3730 __ b(ne, receiver_not_string_); 4366 __ b(ne, receiver_not_string_);
3731 4367
3732 // If the index is non-smi, trigger the non-smi case. 4368 // If the index is non-smi, trigger the non-smi case.
3733 __ BranchOnNotSmi(index_, &index_not_smi_); 4369 __ JumpIfNotSmi(index_, &index_not_smi_);
3734 4370
3735 // Put smi-tagged index into scratch register. 4371 // Put smi-tagged index into scratch register.
3736 __ mov(scratch_, index_); 4372 __ mov(scratch_, index_);
3737 __ bind(&got_smi_index_); 4373 __ bind(&got_smi_index_);
3738 4374
3739 // Check for index out of range. 4375 // Check for index out of range.
3740 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); 4376 __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset));
3741 __ cmp(ip, Operand(scratch_)); 4377 __ cmp(ip, Operand(scratch_));
3742 __ b(ls, index_out_of_range_); 4378 __ b(ls, index_out_of_range_);
3743 4379
(...skipping 15 matching lines...)
3759 __ LoadRoot(ip, Heap::kEmptyStringRootIndex); 4395 __ LoadRoot(ip, Heap::kEmptyStringRootIndex);
3760 __ cmp(result_, Operand(ip)); 4396 __ cmp(result_, Operand(ip));
3761 __ b(ne, &call_runtime_); 4397 __ b(ne, &call_runtime_);
3762 // Get the first of the two strings and load its instance type. 4398 // Get the first of the two strings and load its instance type.
3763 __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset)); 4399 __ ldr(object_, FieldMemOperand(object_, ConsString::kFirstOffset));
3764 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); 4400 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3765 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); 4401 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3766 // If the first cons component is also non-flat, then go to runtime. 4402 // If the first cons component is also non-flat, then go to runtime.
3767 STATIC_ASSERT(kSeqStringTag == 0); 4403 STATIC_ASSERT(kSeqStringTag == 0);
3768 __ tst(result_, Operand(kStringRepresentationMask)); 4404 __ tst(result_, Operand(kStringRepresentationMask));
3769 __ b(nz, &call_runtime_); 4405 __ b(ne, &call_runtime_);
3770 4406
3771 // Check for 1-byte or 2-byte string. 4407 // Check for 1-byte or 2-byte string.
3772 __ bind(&flat_string); 4408 __ bind(&flat_string);
3773 STATIC_ASSERT(kAsciiStringTag != 0); 4409 STATIC_ASSERT(kAsciiStringTag != 0);
3774 __ tst(result_, Operand(kStringEncodingMask)); 4410 __ tst(result_, Operand(kStringEncodingMask));
3775 __ b(nz, &ascii_string); 4411 __ b(ne, &ascii_string);
3776 4412
3777 // 2-byte string. 4413 // 2-byte string.
3778 // Load the 2-byte character code into the result register. We can 4414 // Load the 2-byte character code into the result register. We can
3779 // add without shifting since the smi tag size is the log2 of the 4415 // add without shifting since the smi tag size is the log2 of the
3780 // number of bytes in a two-byte character. 4416 // number of bytes in a two-byte character.
3781 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0); 4417 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1 && kSmiShiftSize == 0);
3782 __ add(scratch_, object_, Operand(scratch_)); 4418 __ add(scratch_, object_, Operand(scratch_));
3783 __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize)); 4419 __ ldrh(result_, FieldMemOperand(scratch_, SeqTwoByteString::kHeaderSize));
3784 __ jmp(&got_char_code); 4420 __ jmp(&got_char_code);
3785 4421
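The untagged add above relies on the smi encoding: on 32-bit V8 a smi stores value << 1, and a two-byte character occupies two bytes, so the smi bits of the index are already the byte offset index * 2. A one-line sketch:

    #include <cstdint>

    inline int32_t TwoByteOffsetFromSmi(int32_t smi_index) {
      return smi_index;  // (index << 1) == index * sizeof(uint16_t)
    }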
(...skipping 34 matching lines...)
3820 // Save the conversion result before the pop instructions below 4456 // Save the conversion result before the pop instructions below
3821 // have a chance to overwrite it. 4457 // have a chance to overwrite it.
3822 __ Move(scratch_, r0); 4458 __ Move(scratch_, r0);
3823 __ pop(index_); 4459 __ pop(index_);
3824 __ pop(object_); 4460 __ pop(object_);
3825 // Reload the instance type. 4461 // Reload the instance type.
3826 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); 4462 __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset));
3827 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); 4463 __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset));
3828 call_helper.AfterCall(masm); 4464 call_helper.AfterCall(masm);
3829 // If index is still not a smi, it must be out of range. 4465 // If index is still not a smi, it must be out of range.
3830 __ BranchOnNotSmi(scratch_, index_out_of_range_); 4466 __ JumpIfNotSmi(scratch_, index_out_of_range_);
3831 // Otherwise, return to the fast path. 4467 // Otherwise, return to the fast path.
3832 __ jmp(&got_smi_index_); 4468 __ jmp(&got_smi_index_);
3833 4469
3834 // Call runtime. We get here when the receiver is a string and the 4470 // Call runtime. We get here when the receiver is a string and the
3835 // index is a number, but the code for getting the actual character 4471 // index is a number, but the code for getting the actual character
3836 // is too complex (e.g., when the string needs to be flattened). 4472 // is too complex (e.g., when the string needs to be flattened).
3837 __ bind(&call_runtime_); 4473 __ bind(&call_runtime_);
3838 call_helper.BeforeCall(masm); 4474 call_helper.BeforeCall(masm);
3839 __ Push(object_, index_); 4475 __ Push(object_, index_);
3840 __ CallRuntime(Runtime::kStringCharCodeAt, 2); 4476 __ CallRuntime(Runtime::kStringCharCodeAt, 2);
3841 __ Move(result_, r0); 4477 __ Move(result_, r0);
3842 call_helper.AfterCall(masm); 4478 call_helper.AfterCall(masm);
3843 __ jmp(&exit_); 4479 __ jmp(&exit_);
3844 4480
3845 __ Abort("Unexpected fallthrough from CharCodeAt slow case"); 4481 __ Abort("Unexpected fallthrough from CharCodeAt slow case");
3846 } 4482 }
3847 4483
3848 4484
3849 // ------------------------------------------------------------------------- 4485 // -------------------------------------------------------------------------
3850 // StringCharFromCodeGenerator 4486 // StringCharFromCodeGenerator
3851 4487
3852 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { 4488 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) {
3853 // Fast case of Heap::LookupSingleCharacterStringFromCode. 4489 // Fast case of Heap::LookupSingleCharacterStringFromCode.
3854 STATIC_ASSERT(kSmiTag == 0); 4490 STATIC_ASSERT(kSmiTag == 0);
3855 STATIC_ASSERT(kSmiShiftSize == 0); 4491 STATIC_ASSERT(kSmiShiftSize == 0);
3856 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1)); 4492 ASSERT(IsPowerOf2(String::kMaxAsciiCharCode + 1));
3857 __ tst(code_, 4493 __ tst(code_,
3858 Operand(kSmiTagMask | 4494 Operand(kSmiTagMask |
3859 ((~String::kMaxAsciiCharCode) << kSmiTagSize))); 4495 ((~String::kMaxAsciiCharCode) << kSmiTagSize)));
3860 __ b(nz, &slow_case_); 4496 __ b(ne, &slow_case_);
3861 4497
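The single tst above folds two checks into one mask: the value must be a smi (tag bit clear) and its untagged payload must not exceed the maximum ASCII char code. A sketch with the constants spelled out, assuming the 32-bit smi layout (tag size 1, max ASCII code 0x7f):

    #include <cstdint>

    inline bool IsSmiAsciiCharCode(uint32_t tagged_value) {
      const uint32_t kSmiTagMask = 1;  // low tag bit; 0 marks a smi
      const uint32_t kSmiTagSize = 1;
      const uint32_t kMaxAsciiCharCode = 0x7f;
      const uint32_t mask =
          kSmiTagMask | (~kMaxAsciiCharCode << kSmiTagSize);
      return (tagged_value & mask) == 0;  // branch taken iff tst clears Z
    }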
3862 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); 4498 __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex);
3863 // At this point the code register contains a smi-tagged ASCII char code. 4499 // At this point the code register contains a smi-tagged ASCII char code.
3864 STATIC_ASSERT(kSmiTag == 0); 4500 STATIC_ASSERT(kSmiTag == 0);
3865 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize)); 4501 __ add(result_, result_, Operand(code_, LSL, kPointerSizeLog2 - kSmiTagSize));
3866 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); 4502 __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize));
3867 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex); 4503 __ LoadRoot(ip, Heap::kUndefinedValueRootIndex);
3868 __ cmp(result_, Operand(ip)); 4504 __ cmp(result_, Operand(ip));
3869 __ b(eq, &slow_case_); 4505 __ b(eq, &slow_case_);
3870 __ bind(&exit_); 4506 __ bind(&exit_);
(...skipping 426 matching lines...)
4297 void StringHelper::GenerateHashGetHash(MacroAssembler* masm, 4933 void StringHelper::GenerateHashGetHash(MacroAssembler* masm,
4298 Register hash) { 4934 Register hash) {
4299 // hash += hash << 3; 4935 // hash += hash << 3;
4300 __ add(hash, hash, Operand(hash, LSL, 3)); 4936 __ add(hash, hash, Operand(hash, LSL, 3));
4301 // hash ^= hash >> 11; 4937 // hash ^= hash >> 11;
4302 __ eor(hash, hash, Operand(hash, ASR, 11)); 4938 __ eor(hash, hash, Operand(hash, ASR, 11));
4303 // hash += hash << 15; 4939 // hash += hash << 15;
4304 __ add(hash, hash, Operand(hash, LSL, 15), SetCC); 4940 __ add(hash, hash, Operand(hash, LSL, 15), SetCC);
4305 4941
4306 // if (hash == 0) hash = 27; 4942 // if (hash == 0) hash = 27;
4307 __ mov(hash, Operand(27), LeaveCC, nz); 4943 __ mov(hash, Operand(27), LeaveCC, ne);
4308 } 4944 }
4309 4945
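The commented arithmetic above is the tail of the running string-hash computation; the same steps in plain C++ as a sketch (note the stub shifts with ASR while uint32_t >> is a logical shift; the two agree whenever the top bit of hash is clear at that point):

    #include <cstdint>

    inline uint32_t HashGetHash(uint32_t hash) {
      hash += hash << 3;
      hash ^= hash >> 11;
      hash += hash << 15;
      if (hash == 0) hash = 27;  // a zero hash is reserved, remap it
      return hash;
    }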
4310 4946
4311 void SubStringStub::Generate(MacroAssembler* masm) { 4947 void SubStringStub::Generate(MacroAssembler* masm) {
4312 Label runtime; 4948 Label runtime;
4313 4949
4314 // Stack frame on entry. 4950 // Stack frame on entry.
4315 // lr: return address 4951 // lr: return address
4316 // sp[0]: to 4952 // sp[0]: to
4317 // sp[4]: from 4953 // sp[4]: from
(...skipping 547 matching lines...)
4865 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3); 5501 __ IncrementCounter(COUNTERS->string_add_native(), 1, r2, r3);
4866 __ add(sp, sp, Operand(2 * kPointerSize)); 5502 __ add(sp, sp, Operand(2 * kPointerSize));
4867 __ Ret(); 5503 __ Ret();
4868 5504
4869 // Just jump to runtime to add the two strings. 5505 // Just jump to runtime to add the two strings.
4870 __ bind(&string_add_runtime); 5506 __ bind(&string_add_runtime);
4871 __ TailCallRuntime(Runtime::kStringAdd, 2, 1); 5507 __ TailCallRuntime(Runtime::kStringAdd, 2, 1);
4872 } 5508 }
4873 5509
4874 5510
5511 void StringCharAtStub::Generate(MacroAssembler* masm) {
5512 // Expects two arguments (object, index) on the stack:
5513 // lr: return address
5514 // sp[0]: index
5515 // sp[4]: object
5516 Register object = r1;
5517 Register index = r0;
5518 Register scratch1 = r2;
5519 Register scratch2 = r3;
5520 Register result = r0;
5521
5522 // Get object and index from the stack.
5523 __ pop(index);
5524 __ pop(object);
5525
5526 Label need_conversion;
5527 Label index_out_of_range;
5528 Label done;
5529 StringCharAtGenerator generator(object,
5530 index,
5531 scratch1,
5532 scratch2,
5533 result,
5534 &need_conversion,
5535 &need_conversion,
5536 &index_out_of_range,
5537 STRING_INDEX_IS_NUMBER);
5538 generator.GenerateFast(masm);
5539 __ b(&done);
5540
5541 __ bind(&index_out_of_range);
5542 // When the index is out of range, the spec requires us to return
5543 // the empty string.
5544 __ LoadRoot(result, Heap::kEmptyStringRootIndex);
5545 __ jmp(&done);
5546
5547 __ bind(&need_conversion);
5548 // Move smi zero into the result register, which will trigger
5549 // conversion.
5550 __ mov(result, Operand(Smi::FromInt(0)));
5551 __ b(&done);
5552
5553 StubRuntimeCallHelper call_helper;
5554 generator.GenerateSlow(masm, call_helper);
5555
5556 __ bind(&done);
5557 __ Ret();
5558 }
5559
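In JS terms, the new stub implements String.prototype.charAt for a numeric index: an in-range index yields a one-character string and an out-of-range index yields the empty string, per the spec. A sketch over std::string, ASCII-only and ignoring the non-smi conversion path (the function name is illustrative):

    #include <string>

    inline std::string CharAtSketch(const std::string& s, size_t index) {
      if (index >= s.size()) return "";  // &index_out_of_range
      return std::string(1, s[index]);   // fast path
    }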
5560
4875 void ICCompareStub::GenerateSmis(MacroAssembler* masm) { 5561 void ICCompareStub::GenerateSmis(MacroAssembler* masm) {
4876 ASSERT(state_ == CompareIC::SMIS); 5562 ASSERT(state_ == CompareIC::SMIS);
4877 Label miss; 5563 Label miss;
4878 __ orr(r2, r1, r0); 5564 __ orr(r2, r1, r0);
4879 __ tst(r2, Operand(kSmiTagMask)); 5565 __ tst(r2, Operand(kSmiTagMask));
4880 __ b(ne, &miss); 5566 __ b(ne, &miss);
4881 5567
4882 if (GetCondition() == eq) { 5568 if (GetCondition() == eq) {
4883 // For equality we do not care about the sign of the result. 5569 // For equality we do not care about the sign of the result.
4884 __ sub(r0, r0, r1, SetCC); 5570 __ sub(r0, r0, r1, SetCC);
(...skipping 101 matching lines...)
4986 __ pop(r1); 5672 __ pop(r1);
4987 __ Jump(r2); 5673 __ Jump(r2);
4988 } 5674 }
4989 5675
4990 5676
4991 #undef __ 5677 #undef __
4992 5678
4993 } } // namespace v8::internal 5679 } } // namespace v8::internal
4994 5680
4995 #endif // V8_TARGET_ARCH_ARM 5681 #endif // V8_TARGET_ARCH_ARM