Chromium Code Reviews

Unified Diff: src/arm/stub-cache-arm.cc

Issue 6529032: Merge 6168:6800 from bleeding_edge to experimental/gc branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 10 months ago
 // Copyright 2006-2009 the V8 project authors. All rights reserved.
 // Redistribution and use in source and binary forms, with or without
 // modification, are permitted provided that the following conditions are
 // met:
 //
 //     * Redistributions of source code must retain the above copyright
 //       notice, this list of conditions and the following disclaimer.
 //     * Redistributions in binary form must reproduce the above
 //       copyright notice, this list of conditions and the following
 //       disclaimer in the documentation and/or other materials provided
(...skipping 352 matching lines...)
 
 
 // Generate code to load the length from a string object and return the length.
 // If the receiver object is not a string or a wrapped string object the
 // execution continues at the miss label. The register containing the
 // receiver is potentially clobbered.
 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm,
                                             Register receiver,
                                             Register scratch1,
                                             Register scratch2,
-                                            Label* miss) {
+                                            Label* miss,
+                                            bool support_wrappers) {
   Label check_wrapper;
 
   // Check if the object is a string leaving the instance type in the
   // scratch1 register.
-  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, &check_wrapper);
+  GenerateStringCheck(masm, receiver, scratch1, scratch2, miss,
+                      support_wrappers ? &check_wrapper : miss);
 
   // Load length directly from the string.
   __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset));
   __ Ret();
 
-  // Check if the object is a JSValue wrapper.
-  __ bind(&check_wrapper);
-  __ cmp(scratch1, Operand(JS_VALUE_TYPE));
-  __ b(ne, miss);
+  if (support_wrappers) {
+    // Check if the object is a JSValue wrapper.
+    __ bind(&check_wrapper);
+    __ cmp(scratch1, Operand(JS_VALUE_TYPE));
+    __ b(ne, miss);
 
-  // Unwrap the value and check if the wrapped value is a string.
-  __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
-  GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
-  __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
-  __ Ret();
+    // Unwrap the value and check if the wrapped value is a string.
+    __ ldr(scratch1, FieldMemOperand(receiver, JSValue::kValueOffset));
+    GenerateStringCheck(masm, scratch1, scratch2, scratch2, miss, miss);
+    __ ldr(r0, FieldMemOperand(scratch1, String::kLengthOffset));
+    __ Ret();
+  }
 }
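
Note: the new support_wrappers flag covers the `new String("abc")` case, where the receiver is a JSValue wrapping a primitive string rather than a string itself. A minimal standalone sketch of the logic the stub emits, with illustrative stand-in types (not the real V8 classes):

    #include <cstdio>
    #include <string>

    // Illustrative stand-ins for the heap objects involved.
    struct Str { std::string data; };
    struct JSValueWrapper { Str* value; };  // result of `new String("abc")`

    // Returns the length, or -1 to signal the "miss" path.
    static int LoadStringLength(void* obj, bool is_string, bool is_wrapper,
                                bool support_wrappers) {
      if (is_string) return static_cast<int>(static_cast<Str*>(obj)->data.size());
      if (support_wrappers && is_wrapper) {
        // Unwrap the JSValue and read the length off the wrapped string.
        return static_cast<int>(
            static_cast<JSValueWrapper*>(obj)->value->data.size());
      }
      return -1;  // miss: fall back to the generic IC path
    }

    int main() {
      Str s{"abc"};
      JSValueWrapper w{&s};
      std::printf("%d %d\n", LoadStringLength(&s, true, false, true),
                  LoadStringLength(&w, false, true, true));  // prints: 3 3
    }
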
 
 
 void StubCompiler::GenerateLoadFunctionPrototype(MacroAssembler* masm,
                                                  Register receiver,
                                                  Register scratch1,
                                                  Register scratch2,
                                                  Label* miss_label) {
   __ TryGetFunctionPrototype(receiver, scratch1, scratch2, miss_label);
   __ mov(r0, scratch1);
(...skipping 114 matching lines...)
 static void GenerateCallFunction(MacroAssembler* masm,
                                  Object* object,
                                  const ParameterCount& arguments,
                                  Label* miss) {
   // ----------- S t a t e -------------
   //  -- r0: receiver
   //  -- r1: function to call
   // -----------------------------------
 
   // Check that the function really is a function.
-  __ BranchOnSmi(r1, miss);
+  __ JumpIfSmi(r1, miss);
   __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE);
   __ b(ne, miss);
 
   // Patch the receiver on the stack with the global proxy if
   // necessary.
   if (object->IsGlobalObject()) {
     __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset));
     __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize));
   }
 
(...skipping 29 matching lines...)
 
   ExternalReference ref =
       ExternalReference(IC_Utility(IC::kLoadPropertyWithInterceptorOnly));
   __ mov(r0, Operand(5));
   __ mov(r1, Operand(ref));
 
   CEntryStub stub(1);
   __ CallStub(&stub);
 }
 
+static const int kFastApiCallArguments = 3;
 
 // Reserves space for the extra arguments to FastHandleApiCall in the
 // caller's frame.
 //
-// These arguments are set by CheckPrototypes and GenerateFastApiCall.
+// These arguments are set by CheckPrototypes and GenerateFastApiDirectCall.
 static void ReserveSpaceForFastApiCall(MacroAssembler* masm,
                                        Register scratch) {
   __ mov(scratch, Operand(Smi::FromInt(0)));
-  __ push(scratch);
-  __ push(scratch);
-  __ push(scratch);
-  __ push(scratch);
+  for (int i = 0; i < kFastApiCallArguments; i++) {
+    __ push(scratch);
+  }
 }
 
 
 // Undoes the effects of ReserveSpaceForFastApiCall.
 static void FreeSpaceForFastApiCall(MacroAssembler* masm) {
-  __ Drop(4);
+  __ Drop(kFastApiCallArguments);
 }
 
 
-// Generates call to FastHandleApiCall builtin.
-static void GenerateFastApiCall(MacroAssembler* masm,
-                                const CallOptimization& optimization,
-                                int argc) {
+static MaybeObject* GenerateFastApiDirectCall(MacroAssembler* masm,
+                                              const CallOptimization& optimization,
+                                              int argc) {
+  // ----------- S t a t e -------------
+  //  -- sp[0]              : holder (set by CheckPrototypes)
+  //  -- sp[4]              : callee js function
+  //  -- sp[8]              : call data
+  //  -- sp[12]             : last js argument
+  //  -- ...
+  //  -- sp[(argc + 3) * 4] : first js argument
+  //  -- sp[(argc + 4) * 4] : receiver
+  // -----------------------------------
   // Get the function and setup the context.
   JSFunction* function = optimization.constant_function();
   __ mov(r5, Operand(Handle<JSFunction>(function)));
   __ ldr(cp, FieldMemOperand(r5, JSFunction::kContextOffset));
 
   // Pass the additional arguments FastHandleApiCall expects.
-  bool info_loaded = false;
-  Object* callback = optimization.api_call_info()->callback();
-  if (Heap::InNewSpace(callback)) {
-    info_loaded = true;
-    __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
-    __ ldr(r7, FieldMemOperand(r0, CallHandlerInfo::kCallbackOffset));
-  } else {
-    __ Move(r7, Handle<Object>(callback));
-  }
   Object* call_data = optimization.api_call_info()->data();
+  Handle<CallHandlerInfo> api_call_info_handle(optimization.api_call_info());
   if (Heap::InNewSpace(call_data)) {
-    if (!info_loaded) {
-      __ Move(r0, Handle<CallHandlerInfo>(optimization.api_call_info()));
-    }
+    __ Move(r0, api_call_info_handle);
     __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset));
   } else {
     __ Move(r6, Handle<Object>(call_data));
   }
+  // Store js function and call data.
+  __ stm(ib, sp, r5.bit() | r6.bit());
 
-  __ add(sp, sp, Operand(1 * kPointerSize));
-  __ stm(ia, sp, r5.bit() | r6.bit() | r7.bit());
-  __ sub(sp, sp, Operand(1 * kPointerSize));
+  // r2 points to call data as expected by Arguments
+  // (refer to layout above).
+  __ add(r2, sp, Operand(2 * kPointerSize));
 
-  // Set the number of arguments.
-  __ mov(r0, Operand(argc + 4));
+  Object* callback = optimization.api_call_info()->callback();
+  Address api_function_address = v8::ToCData<Address>(callback);
+  ApiFunction fun(api_function_address);
 
-  // Jump to the fast api call builtin (tail call).
-  Handle<Code> code = Handle<Code>(
-      Builtins::builtin(Builtins::FastHandleApiCall));
-  ParameterCount expected(0);
-  __ InvokeCode(code, expected, expected,
-                RelocInfo::CODE_TARGET, JUMP_FUNCTION);
+  const int kApiStackSpace = 4;
+  __ EnterExitFrame(false, kApiStackSpace);
+
+  // r0 = v8::Arguments&
+  // Arguments is after the return address.
+  __ add(r0, sp, Operand(1 * kPointerSize));
+  // v8::Arguments::implicit_args = data
+  __ str(r2, MemOperand(r0, 0 * kPointerSize));
+  // v8::Arguments::values = last argument
+  __ add(ip, r2, Operand(argc * kPointerSize));
+  __ str(ip, MemOperand(r0, 1 * kPointerSize));
+  // v8::Arguments::length_ = argc
+  __ mov(ip, Operand(argc));
+  __ str(ip, MemOperand(r0, 2 * kPointerSize));
+  // v8::Arguments::is_construct_call = 0
+  __ mov(ip, Operand(0));
+  __ str(ip, MemOperand(r0, 3 * kPointerSize));
+
+  // Emitting a stub call may try to allocate (if the code is not
+  // already generated).  Do not allow the assembler to perform a
+  // garbage collection but instead return the allocation failure
+  // object.
+  MaybeObject* result = masm->TryCallApiFunctionAndReturn(
+      &fun, argc + kFastApiCallArguments + 1);
+  if (result->IsFailure()) {
+    return result;
+  }
+  return Heap::undefined_value();
 }
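
Note: GenerateFastApiDirectCall replaces the old FastHandleApiCall builtin tail call with a direct exit-frame call. It writes a four-word v8::Arguments-style block above sp and hands its address to the C++ callback in r0. A rough picture of that block, with field names taken from the comments in the generated code; the authoritative layout lives in the V8 API headers, so treat this as a reading aid only (32-bit ARM, every field one 4-byte word):

    #include <cstdint>

    struct ArgumentsBlock {                // filled in above, via r0
      intptr_t* implicit_args;             // = r2, which points at the call data slot
      intptr_t* values;                    // = r2 + argc * 4: the last JS argument
      intptr_t  length;                    // = argc (a raw int, not a smi)
      intptr_t  is_construct_call;         // = 0: this path never handles `new`
    };

Because the JS arguments sit above the call data on the stack, `values` points at the last argument and the callee indexes downward from it.
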
 
-
 class CallInterceptorCompiler BASE_EMBEDDED {
  public:
   CallInterceptorCompiler(StubCompiler* stub_compiler,
                           const ParameterCount& arguments,
                           Register name)
       : stub_compiler_(stub_compiler),
         arguments_(arguments),
         name_(name) {}
 
-  void Compile(MacroAssembler* masm,
-               JSObject* object,
-               JSObject* holder,
-               String* name,
-               LookupResult* lookup,
-               Register receiver,
-               Register scratch1,
-               Register scratch2,
-               Register scratch3,
-               Label* miss) {
+  MaybeObject* Compile(MacroAssembler* masm,
+                       JSObject* object,
+                       JSObject* holder,
+                       String* name,
+                       LookupResult* lookup,
+                       Register receiver,
+                       Register scratch1,
+                       Register scratch2,
+                       Register scratch3,
+                       Label* miss) {
     ASSERT(holder->HasNamedInterceptor());
     ASSERT(!holder->GetNamedInterceptor()->getter()->IsUndefined());
 
     // Check that the receiver isn't a smi.
-    __ BranchOnSmi(receiver, miss);
+    __ JumpIfSmi(receiver, miss);
 
     CallOptimization optimization(lookup);
 
     if (optimization.is_constant_call()) {
-      CompileCacheable(masm,
-                       object,
-                       receiver,
-                       scratch1,
-                       scratch2,
-                       scratch3,
-                       holder,
-                       lookup,
-                       name,
-                       optimization,
-                       miss);
+      return CompileCacheable(masm,
+                              object,
+                              receiver,
+                              scratch1,
+                              scratch2,
+                              scratch3,
+                              holder,
+                              lookup,
+                              name,
+                              optimization,
+                              miss);
     } else {
       CompileRegular(masm,
                      object,
                      receiver,
                      scratch1,
                      scratch2,
                      scratch3,
                      name,
                      holder,
                      miss);
+      return Heap::undefined_value();
     }
   }
 
  private:
-  void CompileCacheable(MacroAssembler* masm,
-                        JSObject* object,
-                        Register receiver,
-                        Register scratch1,
-                        Register scratch2,
-                        Register scratch3,
-                        JSObject* interceptor_holder,
-                        LookupResult* lookup,
-                        String* name,
-                        const CallOptimization& optimization,
-                        Label* miss_label) {
+  MaybeObject* CompileCacheable(MacroAssembler* masm,
+                                JSObject* object,
+                                Register receiver,
+                                Register scratch1,
+                                Register scratch2,
+                                Register scratch3,
+                                JSObject* interceptor_holder,
+                                LookupResult* lookup,
+                                String* name,
+                                const CallOptimization& optimization,
+                                Label* miss_label) {
     ASSERT(optimization.is_constant_call());
     ASSERT(!lookup->holder()->IsGlobalObject());
 
     int depth1 = kInvalidProtoDepth;
     int depth2 = kInvalidProtoDepth;
     bool can_do_fast_api_call = false;
     if (optimization.is_simple_api_call() &&
         !lookup->holder()->IsGlobalObject()) {
       depth1 =
           optimization.GetPrototypeDepthOfExpectedType(object,
(...skipping 43 matching lines...)
     } else {
       // CheckPrototypes has a side effect of fetching a 'holder'
       // for API (object which is instanceof for the signature).  It's
       // safe to omit it here, as if present, it should be fetched
       // by the previous CheckPrototypes.
       ASSERT(depth2 == kInvalidProtoDepth);
     }
 
     // Invoke function.
     if (can_do_fast_api_call) {
-      GenerateFastApiCall(masm, optimization, arguments_.immediate());
+      MaybeObject* result = GenerateFastApiDirectCall(masm,
+                                                      optimization,
+                                                      arguments_.immediate());
+      if (result->IsFailure()) return result;
     } else {
       __ InvokeFunction(optimization.constant_function(), arguments_,
                         JUMP_FUNCTION);
     }
 
     // Deferred code for fast API call case---clean preallocated space.
     if (can_do_fast_api_call) {
       __ bind(&miss_cleanup);
       FreeSpaceForFastApiCall(masm);
       __ b(miss_label);
     }
 
     // Invoke a regular function.
     __ bind(&regular_invoke);
     if (can_do_fast_api_call) {
       FreeSpaceForFastApiCall(masm);
     }
+
+    return Heap::undefined_value();
   }
 
   void CompileRegular(MacroAssembler* masm,
                       JSObject* object,
                       Register receiver,
                       Register scratch1,
                       Register scratch2,
                       Register scratch3,
                       String* name,
                       JSObject* interceptor_holder,
(...skipping 100 matching lines...)
                                        miss);
       if (result->IsFailure()) return result;
     }
     ASSERT(current->IsJSObject());
     current = JSObject::cast(current->GetPrototype());
   }
   return NULL;
 }
 
 
+// Convert and store int passed in register ival to IEEE 754 single precision
+// floating point value at memory location (dst + 4 * wordoffset)
+// If VFP3 is available use it for conversion.
+static void StoreIntAsFloat(MacroAssembler* masm,
+                            Register dst,
+                            Register wordoffset,
+                            Register ival,
+                            Register fval,
+                            Register scratch1,
+                            Register scratch2) {
+  if (CpuFeatures::IsSupported(VFP3)) {
+    CpuFeatures::Scope scope(VFP3);
+    __ vmov(s0, ival);
+    __ add(scratch1, dst, Operand(wordoffset, LSL, 2));
+    __ vcvt_f32_s32(s0, s0);
+    __ vstr(s0, scratch1, 0);
+  } else {
+    Label not_special, done;
+    // Move sign bit from source to destination.  This works because the sign
+    // bit in the exponent word of the double has the same position and polarity
+    // as the 2's complement sign bit in a Smi.
+    ASSERT(kBinary32SignMask == 0x80000000u);
+
+    __ and_(fval, ival, Operand(kBinary32SignMask), SetCC);
+    // Negate value if it is negative.
+    __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne);
+
+    // We have -1, 0 or 1, which we treat specially. Register ival contains
+    // absolute value: it is either equal to 1 (special case of -1 and 1),
+    // greater than 1 (not a special case) or less than 1 (special case of 0).
+    __ cmp(ival, Operand(1));
+    __ b(gt, &not_special);
+
+    // For 1 or -1 we need to or in the 0 exponent (biased).
+    static const uint32_t exponent_word_for_1 =
+        kBinary32ExponentBias << kBinary32ExponentShift;
+
+    __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq);
+    __ b(&done);
+
+    __ bind(&not_special);
+    // Count leading zeros.
+    // Gets the wrong answer for 0, but we already checked for that case above.
+    Register zeros = scratch2;
+    __ CountLeadingZeros(zeros, ival, scratch1);
+
+    // Compute exponent and or it into the exponent register.
+    __ rsb(scratch1,
+           zeros,
+           Operand((kBitsPerInt - 1) + kBinary32ExponentBias));
+
+    __ orr(fval,
+           fval,
+           Operand(scratch1, LSL, kBinary32ExponentShift));
+
+    // Shift up the source chopping the top bit off.
+    __ add(zeros, zeros, Operand(1));
+    // This wouldn't work for 1 and -1 as the shift would be 32 which means 0.
+    __ mov(ival, Operand(ival, LSL, zeros));
+    // And the top (top 20 bits).
+    __ orr(fval,
+           fval,
+           Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits));
+
+    __ bind(&done);
+    __ str(fval, MemOperand(dst, wordoffset, LSL, 2));
+  }
+}
+
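
Note: the non-VFP3 path above is ordinary binary32 construction: keep the sign, derive the biased exponent from the leading-zero count, and left-align the magnitude into the 23 mantissa bits. The same recipe in compilable host C++ (uses __builtin_clz, so GCC/Clang; magnitudes wider than 24 bits truncate toward zero, just like the shift-based path):

    #include <cassert>
    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint32_t IntToBinary32Bits(int32_t x) {
      const uint32_t kSignMask = 0x80000000u;
      const int kExponentBias = 127, kExponentShift = 23, kMantissaBits = 23;
      uint32_t bits = static_cast<uint32_t>(x) & kSignMask;          // sign
      uint32_t mag =
          x < 0 ? 0u - static_cast<uint32_t>(x) : static_cast<uint32_t>(x);
      if (mag == 0) return bits;                                     // +/-0
      if (mag == 1) return bits | (kExponentBias << kExponentShift); // +/-1.0
      int zeros = __builtin_clz(mag);           // CountLeadingZeros
      bits |= static_cast<uint32_t>((31 - zeros) + kExponentBias)
              << kExponentShift;                // biased exponent
      mag <<= zeros + 1;                        // chop the implicit top bit
      bits |= mag >> (32 - kMantissaBits);      // top 23 bits -> mantissa
      return bits;
    }

    int main() {
      float f;
      uint32_t b = IntToBinary32Bits(-5);
      std::memcpy(&f, &b, sizeof f);
      std::printf("%g\n", f);  // -5
      assert(f == -5.0f);
    }
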
+
+// Convert unsigned integer with specified number of leading zeroes in binary
+// representation to IEEE 754 double.
+// Integer to convert is passed in register hiword.
+// Resulting double is returned in registers hiword:loword.
+// This functions does not work correctly for 0.
+static void GenerateUInt2Double(MacroAssembler* masm,
+                                Register hiword,
+                                Register loword,
+                                Register scratch,
+                                int leading_zeroes) {
+  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
+  const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits;
+
+  const int mantissa_shift_for_hi_word =
+      meaningful_bits - HeapNumber::kMantissaBitsInTopWord;
+
+  const int mantissa_shift_for_lo_word =
+      kBitsPerInt - mantissa_shift_for_hi_word;
+
+  __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift));
+  if (mantissa_shift_for_hi_word > 0) {
+    __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word));
+    __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word));
+  } else {
+    __ mov(loword, Operand(0, RelocInfo::NONE));
+    __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word));
+  }
+
+  // If least significant bit of biased exponent was not 1 it was corrupted
+  // by most significant bit of mantissa so we should fix that.
+  if (!(biased_exponent & 1)) {
+    __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift));
+  }
+}
+
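
Note: GenerateUInt2Double does the analogous construction for binary64, splitting the mantissa across the two 32-bit halves of the double. A host-side sketch of the same arithmetic; here the implicit leading bit is masked off directly rather than being fixed up with bic as above, and the input must be nonzero:

    #include <cstdint>
    #include <cstdio>
    #include <cstring>

    static uint64_t UIntToDoubleBits(uint32_t value, int leading_zeroes) {
      const int kExponentBias = 1023;
      const int kExponentShift = 20;        // within the high word
      const int kMantissaBitsInTopWord = 20;
      const int meaningful_bits = 32 - leading_zeroes - 1;
      const uint32_t biased_exponent = kExponentBias + meaningful_bits;

      uint32_t hi = biased_exponent << kExponentShift;
      uint32_t lo = 0;
      int shift_hi = meaningful_bits - kMantissaBitsInTopWord;
      if (shift_hi > 0) {
        lo = value << (32 - shift_hi);      // low mantissa bits spill here
        hi |= (value >> shift_hi) & ((1u << kExponentShift) - 1);
      } else {
        hi |= (value << -shift_hi) & ((1u << kExponentShift) - 1);
      }
      return (static_cast<uint64_t>(hi) << 32) | lo;
    }

    int main() {
      double d;
      uint64_t bits = UIntToDoubleBits(1000000u, __builtin_clz(1000000u));
      std::memcpy(&d, &bits, sizeof d);
      std::printf("%f\n", d);  // 1000000.000000
    }
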
 
 #undef __
 #define __ ACCESS_MASM(masm())
 
 
 Register StubCompiler::CheckPrototypes(JSObject* object,
                                        Register object_reg,
                                        JSObject* holder,
                                        Register holder_reg,
                                        Register scratch1,
(...skipping 167 matching lines...)
   Register reg =
       CheckPrototypes(object, receiver, holder,
                       scratch1, scratch2, scratch3, name, miss);
 
   // Return the constant value.
   __ mov(r0, Operand(Handle<Object>(value)));
   __ Ret();
 }
 
 
-bool StubCompiler::GenerateLoadCallback(JSObject* object,
-                                        JSObject* holder,
-                                        Register receiver,
-                                        Register name_reg,
-                                        Register scratch1,
-                                        Register scratch2,
-                                        Register scratch3,
-                                        AccessorInfo* callback,
-                                        String* name,
-                                        Label* miss,
-                                        Failure** failure) {
+MaybeObject* StubCompiler::GenerateLoadCallback(JSObject* object,
+                                                JSObject* holder,
+                                                Register receiver,
+                                                Register name_reg,
+                                                Register scratch1,
+                                                Register scratch2,
+                                                Register scratch3,
+                                                AccessorInfo* callback,
+                                                String* name,
+                                                Label* miss) {
   // Check that the receiver isn't a smi.
   __ tst(receiver, Operand(kSmiTagMask));
   __ b(eq, miss);
 
   // Check that the maps haven't changed.
   Register reg =
       CheckPrototypes(object, receiver, holder, scratch1, scratch2, scratch3,
                       name, miss);
 
   // Push the arguments on the JS stack of the caller.
   __ push(receiver);  // Receiver.
   __ mov(scratch3, Operand(Handle<AccessorInfo>(callback)));  // callback data
   __ ldr(ip, FieldMemOperand(scratch3, AccessorInfo::kDataOffset));
   __ Push(reg, ip, scratch3, name_reg);
 
   // Do tail-call to the runtime system.
   ExternalReference load_callback_property =
       ExternalReference(IC_Utility(IC::kLoadCallbackProperty));
   __ TailCallExternalReference(load_callback_property, 5, 1);
 
-  return true;
+  return Heap::undefined_value();  // Success.
 }
1130 1266
1131 void StubCompiler::GenerateLoadInterceptor(JSObject* object, 1267 void StubCompiler::GenerateLoadInterceptor(JSObject* object,
1132 JSObject* interceptor_holder, 1268 JSObject* interceptor_holder,
1133 LookupResult* lookup, 1269 LookupResult* lookup,
1134 Register receiver, 1270 Register receiver,
1135 Register name_reg, 1271 Register name_reg,
1136 Register scratch1, 1272 Register scratch1,
1137 Register scratch2, 1273 Register scratch2,
1138 Register scratch3, 1274 Register scratch3,
1139 String* name, 1275 String* name,
1140 Label* miss) { 1276 Label* miss) {
1141 ASSERT(interceptor_holder->HasNamedInterceptor()); 1277 ASSERT(interceptor_holder->HasNamedInterceptor());
1142 ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined()); 1278 ASSERT(!interceptor_holder->GetNamedInterceptor()->getter()->IsUndefined());
1143 1279
1144 // Check that the receiver isn't a smi. 1280 // Check that the receiver isn't a smi.
1145 __ BranchOnSmi(receiver, miss); 1281 __ JumpIfSmi(receiver, miss);
1146 1282
1147 // So far the most popular follow ups for interceptor loads are FIELD 1283 // So far the most popular follow ups for interceptor loads are FIELD
1148 // and CALLBACKS, so inline only them, other cases may be added 1284 // and CALLBACKS, so inline only them, other cases may be added
1149 // later. 1285 // later.
1150 bool compile_followup_inline = false; 1286 bool compile_followup_inline = false;
1151 if (lookup->IsProperty() && lookup->IsCacheable()) { 1287 if (lookup->IsProperty() && lookup->IsCacheable()) {
1152 if (lookup->type() == FIELD) { 1288 if (lookup->type() == FIELD) {
1153 compile_followup_inline = true; 1289 compile_followup_inline = true;
1154 } else if (lookup->type() == CALLBACKS && 1290 } else if (lookup->type() == CALLBACKS &&
1155 lookup->GetCallbackObject()->IsAccessorInfo() && 1291 lookup->GetCallbackObject()->IsAccessorInfo() &&
(...skipping 173 matching lines...) Expand 10 before | Expand all | Expand 10 after
     __ cmp(r4, r3);
     __ b(ne, miss);
   } else {
     __ cmp(r1, Operand(Handle<JSFunction>(function)));
     __ b(ne, miss);
   }
 }
 
 
 MaybeObject* CallStubCompiler::GenerateMissBranch() {
+  MaybeObject* maybe_obj = StubCache::ComputeCallMiss(arguments().immediate(),
+                                                      kind_);
   Object* obj;
-  { MaybeObject* maybe_obj =
-        StubCache::ComputeCallMiss(arguments().immediate(), kind_);
-    if (!maybe_obj->ToObject(&obj)) return maybe_obj;
-  }
+  if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   __ Jump(Handle<Code>(Code::cast(obj)), RelocInfo::CODE_TARGET);
   return obj;
 }
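
Note: this is the allocation-failure protocol the whole patch threads through the stub compilers: anything that may allocate returns a MaybeObject*, and a failure is passed up the stack instead of triggering GC inside the assembler. A toy model of the shape (the real types live in V8's objects.h; this is illustrative only):

    struct Object {};
    struct MaybeObject {
      Object* obj;  // nullptr stands in for a retry-after-GC failure
      bool ToObject(Object** out) { *out = obj; return obj != nullptr; }
      bool IsFailure() const { return obj == nullptr; }
    };

    // Every layer does the same unwrap-or-propagate step:
    MaybeObject* UseResult(MaybeObject* maybe_obj) {
      Object* obj;
      if (!maybe_obj->ToObject(&obj)) return maybe_obj;  // pass failure up
      /* ... use obj ... */
      return maybe_obj;
    }
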
 
 
 MaybeObject* CallStubCompiler::CompileCallField(JSObject* object,
                                                 JSObject* holder,
                                                 int index,
                                                 String* name) {
   // ----------- S t a t e -------------
(...skipping 50 matching lines...)
 
   GenerateNameCheck(name, &miss);
 
   Register receiver = r1;
 
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, &miss);
+  __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
   CheckPrototypes(JSObject::cast(object), receiver,
                   holder, r3, r0, r4, name, &miss);
 
   if (argc == 0) {
     // Nothing to do, just return the length.
     __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset));
     __ Drop(argc + 1);
     __ Ret();
(...skipping 37 matching lines...)
     // We may need a register containing the address end_elements below,
     // so write back the value in end_elements.
     __ add(end_elements, elements,
            Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize));
     const int kEndElementsOffset =
         FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize;
     __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex));
 
     // Check for a smi.
 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER
-    __ BranchOnNotSmi(r4, &with_write_barrier);
+    __ JumpIfNotSmi(r4, &with_write_barrier);
 #endif
     __ bind(&exit);
     __ Drop(argc + 1);
     __ Ret();
 
 #ifdef ENABLE_CARDMARKING_WRITE_BARRIER
     __ bind(&with_write_barrier);
     __ InNewSpace(elements, r4, eq, &exit);
     __ RecordWriteHelper(elements, end_elements, r4);
     __ Drop(argc + 1);
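
Note: the #ifdef'd block above implements the usual generational write-barrier guard. After storing a value into the elements array, the slot is recorded unless the value is a smi (an immediate, not a pointer) or the host already lives in new space (which the GC scans fully anyway). A standalone sketch with stand-in types, not the real V8 classes:

    struct Obj { bool is_smi; };
    struct HeapStub {
      bool InNewSpace(const void*) const { return false; }
      void RecordWrite(void* host, void* slot) { /* add to remembered set */ }
    };

    inline void StoreWithBarrier(HeapStub* heap, void* host,
                                 Obj** slot, Obj* value) {
      *slot = value;
      if (value->is_smi) return;           // immediates carry no pointer
      if (heap->InNewSpace(host)) return;  // new space is scanned at GC anyway
      heap->RecordWrite(host, slot);       // possible old->new edge: remember it
    }
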
(...skipping 89 matching lines...)
   Register receiver = r1;
   Register elements = r3;
 
   GenerateNameCheck(name, &miss);
 
   // Get the receiver from the stack
   const int argc = arguments().immediate();
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
 
   // Check that the receiver isn't a smi.
-  __ BranchOnSmi(receiver, &miss);
+  __ JumpIfSmi(receiver, &miss);
 
   // Check that the maps haven't changed.
   CheckPrototypes(JSObject::cast(object),
                   receiver, holder, elements, r4, r0, name, &miss);
 
   // Get the elements array of the object.
   __ ldr(elements, FieldMemOperand(receiver, JSArray::kElementsOffset));
 
   // Check that the elements are in fast mode and writable.
   __ CheckMap(elements, r0, Heap::kFixedArrayMapRootIndex, &call_builtin, true);
(...skipping 57 matching lines...)
   //  -- ...
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
   if (!object->IsString() || cell != NULL) return Heap::undefined_value();
 
   const int argc = arguments().immediate();
 
   Label miss;
+  Label name_miss;
   Label index_out_of_range;
-  GenerateNameCheck(name, &miss);
+  Label* index_out_of_range_label = &index_out_of_range;
+
+  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
                   r1, r3, r4, name, &miss);
 
   Register receiver = r1;
   Register index = r4;
   Register scratch = r3;
   Register result = r0;
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
     __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
   } else {
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
   StringCharCodeAtGenerator char_code_at_generator(receiver,
                                                    index,
                                                    scratch,
                                                    result,
                                                    &miss,  // When not a string.
                                                    &miss,  // When not a number.
-                                                   &index_out_of_range,
+                                                   index_out_of_range_label,
                                                    STRING_INDEX_IS_NUMBER);
   char_code_at_generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
   char_code_at_generator.GenerateSlow(masm(), call_helper);
 
-  __ bind(&index_out_of_range);
-  __ LoadRoot(r0, Heap::kNanValueRootIndex);
-  __ Drop(argc + 1);
-  __ Ret();
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(r0, Heap::kNanValueRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
 
   __ bind(&miss);
+  // Restore function name in r2.
+  __ Move(r2, Handle<String>(name));
+  __ bind(&name_miss);
   Object* obj;
   { MaybeObject* maybe_obj = GenerateMissBranch();
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
(...skipping 10 matching lines...)
   //  -- ...
   //  -- sp[argc * 4]           : receiver
   // -----------------------------------
 
   // If object is not a string, bail out to regular call.
   if (!object->IsString() || cell != NULL) return Heap::undefined_value();
 
   const int argc = arguments().immediate();
 
   Label miss;
+  Label name_miss;
   Label index_out_of_range;
+  Label* index_out_of_range_label = &index_out_of_range;
 
-  GenerateNameCheck(name, &miss);
+  if (kind_ == Code::CALL_IC && extra_ic_state_ == DEFAULT_STRING_STUB) {
+    index_out_of_range_label = &miss;
+  }
+
+  GenerateNameCheck(name, &name_miss);
 
   // Check that the maps starting from the prototype haven't changed.
   GenerateDirectLoadGlobalFunctionPrototype(masm(),
                                             Context::STRING_FUNCTION_INDEX,
                                             r0,
                                             &miss);
   ASSERT(object != holder);
   CheckPrototypes(JSObject::cast(object->GetPrototype()), r0, holder,
                   r1, r3, r4, name, &miss);
 
   Register receiver = r0;
   Register index = r4;
   Register scratch1 = r1;
   Register scratch2 = r3;
   Register result = r0;
   __ ldr(receiver, MemOperand(sp, argc * kPointerSize));
   if (argc > 0) {
     __ ldr(index, MemOperand(sp, (argc - 1) * kPointerSize));
   } else {
     __ LoadRoot(index, Heap::kUndefinedValueRootIndex);
   }
 
   StringCharAtGenerator char_at_generator(receiver,
                                           index,
                                           scratch1,
                                           scratch2,
                                           result,
                                           &miss,  // When not a string.
                                           &miss,  // When not a number.
-                                          &index_out_of_range,
+                                          index_out_of_range_label,
                                           STRING_INDEX_IS_NUMBER);
   char_at_generator.GenerateFast(masm());
   __ Drop(argc + 1);
   __ Ret();
 
   StubRuntimeCallHelper call_helper;
   char_at_generator.GenerateSlow(masm(), call_helper);
 
-  __ bind(&index_out_of_range);
-  __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
-  __ Drop(argc + 1);
-  __ Ret();
+  if (index_out_of_range.is_linked()) {
+    __ bind(&index_out_of_range);
+    __ LoadRoot(r0, Heap::kEmptyStringRootIndex);
+    __ Drop(argc + 1);
+    __ Ret();
+  }
 
   __ bind(&miss);
+  // Restore function name in r2.
+  __ Move(r2, Handle<String>(name));
+  __ bind(&name_miss);
   Object* obj;
   { MaybeObject* maybe_obj = GenerateMissBranch();
     if (!maybe_obj->ToObject(&obj)) return maybe_obj;
   }
 
   // Return the generated code.
   return GetCode(function);
 }
 
 
(...skipping 95 matching lines...)
   // arguments, bail out to the regular call.
   if (!object->IsJSObject() || argc != 1) return Heap::undefined_value();
 
   Label miss, slow;
   GenerateNameCheck(name, &miss);
 
   if (cell == NULL) {
     __ ldr(r1, MemOperand(sp, 1 * kPointerSize));
 
     STATIC_ASSERT(kSmiTag == 0);
-    __ BranchOnSmi(r1, &miss);
+    __ JumpIfSmi(r1, &miss);
 
     CheckPrototypes(JSObject::cast(object), r1, holder, r0, r3, r4, name,
                     &miss);
   } else {
     ASSERT(cell->value() == function);
     GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
   // Load the (only) argument into r0.
(...skipping 22 matching lines...)
 
   // Backup FPSCR.
   __ vmrs(r3);
   // Set custom FPCSR:
   //  - Set rounding mode to "Round towards Minus Infinity"
   //    (ie bits [23:22] = 0b10).
   //  - Clear vfp cumulative exception flags (bits [3:0]).
   //  - Make sure Flush-to-zero mode control bit is unset (bit 22).
   __ bic(r9, r3,
          Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask));
-  __ orr(r9, r9, Operand(kVFPRoundToMinusInfinityBits));
+  __ orr(r9, r9, Operand(kRoundToMinusInf));
   __ vmsr(r9);
 
   // Convert the argument to an integer.
-  __ vcvt_s32_f64(s0, d1, Assembler::FPSCRRounding, al);
+  __ vcvt_s32_f64(s0, d1, kFPSCRRounding);
1945 // Use vcvt latency to start checking for special cases. 2103 // Use vcvt latency to start checking for special cases.
1946 // Get the argument exponent and clear the sign bit. 2104 // Get the argument exponent and clear the sign bit.
1947 __ bic(r6, r5, Operand(HeapNumber::kSignMask)); 2105 __ bic(r6, r5, Operand(HeapNumber::kSignMask));
1948 __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord)); 2106 __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord));
1949 2107
1950 // Retrieve FPSCR and check for vfp exceptions. 2108 // Retrieve FPSCR and check for vfp exceptions.
1951 __ vmrs(r9); 2109 __ vmrs(r9);
1952 __ tst(r9, Operand(kVFPExceptionMask)); 2110 __ tst(r9, Operand(kVFPExceptionMask));
1953 __ b(&no_vfp_exception, eq); 2111 __ b(&no_vfp_exception, eq);
1954 2112
1955 // Check for NaN, Infinity, and -Infinity. 2113 // Check for NaN, Infinity, and -Infinity.
1956 // They are invariant through a Math.Floor call, so just 2114 // They are invariant through a Math.Floor call, so just
1957 // return the original argument. 2115 // return the original argument.
1958 __ sub(r7, r6, Operand(HeapNumber::kExponentMask 2116 __ sub(r7, r6, Operand(HeapNumber::kExponentMask
1959 >> HeapNumber::kMantissaBitsInTopWord), SetCC); 2117 >> HeapNumber::kMantissaBitsInTopWord), SetCC);
1960 __ b(&restore_fpscr_and_return, eq); 2118 __ b(&restore_fpscr_and_return, eq);
1961 // We had an overflow or underflow in the conversion. Check if we 2119 // We had an overflow or underflow in the conversion. Check if we
1962 // have a big exponent. 2120 // have a big exponent.
1963 __ cmp(r7, Operand(HeapNumber::kMantissaBits)); 2121 __ cmp(r7, Operand(HeapNumber::kMantissaBits));
1964 // If greater or equal, the argument is already round and in r0. 2122 // If greater or equal, the argument is already round and in r0.
1965 __ b(&restore_fpscr_and_return, ge); 2123 __ b(&restore_fpscr_and_return, ge);
1966 __ b(&slow); 2124 __ b(&wont_fit_smi);
1967 2125
1968 __ bind(&no_vfp_exception); 2126 __ bind(&no_vfp_exception);
1969 // Move the result back to general purpose register r0. 2127 // Move the result back to general purpose register r0.
1970 __ vmov(r0, s0); 2128 __ vmov(r0, s0);
1971 // Check if the result fits into a smi. 2129 // Check if the result fits into a smi.
1972 __ add(r1, r0, Operand(0x40000000), SetCC); 2130 __ add(r1, r0, Operand(0x40000000), SetCC);
1973 __ b(&wont_fit_smi, mi); 2131 __ b(&wont_fit_smi, mi);
1974 // Tag the result. 2132 // Tag the result.
1975 STATIC_ASSERT(kSmiTag == 0); 2133 STATIC_ASSERT(kSmiTag == 0);
1976 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); 2134 __ mov(r0, Operand(r0, LSL, kSmiTagSize));
1977 2135
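
Note: the `add r1, r0, #0x40000000` with SetCC is the standard smi range check. A 32-bit value fits in a smi iff it lies in [-2^30, 2^30 - 1], and adding 0x40000000 produces a negative result exactly when it does not. In host C++:

    #include <cstdint>
    #include <cstdio>

    static bool FitsSmi(int32_t v) {
      // Negative after the biasing add <=> outside the smi range.
      return static_cast<int32_t>(static_cast<uint32_t>(v) + 0x40000000u) >= 0;
    }

    int main() {
      std::printf("%d %d %d\n",
                  FitsSmi(0x3FFFFFFF),   // 1: largest smi
                  FitsSmi(-0x40000000),  // 1: smallest smi
                  FitsSmi(0x40000000));  // 0: needs a heap number
    }
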
   // Check for -0.
-  __ cmp(r0, Operand(0));
+  __ cmp(r0, Operand(0, RelocInfo::NONE));
   __ b(&restore_fpscr_and_return, ne);
   // r5 already holds the HeapNumber exponent.
   __ tst(r5, Operand(HeapNumber::kSignMask));
   // If our HeapNumber is negative it was -0, so load its address and return.
   // Else r0 is loaded with 0, so we can also just return.
   __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne);
 
   __ bind(&restore_fpscr_and_return);
   // Restore FPSCR and return.
   __ vmsr(r3);
   __ Drop(argc + 1);
   __ Ret();
 
   __ bind(&wont_fit_smi);
-  __ bind(&slow);
   // Restore FPCSR and fall to slow case.
   __ vmsr(r3);
 
+  __ bind(&slow);
   // Tail call the full function. We do not have to patch the receiver
   // because the function makes no use of it.
   __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
 
   __ bind(&miss);
   // r2: function name.
   MaybeObject* obj = GenerateMissBranch();
   if (obj->IsFailure()) return obj;
 
   // Return the generated code.
(...skipping 37 matching lines...)
     GenerateGlobalReceiverCheck(JSObject::cast(object), holder, name, &miss);
     GenerateLoadFunctionFromCell(cell, function, &miss);
   }
 
   // Load the (only) argument into r0.
   __ ldr(r0, MemOperand(sp, 0 * kPointerSize));
 
   // Check if the argument is a smi.
   Label not_smi;
   STATIC_ASSERT(kSmiTag == 0);
-  __ BranchOnNotSmi(r0, &not_smi);
+  __ JumpIfNotSmi(r0, &not_smi);
 
   // Do bitwise not or do nothing depending on the sign of the
   // argument.
   __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1));
 
   // Add 1 or do nothing depending on the sign of the argument.
   __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC);
 
   // If the result is still negative, go to the slow case.
   // This only happens for the most negative smi.
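
Note: the eor/sub pair above is the classic branchless absolute-value idiom. The arithmetic shift produces a mask of 0 for non-negative inputs and all-ones for negative ones, so `(x ^ mask) - mask` is x or -x respectively. In host C++:

    #include <cstdint>
    #include <cstdio>

    static int32_t BranchlessAbs(int32_t x) {
      int32_t mask = x >> 31;    // arithmetic shift: 0 or -1
      return (x ^ mask) - mask;  // note: still INT32_MIN for INT32_MIN,
                                 // which is why the stub keeps a slow path
    }

    int main() {
      std::printf("%d %d\n", BranchlessAbs(-7), BranchlessAbs(7));  // 7 7
    }
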
(...skipping 176 matching lines...)
                         r1, r4, name, &miss);
       }
       break;
     }
 
     default:
       UNREACHABLE();
   }
 
   if (depth != kInvalidProtoDepth) {
-    GenerateFastApiCall(masm(), optimization, argc);
+    MaybeObject* result = GenerateFastApiDirectCall(masm(), optimization, argc);
+    if (result->IsFailure()) return result;
   } else {
     __ InvokeFunction(function, arguments(), JUMP_FUNCTION);
   }
 
   // Handle call cache miss.
   __ bind(&miss);
   if (depth != kInvalidProtoDepth) {
     FreeSpaceForFastApiCall(masm());
   }
 
(...skipping 23 matching lines...)
   // Get the number of arguments.
   const int argc = arguments().immediate();
 
   LookupResult lookup;
   LookupPostInterceptor(holder, name, &lookup);
 
   // Get the receiver from the stack.
   __ ldr(r1, MemOperand(sp, argc * kPointerSize));
 
   CallInterceptorCompiler compiler(this, arguments(), r2);
-  compiler.Compile(masm(),
-                   object,
-                   holder,
-                   name,
-                   &lookup,
-                   r1,
-                   r3,
-                   r4,
-                   r0,
-                   &miss);
+  MaybeObject* result = compiler.Compile(masm(),
+                                         object,
+                                         holder,
+                                         name,
+                                         &lookup,
+                                         r1,
+                                         r3,
+                                         r4,
+                                         r0,
+                                         &miss);
+  if (result->IsFailure()) {
+    return result;
+  }
 
   // Move returned value, the function to call, to r1.
   __ mov(r1, r0);
   // Restore receiver.
   __ ldr(r0, MemOperand(sp, argc * kPointerSize));
 
   GenerateCallFunction(masm(), object, arguments(), &miss);
 
   // Handle call cache miss.
   __ bind(&miss);
(...skipping 207 matching lines...)
   //  -- r2    : name
   //  -- lr    : return address
   // -----------------------------------
   Label miss;
 
   // Check that the map of the global has not changed.
   __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset));
   __ cmp(r3, Operand(Handle<Map>(object->map())));
   __ b(ne, &miss);
 
+  // Check that the value in the cell is not the hole. If it is, this
+  // cell could have been deleted and reintroducing the global needs
+  // to update the property details in the property dictionary of the
+  // global object. We bail out to the runtime system to do that.
+  __ mov(r4, Operand(Handle<JSGlobalPropertyCell>(cell)));
+  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex);
+  __ ldr(r6, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
+  __ cmp(r5, r6);
+  __ b(eq, &miss);
+
   // Store the value in the cell.
-  __ mov(r2, Operand(Handle<JSGlobalPropertyCell>(cell)));
-  __ str(r0, FieldMemOperand(r2, JSGlobalPropertyCell::kValueOffset));
+  __ str(r0, FieldMemOperand(r4, JSGlobalPropertyCell::kValueOffset));
 
   __ IncrementCounter(&Counters::named_store_global_inline, 1, r4, r3);
   __ Ret();
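
Note: the added hole check guards against a global property that was deleted and then reintroduced; in that case the property details must be rebuilt by the runtime, so the inline store must miss. A standalone sketch of the fast-path decision, with a stand-in cell type (not the real JSGlobalPropertyCell):

    #include <cstdio>

    struct Cell { const void* value; };

    static bool TryStoreGlobalFast(Cell* cell, const void* value,
                                   const void* the_hole) {
      if (cell->value == the_hole) return false;  // bail to StoreIC_Miss
      cell->value = value;                        // plain in-place store
      return true;
    }

    int main() {
      int hole, v;
      Cell c{&hole};
      std::printf("%d\n", TryStoreGlobalFast(&c, &v, &hole));  // 0: miss path
    }
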
 
   // Handle store cache miss.
   __ bind(&miss);
   __ IncrementCounter(&Counters::named_store_global_inline_miss, 1, r4, r3);
   Handle<Code> ic(Builtins::builtin(Builtins::StoreIC_Miss));
   __ Jump(ic, RelocInfo::CODE_TARGET);
 
(...skipping 69 matching lines...)
2615 JSObject* object, 2786 JSObject* object,
2616 JSObject* holder, 2787 JSObject* holder,
2617 AccessorInfo* callback) { 2788 AccessorInfo* callback) {
2618 // ----------- S t a t e ------------- 2789 // ----------- S t a t e -------------
2619 // -- r0 : receiver 2790 // -- r0 : receiver
2620 // -- r2 : name 2791 // -- r2 : name
2621 // -- lr : return address 2792 // -- lr : return address
2622 // ----------------------------------- 2793 // -----------------------------------
2623 Label miss; 2794 Label miss;
2624 2795
2625 Failure* failure = Failure::InternalError(); 2796 MaybeObject* result = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4,
2626 bool success = GenerateLoadCallback(object, holder, r0, r2, r3, r1, r4, 2797 callback, name, &miss);
2627 callback, name, &miss, &failure); 2798 if (result->IsFailure()) {
2628 if (!success) {
2629 miss.Unuse(); 2799 miss.Unuse();
2630 return failure; 2800 return result;
2631 } 2801 }
2632 2802
2633 __ bind(&miss); 2803 __ bind(&miss);
2634 GenerateLoadMiss(masm(), Code::LOAD_IC); 2804 GenerateLoadMiss(masm(), Code::LOAD_IC);
2635 2805
2636 // Return the generated code. 2806 // Return the generated code.
2637 return GetCode(CALLBACKS, name); 2807 return GetCode(CALLBACKS, name);
2638 } 2808 }
2639 2809
2640 2810
(...skipping 126 matching lines...)
2767 // -- lr : return address 2937 // -- lr : return address
2768 // -- r0 : key 2938 // -- r0 : key
2769 // -- r1 : receiver 2939 // -- r1 : receiver
2770 // ----------------------------------- 2940 // -----------------------------------
2771 Label miss; 2941 Label miss;
2772 2942
2773 // Check the key is the cached one. 2943 // Check the key is the cached one.
2774 __ cmp(r0, Operand(Handle<String>(name))); 2944 __ cmp(r0, Operand(Handle<String>(name)));
2775 __ b(ne, &miss); 2945 __ b(ne, &miss);
2776 2946
2777 Failure* failure = Failure::InternalError(); 2947 MaybeObject* result = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3,
2778 bool success = GenerateLoadCallback(receiver, holder, r1, r0, r2, r3, r4, 2948 r4, callback, name, &miss);
2779 callback, name, &miss, &failure); 2949 if (result->IsFailure()) {
2780 if (!success) {
2781 miss.Unuse(); 2950 miss.Unuse();
2782 return failure; 2951 return result;
2783 } 2952 }
2784 2953
2785 __ bind(&miss); 2954 __ bind(&miss);
2786 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); 2955 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2787 2956
2788 return GetCode(CALLBACKS, name); 2957 return GetCode(CALLBACKS, name);
2789 } 2958 }
2790 2959
2791 2960
2792 MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name, 2961 MaybeObject* KeyedLoadStubCompiler::CompileLoadConstant(String* name,
(...skipping 79 matching lines...)
2872 // -- r0 : key 3041 // -- r0 : key
2873 // -- r1 : receiver 3042 // -- r1 : receiver
2874 // ----------------------------------- 3043 // -----------------------------------
2875 Label miss; 3044 Label miss;
2876 __ IncrementCounter(&Counters::keyed_load_string_length, 1, r2, r3); 3045 __ IncrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
2877 3046
2878 // Check the key is the cached one. 3047 // Check the key is the cached one.
2879 __ cmp(r0, Operand(Handle<String>(name))); 3048 __ cmp(r0, Operand(Handle<String>(name)));
2880 __ b(ne, &miss); 3049 __ b(ne, &miss);
2881 3050
2882 GenerateLoadStringLength(masm(), r1, r2, r3, &miss); 3051 GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true);
2883 __ bind(&miss); 3052 __ bind(&miss);
2884 __ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3); 3053 __ DecrementCounter(&Counters::keyed_load_string_length, 1, r2, r3);
2885 3054
2886 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); 3055 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2887 3056
2888 return GetCode(CALLBACKS, name); 3057 return GetCode(CALLBACKS, name);
2889 } 3058 }
2890 3059
2891 3060
2892 MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) { 3061 MaybeObject* KeyedLoadStubCompiler::CompileLoadFunctionPrototype(String* name) {
(...skipping 61 matching lines...)
2954 __ Ret(); 3123 __ Ret();
2955 3124
2956 __ bind(&miss); 3125 __ bind(&miss);
2957 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); 3126 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC);
2958 3127
2959 // Return the generated code. 3128 // Return the generated code.
2960 return GetCode(NORMAL, NULL); 3129 return GetCode(NORMAL, NULL);
2961 } 3130 }
2962 3131
2963 3132
3133 MaybeObject* KeyedLoadStubCompiler::CompileLoadPixelArray(JSObject* receiver) {
3134 // ----------- S t a t e -------------
3135 // -- lr : return address
3136 // -- r0 : key
3137 // -- r1 : receiver
3138 // -----------------------------------
3139 Label miss;
3140
3141 // Check that the map matches.
3142 __ CheckMap(r1, r2, Handle<Map>(receiver->map()), &miss, false);
3143
3144 GenerateFastPixelArrayLoad(masm(),
3145 r1,
3146 r0,
3147 r2,
3148 r3,
3149 r4,
3150 r5,
3151 r0,
3152 &miss,
3153 &miss,
3154 &miss);
3155
3156 __ bind(&miss);
3157 Handle<Code> ic(Builtins::builtin(Builtins::KeyedLoadIC_Miss));
3158 __ Jump(ic, RelocInfo::CODE_TARGET);
3159
3160 // Return the generated code.
3161 return GetCode(NORMAL, NULL);
3162 }
3163
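GenerateFastPixelArrayLoad is defined elsewhere in the tree; what it emits corresponds roughly to this sketch (function name and signature are illustrative, not V8 API):

#include <cstdint>

// Pixel elements are unsigned bytes, so a successful fast load always
// yields a value in [0, 255], which is trivially a valid smi.
bool FastPixelArrayLoad(const uint8_t* pixels, uint32_t length,
                        uint32_t index, int32_t* result) {
  if (index >= length) return false;  // out of range: take the miss label
  *result = pixels[index];
  return true;
}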
3164
2964 MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object, 3165 MaybeObject* KeyedStoreStubCompiler::CompileStoreField(JSObject* object,
2965 int index, 3166 int index,
2966 Map* transition, 3167 Map* transition,
2967 String* name) { 3168 String* name) {
2968 // ----------- S t a t e ------------- 3169 // ----------- S t a t e -------------
2969 // -- r0 : value 3170 // -- r0 : value
2970 // -- r1 : name 3171 // -- r1 : name
2971 // -- r2 : receiver 3172 // -- r2 : receiver
2972 // -- lr : return address 3173 // -- lr : return address
2973 // ----------------------------------- 3174 // -----------------------------------
(...skipping 90 matching lines...)
3064 3265
3065 __ bind(&miss); 3266 __ bind(&miss);
3066 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss)); 3267 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
3067 __ Jump(ic, RelocInfo::CODE_TARGET); 3268 __ Jump(ic, RelocInfo::CODE_TARGET);
3068 3269
3069 // Return the generated code. 3270 // Return the generated code.
3070 return GetCode(NORMAL, NULL); 3271 return GetCode(NORMAL, NULL);
3071 } 3272 }
3072 3273
3073 3274
3275 MaybeObject* KeyedStoreStubCompiler::CompileStorePixelArray(
3276 JSObject* receiver) {
3277 // ----------- S t a t e -------------
3278 // -- r0 : value
3279 // -- r1 : key
3280 // -- r2 : receiver
3281 // -- r3 : scratch
3282 // -- r4 : scratch
3283 // -- r5 : scratch
3284 // -- r6 : scratch
3285 // -- lr : return address
3286 // -----------------------------------
3287 Label miss;
3288
3289 // Check that the map matches.
3290 __ CheckMap(r2, r6, Handle<Map>(receiver->map()), &miss, false);
3291
3292 GenerateFastPixelArrayStore(masm(),
3293 r2,
3294 r1,
3295 r0,
3296 r3,
3297 r4,
3298 r5,
3299 r6,
3300 true,
3301 true,
3302 &miss,
3303 &miss,
3304 NULL,
3305 &miss);
3306
3307 __ bind(&miss);
3308 Handle<Code> ic(Builtins::builtin(Builtins::KeyedStoreIC_Miss));
3309 __ Jump(ic, RelocInfo::CODE_TARGET);
3310
3311 // Return the generated code.
3312 return GetCode(NORMAL, NULL);
3313 }
3314
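GenerateFastPixelArrayStore also lives elsewhere; assuming the usual pixel-array clamping semantics, its smi fast path behaves like this sketch (names and signature are illustrative):

#include <cstdint>

// Smi values are clamped to [0, 255] before the byte store; non-smi
// values and out-of-range keys fall through to the miss handler.
bool FastPixelArrayStore(uint8_t* pixels, uint32_t length,
                         uint32_t index, int32_t value) {
  if (index >= length) return false;           // miss
  if (value < 0) value = 0;                    // clamp low
  if (value > 255) value = 255;                // clamp high
  pixels[index] = static_cast<uint8_t>(value);
  return true;
}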
3315
3074 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) { 3316 MaybeObject* ConstructStubCompiler::CompileConstructStub(JSFunction* function) {
3075 // ----------- S t a t e ------------- 3317 // ----------- S t a t e -------------
3076 // -- r0 : argc 3318 // -- r0 : argc
3077 // -- r1 : constructor 3319 // -- r1 : constructor
3078 // -- lr : return address 3320 // -- lr : return address
3079 // -- [sp] : last argument 3321 // -- [sp] : last argument
3080 // ----------------------------------- 3322 // -----------------------------------
3081 Label generic_stub_call; 3323 Label generic_stub_call;
3082 3324
3083 // Use r7 for holding undefined which is used in several places below. 3325 // Use r7 for holding undefined which is used in several places below.
(...skipping 124 matching lines...)
3208 __ bind(&generic_stub_call); 3450 __ bind(&generic_stub_call);
3209 Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric); 3451 Code* code = Builtins::builtin(Builtins::JSConstructStubGeneric);
3210 Handle<Code> generic_construct_stub(code); 3452 Handle<Code> generic_construct_stub(code);
3211 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET); 3453 __ Jump(generic_construct_stub, RelocInfo::CODE_TARGET);
3212 3454
3213 // Return the generated code. 3455 // Return the generated code.
3214 return GetCode(); 3456 return GetCode();
3215 } 3457 }
3216 3458
3217 3459
3460 static bool IsElementTypeSigned(ExternalArrayType array_type) {
3461 switch (array_type) {
3462 case kExternalByteArray:
3463 case kExternalShortArray:
3464 case kExternalIntArray:
3465 return true;
3466
3467 case kExternalUnsignedByteArray:
3468 case kExternalUnsignedShortArray:
3469 case kExternalUnsignedIntArray:
3470 return false;
3471
3472 default:
3473 UNREACHABLE();
3474 return false;
3475 }
3476 }
3477
3478
3479 MaybeObject* ExternalArrayStubCompiler::CompileKeyedLoadStub(
3480 ExternalArrayType array_type, Code::Flags flags) {
3481 // ---------- S t a t e --------------
3482 // -- lr : return address
3483 // -- r0 : key
3484 // -- r1 : receiver
3485 // -----------------------------------
3486 Label slow, failed_allocation;
3487
3488 Register key = r0;
3489 Register receiver = r1;
3490
3491 // Check that the object isn't a smi.
3492 __ JumpIfSmi(receiver, &slow);
3493
3494 // Check that the key is a smi.
3495 __ JumpIfNotSmi(key, &slow);
3496
3497 // Check that the object is a JS object. Load map into r2.
3498 __ CompareObjectType(receiver, r2, r3, FIRST_JS_OBJECT_TYPE);
3499 __ b(lt, &slow);
3500
3501 // Check that the receiver does not require access checks. We need
3502 // to check this explicitly since this generic stub does not perform
3503 // map checks.
3504 __ ldrb(r3, FieldMemOperand(r2, Map::kBitFieldOffset));
3505 __ tst(r3, Operand(1 << Map::kIsAccessCheckNeeded));
3506 __ b(ne, &slow);
3507
3508 // Check that the elements array is the appropriate type of
3509 // ExternalArray.
3510 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
3511 __ ldr(r2, FieldMemOperand(r3, HeapObject::kMapOffset));
3512 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
3513 __ cmp(r2, ip);
3514 __ b(ne, &slow);
3515
3516 // Check that the index is in range.
3517 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
3518 __ cmp(ip, Operand(key, ASR, kSmiTagSize));
3519 // Unsigned comparison catches both negative and too-large values.
3520 __ b(lo, &slow);
3521
3522 // r3: elements array
3523 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
3524 // r3: base pointer of external storage
3525
3526 // We do not untag the smi key; instead we work with it
3527 // as if it were premultiplied by 2.
3528 ASSERT((kSmiTag == 0) && (kSmiTagSize == 1));
3529
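Because the key stays tagged (index * 2), the MemOperands below scale it by element_size / 2: LSR 1 for bytes, LSL 0 for halfwords, LSL 1 for words. Equivalently, as a sketch:

#include <cstdint>

// Byte address of element 'tagged_key / 2' in an external array.
static const uint8_t* ElementAddress(const uint8_t* base,
                                     uint32_t tagged_key,
                                     int element_size) {
  return base + (tagged_key >> 1) * element_size;
}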
3530 Register value = r2;
3531 switch (array_type) {
3532 case kExternalByteArray:
3533 __ ldrsb(value, MemOperand(r3, key, LSR, 1));
3534 break;
3535 case kExternalUnsignedByteArray:
3536 __ ldrb(value, MemOperand(r3, key, LSR, 1));
3537 break;
3538 case kExternalShortArray:
3539 __ ldrsh(value, MemOperand(r3, key, LSL, 0));
3540 break;
3541 case kExternalUnsignedShortArray:
3542 __ ldrh(value, MemOperand(r3, key, LSL, 0));
3543 break;
3544 case kExternalIntArray:
3545 case kExternalUnsignedIntArray:
3546 __ ldr(value, MemOperand(r3, key, LSL, 1));
3547 break;
3548 case kExternalFloatArray:
3549 if (CpuFeatures::IsSupported(VFP3)) {
3550 CpuFeatures::Scope scope(VFP3);
3551 __ add(r2, r3, Operand(key, LSL, 1));
3552 __ vldr(s0, r2, 0);
3553 } else {
3554 __ ldr(value, MemOperand(r3, key, LSL, 1));
3555 }
3556 break;
3557 default:
3558 UNREACHABLE();
3559 break;
3560 }
3561
3562 // For integer array types:
3563 // r2: value
3564 // For the floating-point array type:
3565 // s0: value (if VFP3 is supported)
3566 // r2: value (if VFP3 is not supported)
3567
3568 if (array_type == kExternalIntArray) {
3569 // For the Int and UnsignedInt array types, we need to see whether
3570 // the value can be represented in a Smi. If not, we need to convert
3571 // it to a HeapNumber.
3572 Label box_int;
3573 __ cmp(value, Operand(0xC0000000));
3574 __ b(mi, &box_int);
3575 // Tag integer as smi and return it.
3576 __ mov(r0, Operand(value, LSL, kSmiTagSize));
3577 __ Ret();
3578
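The 0xC0000000 comparison above, and the tst variant used below for the unsigned case, test smi representability (a 31-bit signed payload when kSmiTagSize == 1). Equivalent C++, as a sketch under that assumption:

#include <cstdint>

// Signed: fits iff the top two bits agree, i.e. adding 2^30 does not
// carry into the sign bit (this is what cmp 0xC0000000 / b(mi) checks).
static bool FitsInSmi(int32_t value) {
  return static_cast<uint32_t>(value) + 0x40000000u < 0x80000000u;
}

// Unsigned: a positive smi needs both top bits clear
// (this is what tst 0xC0000000 / b(ne) checks below).
static bool FitsInPositiveSmi(uint32_t value) {
  return (value & 0xC0000000u) == 0u;
}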
3579 __ bind(&box_int);
3580 // Allocate a HeapNumber for the result and perform int-to-double
3581 // conversion. Don't touch r0 or r1 as they are needed if allocation
3582 // fails.
3583 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3584 __ AllocateHeapNumber(r5, r3, r4, r6, &slow);
3585 // Now we can use r0 for the result as key is not needed any more.
3586 __ mov(r0, r5);
3587
3588 if (CpuFeatures::IsSupported(VFP3)) {
3589 CpuFeatures::Scope scope(VFP3);
3590 __ vmov(s0, value);
3591 __ vcvt_f64_s32(d0, s0);
3592 __ sub(r3, r0, Operand(kHeapObjectTag));
3593 __ vstr(d0, r3, HeapNumber::kValueOffset);
3594 __ Ret();
3595 } else {
3596 WriteInt32ToHeapNumberStub stub(value, r0, r3);
3597 __ TailCallStub(&stub);
3598 }
3599 } else if (array_type == kExternalUnsignedIntArray) {
3600 // The test is different for unsigned int values. Since we need
3601 // the value to be in the range of a positive smi, we can't
3602 // handle either of the top two bits being set in the value.
3603 if (CpuFeatures::IsSupported(VFP3)) {
3604 CpuFeatures::Scope scope(VFP3);
3605 Label box_int, done;
3606 __ tst(value, Operand(0xC0000000));
3607 __ b(ne, &box_int);
3608 // Tag integer as smi and return it.
3609 __ mov(r0, Operand(value, LSL, kSmiTagSize));
3610 __ Ret();
3611
3612 __ bind(&box_int);
3613 __ vmov(s0, value);
3614 // Allocate a HeapNumber for the result and perform int-to-double
3615 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all
3616 // registers - also when jumping due to exhausted young space.
3617 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3618 __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
3619
3620 __ vcvt_f64_u32(d0, s0);
3621 __ sub(r1, r2, Operand(kHeapObjectTag));
3622 __ vstr(d0, r1, HeapNumber::kValueOffset);
3623
3624 __ mov(r0, r2);
3625 __ Ret();
3626 } else {
3627 // Check whether the unsigned integer fits into a smi.
3628 Label box_int_0, box_int_1, done;
3629 __ tst(value, Operand(0x80000000));
3630 __ b(ne, &box_int_0);
3631 __ tst(value, Operand(0x40000000));
3632 __ b(ne, &box_int_1);
3633 // Tag integer as smi and return it.
3634 __ mov(r0, Operand(value, LSL, kSmiTagSize));
3635 __ Ret();
3636
3637 Register hiword = value; // r2.
3638 Register loword = r3;
3639
3640 __ bind(&box_int_0);
3641 // Integer does not have leading zeros.
3642 GenerateUInt2Double(masm(), hiword, loword, r4, 0);
3643 __ b(&done);
3644
3645 __ bind(&box_int_1);
3646 // Integer has one leading zero.
3647 GenerateUInt2Double(masm(), hiword, loword, r4, 1);
3648
3649
3650 __ bind(&done);
3651 // Integer was converted to double in registers hiword:loword.
3652 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber
3653 // clobbers all registers - also when jumping due to exhausted young
3654 // space.
3655 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3656 __ AllocateHeapNumber(r4, r5, r7, r6, &slow);
3657
3658 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset));
3659 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset));
3660
3661 __ mov(r0, r4);
3662 __ Ret();
3663 }
3664 } else if (array_type == kExternalFloatArray) {
3665 // For the floating-point array type, we need to always allocate a
3666 // HeapNumber.
3667 if (CpuFeatures::IsSupported(VFP3)) {
3668 CpuFeatures::Scope scope(VFP3);
3669 // Allocate a HeapNumber for the result. Don't use r0 and r1 as
3670 // AllocateHeapNumber clobbers all registers - also when jumping due to
3671 // exhausted young space.
3672 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3673 __ AllocateHeapNumber(r2, r3, r4, r6, &slow);
3674 __ vcvt_f64_f32(d0, s0);
3675 __ sub(r1, r2, Operand(kHeapObjectTag));
3676 __ vstr(d0, r1, HeapNumber::kValueOffset);
3677
3678 __ mov(r0, r2);
3679 __ Ret();
3680 } else {
3681 // Allocate a HeapNumber for the result. Don't use r0 and r1 as
3682 // AllocateHeapNumber clobbers all registers - also when jumping due to
3683 // exhausted young space.
3684 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex);
3685 __ AllocateHeapNumber(r3, r4, r5, r6, &slow);
3686 // VFP is not available, so do a manual single-to-double conversion.
3687
3688 // r2: floating point value (binary32)
3689 // r3: heap number for result
3690
3691 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to
3692 // the slow case from here.
3693 __ and_(r0, value, Operand(kBinary32MantissaMask));
3694
3695 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to
3696 // the slow case from here.
3697 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits));
3698 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits));
3699
3700 Label exponent_rebiased;
3701 __ teq(r1, Operand(0x00));
3702 __ b(eq, &exponent_rebiased);
3703
3704 __ teq(r1, Operand(0xff));
3705 __ mov(r1, Operand(0x7ff), LeaveCC, eq);
3706 __ b(eq, &exponent_rebiased);
3707
3708 // Rebias exponent.
3709 __ add(r1,
3710 r1,
3711 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias));
3712
3713 __ bind(&exponent_rebiased);
3714 __ and_(r2, value, Operand(kBinary32SignMask));
3715 value = no_reg;
3716 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord));
3717
3718 // Shift mantissa.
3719 static const int kMantissaShiftForHiWord =
3720 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3721
3722 static const int kMantissaShiftForLoWord =
3723 kBitsPerInt - kMantissaShiftForHiWord;
3724
3725 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord));
3726 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord));
3727
3728 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset));
3729 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
3730
3731 __ mov(r0, r3);
3732 __ Ret();
3733 }
3734
3735 } else {
3736 // Tag integer as smi and return it.
3737 __ mov(r0, Operand(value, LSL, kSmiTagSize));
3738 __ Ret();
3739 }
3740
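The non-VFP branch above widens IEEE-754 binary32 to binary64 by hand. In portable C++ the same bit manipulation looks roughly like this (a sketch; like the stub, a zero source exponent is passed through unchanged, so only +/-0 is exact in that case):

#include <cstdint>

static uint64_t Binary32ToBinary64(uint32_t bits) {
  uint64_t sign = static_cast<uint64_t>(bits & 0x80000000u) << 32;
  uint64_t mantissa = static_cast<uint64_t>(bits & 0x007FFFFFu)
                      << (52 - 23);        // widen 23 -> 52 mantissa bits
  uint32_t exponent = (bits >> 23) & 0xFFu;
  uint64_t exponent64;
  if (exponent == 0xFFu) {
    exponent64 = 0x7FFu;                   // NaN/Infinity keep all-ones
  } else if (exponent == 0u) {
    exponent64 = 0u;                       // zeros (and subnormals, as above)
  } else {
    exponent64 = exponent - 127u + 1023u;  // rebias 127 -> 1023
  }
  return sign | (exponent64 << 52) | mantissa;
}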
3741 // Slow case, key and receiver still in r0 and r1.
3742 __ bind(&slow);
3743 __ IncrementCounter(&Counters::keyed_load_external_array_slow, 1, r2, r3);
3744
3745 // ---------- S t a t e --------------
3746 // -- lr : return address
3747 // -- r0 : key
3748 // -- r1 : receiver
3749 // -----------------------------------
3750
3751 __ Push(r1, r0);
3752
3753 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
3754
3755 return GetCode(flags);
3756 }
3757
3758
3759 MaybeObject* ExternalArrayStubCompiler::CompileKeyedStoreStub(
3760 ExternalArrayType array_type, Code::Flags flags) {
3761 // ---------- S t a t e --------------
3762 // -- r0 : value
3763 // -- r1 : key
3764 // -- r2 : receiver
3765 // -- lr : return address
3766 // -----------------------------------
3767 Label slow, check_heap_number;
3768
3769 // Register usage.
3770 Register value = r0;
3771 Register key = r1;
3772 Register receiver = r2;
3773 // r3 mostly holds the elements array or the destination external array.
3774
3775 // Check that the object isn't a smi.
3776 __ JumpIfSmi(receiver, &slow);
3777
3778 // Check that the object is a JS object. Load map into r3.
3779 __ CompareObjectType(receiver, r3, r4, FIRST_JS_OBJECT_TYPE);
3780 __ b(le, &slow);
3781
3782 // Check that the receiver does not require access checks. We need
3783 // to do this because this generic stub does not perform map checks.
3784 __ ldrb(ip, FieldMemOperand(r3, Map::kBitFieldOffset));
3785 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded));
3786 __ b(ne, &slow);
3787
3788 // Check that the key is a smi.
3789 __ JumpIfNotSmi(key, &slow);
3790
3791 // Check that the elements array is the appropriate type of ExternalArray.
3792 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset));
3793 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset));
3794 __ LoadRoot(ip, Heap::RootIndexForExternalArrayType(array_type));
3795 __ cmp(r4, ip);
3796 __ b(ne, &slow);
3797
3798 // Check that the index is in range.
3799 __ mov(r4, Operand(key, ASR, kSmiTagSize)); // Untag the index.
3800 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset));
3801 __ cmp(r4, ip);
3802 // Unsigned comparison catches both negative and too-large values.
3803 __ b(hs, &slow);
3804
3805 // Handle both smis and HeapNumbers in the fast path. Go to the
3806 // runtime for all other kinds of values.
3807 // r3: external array.
3808 // r4: key (integer).
3809 __ JumpIfNotSmi(value, &check_heap_number);
3810 __ mov(r5, Operand(value, ASR, kSmiTagSize)); // Untag the value.
3811 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
3812
3813 // r3: base pointer of external storage.
3814 // r4: key (integer).
3815 // r5: value (integer).
3816 switch (array_type) {
3817 case kExternalByteArray:
3818 case kExternalUnsignedByteArray:
3819 __ strb(r5, MemOperand(r3, r4, LSL, 0));
3820 break;
3821 case kExternalShortArray:
3822 case kExternalUnsignedShortArray:
3823 __ strh(r5, MemOperand(r3, r4, LSL, 1));
3824 break;
3825 case kExternalIntArray:
3826 case kExternalUnsignedIntArray:
3827 __ str(r5, MemOperand(r3, r4, LSL, 2));
3828 break;
3829 case kExternalFloatArray:
3830 // Perform int-to-float conversion and store to memory.
3831 StoreIntAsFloat(masm(), r3, r4, r5, r6, r7, r9);
3832 break;
3833 default:
3834 UNREACHABLE();
3835 break;
3836 }
3837
3838 // Entry registers are intact, r0 holds the value which is the return value.
3839 __ Ret();
3840
3841
3842 // r3: external array.
3843 // r4: index (integer).
3844 __ bind(&check_heap_number);
3845 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE);
3846 __ b(ne, &slow);
3847
3848 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset));
3849
3850 // r3: base pointer of external storage.
3851 // r4: key (integer).
3852
3853 // The WebGL specification leaves the behavior of storing NaN and
3854 // +/-Infinity into integer arrays basically undefined. For more
3855 // reproducible behavior, convert these to zero.
3856 if (CpuFeatures::IsSupported(VFP3)) {
3857 CpuFeatures::Scope scope(VFP3);
3858
3859
3860 if (array_type == kExternalFloatArray) {
3861 // vldr requires the offset to be a multiple of 4, so we cannot
3862 // fold -kHeapObjectTag into it.
3863 __ sub(r5, r0, Operand(kHeapObjectTag));
3864 __ vldr(d0, r5, HeapNumber::kValueOffset);
3865 __ add(r5, r3, Operand(r4, LSL, 2));
3866 __ vcvt_f32_f64(s0, d0);
3867 __ vstr(s0, r5, 0);
3868 } else {
3869 // Need to perform float-to-int conversion.
3870 // Test for NaN or infinity (both give zero).
3871 __ ldr(r6, FieldMemOperand(value, HeapNumber::kExponentOffset));
3872
3873 // Hoisted load. vldr requires the offset to be a multiple of 4, so we
3874 // cannot fold -kHeapObjectTag into it.
3875 __ sub(r5, value, Operand(kHeapObjectTag));
3876 __ vldr(d0, r5, HeapNumber::kValueOffset);
3877
3878 __ Sbfx(r6, r6, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
3879 // NaNs and Infinities have all-ones exponents, so they sign-extend to -1.
3880 __ cmp(r6, Operand(-1));
3881 __ mov(r5, Operand(0), LeaveCC, eq);
3882
3883 // Not infinity or NaN: simply convert to int.
3884 if (IsElementTypeSigned(array_type)) {
3885 __ vcvt_s32_f64(s0, d0, kDefaultRoundToZero, ne);
3886 } else {
3887 __ vcvt_u32_f64(s0, d0, kDefaultRoundToZero, ne);
3888 }
3889 __ vmov(r5, s0, ne);
3890
3891 switch (array_type) {
3892 case kExternalByteArray:
3893 case kExternalUnsignedByteArray:
3894 __ strb(r5, MemOperand(r3, r4, LSL, 0));
3895 break;
3896 case kExternalShortArray:
3897 case kExternalUnsignedShortArray:
3898 __ strh(r5, MemOperand(r3, r4, LSL, 1));
3899 break;
3900 case kExternalIntArray:
3901 case kExternalUnsignedIntArray:
3902 __ str(r5, MemOperand(r3, r4, LSL, 2));
3903 break;
3904 default:
3905 UNREACHABLE();
3906 break;
3907 }
3908 }
3909
3910 // Entry registers are intact, r0 holds the value which is the return value.
3911 __ Ret();
3912 } else {
3913 // VFP3 is not available, so do manual conversions.
3914 __ ldr(r5, FieldMemOperand(value, HeapNumber::kExponentOffset));
3915 __ ldr(r6, FieldMemOperand(value, HeapNumber::kMantissaOffset));
3916
3917 if (array_type == kExternalFloatArray) {
3918 Label done, nan_or_infinity_or_zero;
3919 static const int kMantissaInHiWordShift =
3920 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord;
3921
3922 static const int kMantissaInLoWordShift =
3923 kBitsPerInt - kMantissaInHiWordShift;
3924
3925 // Test for all special exponent values: zeros, subnormal numbers, NaNs
3926 // and infinities. All these should be converted to 0.
3927 __ mov(r7, Operand(HeapNumber::kExponentMask));
3928 __ and_(r9, r5, Operand(r7), SetCC);
3929 __ b(eq, &nan_or_infinity_or_zero);
3930
3931 __ teq(r9, Operand(r7));
3932 __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq);
3933 __ b(eq, &nan_or_infinity_or_zero);
3934
3935 // Rebias exponent.
3936 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
3937 __ add(r9,
3938 r9,
3939 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias));
3940
3941 __ cmp(r9, Operand(kBinary32MaxExponent));
3942 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt);
3943 __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt);
3944 __ b(gt, &done);
3945
3946 __ cmp(r9, Operand(kBinary32MinExponent));
3947 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt);
3948 __ b(lt, &done);
3949
3950 __ and_(r7, r5, Operand(HeapNumber::kSignMask));
3951 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
3952 __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift));
3953 __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift));
3954 __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift));
3955
3956 __ bind(&done);
3957 __ str(r5, MemOperand(r3, r4, LSL, 2));
3958 // Entry registers are intact, r0 holds the value which is the return
3959 // value.
3960 __ Ret();
3961
3962 __ bind(&nan_or_infinity_or_zero);
3963 __ and_(r7, r5, Operand(HeapNumber::kSignMask));
3964 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
3965 __ orr(r9, r9, r7);
3966 __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift));
3967 __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift));
3968 __ b(&done);
3969 } else {
3970 bool is_signed_type = IsElementTypeSigned(array_type);
3971 int meaningful_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt;
3972 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000;
3973
3974 Label done, sign;
3975
3976 // Test for all special exponent values: zeros, subnormal numbers, NaNs
3977 // and infinities. All these should be converted to 0.
3978 __ mov(r7, Operand(HeapNumber::kExponentMask));
3979 __ and_(r9, r5, Operand(r7), SetCC);
3980 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
3981 __ b(eq, &done);
3982
3983 __ teq(r9, Operand(r7));
3984 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq);
3985 __ b(eq, &done);
3986
3987 // Unbias exponent.
3988 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift));
3989 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC);
3990 // If exponent is negative then result is 0.
3991 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi);
3992 __ b(mi, &done);
3993
3994 // If exponent is too big then result is minimal value.
3995 __ cmp(r9, Operand(meaningful_bits - 1));
3996 __ mov(r5, Operand(min_value), LeaveCC, ge);
3997 __ b(ge, &done);
3998
3999 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC);
4000 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask));
4001 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord));
4002
4003 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC);
4004 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl);
4005 __ b(pl, &sign);
4006
4007 __ rsb(r9, r9, Operand(0, RelocInfo::NONE));
4008 __ mov(r5, Operand(r5, LSL, r9));
4009 __ rsb(r9, r9, Operand(meaningful_bits));
4010 __ orr(r5, r5, Operand(r6, LSR, r9));
4011
4012 __ bind(&sign);
4013 __ teq(r7, Operand(0, RelocInfo::NONE));
4014 __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne);
4015
4016 __ bind(&done);
4017 switch (array_type) {
4018 case kExternalByteArray:
4019 case kExternalUnsignedByteArray:
4020 __ strb(r5, MemOperand(r3, r4, LSL, 0));
4021 break;
4022 case kExternalShortArray:
4023 case kExternalUnsignedShortArray:
4024 __ strh(r5, MemOperand(r3, r4, LSL, 1));
4025 break;
4026 case kExternalIntArray:
4027 case kExternalUnsignedIntArray:
4028 __ str(r5, MemOperand(r3, r4, LSL, 2));
4029 break;
4030 default:
4031 UNREACHABLE();
4032 break;
4033 }
4034 }
4035 }
4036
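Taken together, the two store paths above implement, for integer element types, roughly the following for NaN/Infinity and in-range values (a sketch; out-of-range doubles differ between the paths: vcvt saturates, while the manual path pins to a sentinel value):

#include <cmath>
#include <cstdint>

static int32_t DoubleToStoredInt(double d) {
  if (std::isnan(d) || std::isinf(d)) return 0;  // reproducible WebGL choice
  return static_cast<int32_t>(d);                // truncate toward zero
}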
4037 // Slow case: call runtime.
4038 __ bind(&slow);
4039
4040 // Entry registers are intact.
4041 // ---------- S t a t e --------------
4042 // -- r0 : value
4043 // -- r1 : key
4044 // -- r2 : receiver
4045 // -- lr : return address
4046 // -----------------------------------
4047
4048 // Push receiver, key and value for runtime call.
4049 __ Push(r2, r1, r0);
4050
4051 __ TailCallRuntime(Runtime::kSetProperty, 3, 1);
4052
4053 return GetCode(flags);
4054 }
4055
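For kExternalFloatArray stores without VFP3, the stub performs the inverse narrowing of the load path's widening. A portable C++ sketch that mirrors the stub's behavior (truncation rather than rounding, overflow to infinity, underflow to zero):

#include <cstdint>

static uint32_t Binary64ToBinary32(uint64_t bits) {
  uint32_t sign = static_cast<uint32_t>(bits >> 32) & 0x80000000u;
  uint32_t mantissa =
      static_cast<uint32_t>(bits >> (52 - 23)) & 0x007FFFFFu;
  int32_t exponent = static_cast<int32_t>((bits >> 52) & 0x7FFu);
  if (exponent == 0x7FF) return sign | 0x7F800000u | mantissa;  // NaN/Inf
  if (exponent == 0) return sign | mantissa;   // zeros/subnormals, as above
  exponent += 127 - 1023;                      // rebias 1023 -> 127
  if (exponent > 254) return sign | 0x7F800000u;  // overflow -> infinity
  if (exponent < 1) return sign;                  // underflow -> +/-0
  return sign | (static_cast<uint32_t>(exponent) << 23) | mantissa;
}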
4056
3218 #undef __ 4057 #undef __
3219 4058
3220 } } // namespace v8::internal 4059 } } // namespace v8::internal
3221 4060
3222 #endif // V8_TARGET_ARCH_ARM 4061 #endif // V8_TARGET_ARCH_ARM