Chromium Code Reviews

Diff: src/ia32/code-stubs-ia32.cc

Issue 6826032: Remove code from the deprecated GenericBinaryOpStub. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge/
Patch Set: '' Created 9 years, 8 months ago
1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are
4 // met:
5 //
6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided
(...skipping 273 matching lines...)
284 // Return 1/0 for true/false in eax.
285 __ bind(&true_result);
286 __ mov(eax, 1);
287 __ ret(1 * kPointerSize);
288 __ bind(&false_result);
289 __ mov(eax, 0);
290 __ ret(1 * kPointerSize);
291 }
292
293
294 const char* GenericBinaryOpStub::GetName() {
295 if (name_ != NULL) return name_;
296 const int kMaxNameLength = 100;
297 name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray(
298 kMaxNameLength);
299 if (name_ == NULL) return "OOM";
300 const char* op_name = Token::Name(op_);
301 const char* overwrite_name;
302 switch (mode_) {
303 case NO_OVERWRITE: overwrite_name = "Alloc"; break;
304 case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break;
305 case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break;
306 default: overwrite_name = "UnknownOverwrite"; break;
307 }
308
309 OS::SNPrintF(Vector<char>(name_, kMaxNameLength),
310 "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
311 op_name,
312 overwrite_name,
313 (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "",
314 args_in_registers_ ? "RegArgs" : "StackArgs",
315 args_reversed_ ? "_R" : "",
316 static_operands_type_.ToString(),
317 BinaryOpIC::GetName(runtime_operands_type_));
318 return name_;
319 }
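
The name assembled above is purely diagnostic: operation, overwrite mode, and flag strings are formatted into a fixed 100-byte buffer, with OS::SNPrintF acting as V8's portable snprintf wrapper. A minimal standalone sketch of the same formatting, using placeholder strings where the original calls Token::Name(), static_operands_type_.ToString(), and BinaryOpIC::GetName():

    #include <cstdio>

    int main() {
      char name[100];                        // kMaxNameLength in the stub
      const char* op_name = "ADD";           // stand-in for Token::Name(op_)
      const char* overwrite_name = "Alloc";  // the NO_OVERWRITE case above
      bool no_smi_code_in_stub = false;      // flags_ & NO_SMI_CODE_IN_STUB
      bool args_in_registers = true;         // args_in_registers_
      bool args_reversed = false;            // args_reversed_
      std::snprintf(name, sizeof(name),
                    "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
                    op_name,
                    overwrite_name,
                    no_smi_code_in_stub ? "_NoSmiInStub" : "",
                    args_in_registers ? "RegArgs" : "StackArgs",
                    args_reversed ? "_R" : "",
                    "Unknown",   // stand-in for static_operands_type_.ToString()
                    "UNINIT");   // stand-in for BinaryOpIC::GetName(...)
      // Prints: GenericBinaryOpStub_ADD_Alloc_RegArgs_Unknown_UNINIT
      std::printf("%s\n", name);
      return 0;
    }
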
320
321
322 void GenericBinaryOpStub::GenerateCall(
323 MacroAssembler* masm,
324 Register left,
325 Register right) {
326 if (!ArgsInRegistersSupported()) {
327 // Pass arguments on the stack.
328 __ push(left);
329 __ push(right);
330 } else {
331 // The register calling convention puts left in edx and right in eax.
332 Register left_arg = edx;
333 Register right_arg = eax;
334 if (!(left.is(left_arg) && right.is(right_arg))) {
335 if (left.is(right_arg) && right.is(left_arg)) {
336 if (IsOperationCommutative()) {
337 SetArgsReversed();
338 } else {
339 __ xchg(left, right);
340 }
341 } else if (left.is(left_arg)) {
342 __ mov(right_arg, right);
343 } else if (right.is(right_arg)) {
344 __ mov(left_arg, left);
345 } else if (left.is(right_arg)) {
346 if (IsOperationCommutative()) {
347 __ mov(left_arg, right);
348 SetArgsReversed();
349 } else {
350 // Order of moves is important to avoid destroying the left argument.
351 __ mov(left_arg, left);
352 __ mov(right_arg, right);
353 }
354 } else if (right.is(left_arg)) {
355 if (IsOperationCommutative()) {
356 __ mov(right_arg, left);
357 SetArgsReversed();
358 } else {
359 // Order of moves is important to avoid destroying the right argument.
360 __ mov(right_arg, right);
361 __ mov(left_arg, left);
362 }
363 } else {
364 // Order of moves is not important.
365 __ mov(left_arg, left);
366 __ mov(right_arg, right);
367 }
368 }
369
370 // Update flags to indicate that arguments are in registers.
371 SetArgsInRegisters();
372 __ IncrementCounter(
373 masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
374 }
375
376 // Call the stub.
377 __ CallStub(this);
378 }
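
The shuffle above has a single goal: move arbitrary (left, right) registers into the fixed (edx, eax) pair without a scratch register, and, where the operation is commutative, record a reversal instead of emitting a move. A sketch of the same decision tree (not V8 code; registers are modeled as strings, and the partially-swapped commutative shortcuts are reduced to their plain move orderings):

    #include <cstdio>
    #include <string>

    // Sketch (not V8 code): plan moves to get (left, right) into (edx, eax).
    // For the partially-swapped cases it emits the non-commutative ordering;
    // the stub additionally saves a move there via SetArgsReversed().
    void PlanMoves(const std::string& left, const std::string& right,
                   bool commutative) {
      if (left == "edx" && right == "eax") {
        std::puts("nothing to do");
      } else if (left == "eax" && right == "edx") {
        // Fully swapped: commutative ops just record the reversal, others xchg.
        std::puts(commutative ? "SetArgsReversed()" : "xchg edx, eax");
      } else if (left == "edx") {
        std::puts("mov eax, <right>");
      } else if (right == "eax") {
        std::puts("mov edx, <left>");
      } else if (left == "eax") {
        std::puts("mov edx, <left>");   // move left out of eax first,
        std::puts("mov eax, <right>");  // or loading right would destroy it
      } else if (right == "edx") {
        std::puts("mov eax, <right>");  // move right out of edx first,
        std::puts("mov edx, <left>");   // or loading left would destroy it
      } else {
        std::puts("mov edx, <left>");   // order immaterial: no aliasing
        std::puts("mov eax, <right>");
      }
    }

    int main() {
      PlanMoves("ecx", "eax", false);  // right already in place: one move
      PlanMoves("eax", "edx", true);   // swapped + commutative: no code at all
      return 0;
    }
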
379
380
381 void GenericBinaryOpStub::GenerateCall(
382 MacroAssembler* masm,
383 Register left,
384 Smi* right) {
385 if (!ArgsInRegistersSupported()) {
386 // Pass arguments on the stack.
387 __ push(left);
388 __ push(Immediate(right));
389 } else {
390 // The register calling convention puts left in edx and right in eax.
391 Register left_arg = edx;
392 Register right_arg = eax;
393 if (left.is(left_arg)) {
394 __ mov(right_arg, Immediate(right));
395 } else if (left.is(right_arg) && IsOperationCommutative()) {
396 __ mov(left_arg, Immediate(right));
397 SetArgsReversed();
398 } else {
399 // For non-commutative operations, left and right_arg might be
400 // the same register. Therefore, the order of the moves is
401 // important here in order to not overwrite left before moving
402 // it to left_arg.
403 __ mov(left_arg, left);
404 __ mov(right_arg, Immediate(right));
405 }
406
407 // Update flags to indicate that arguments are in registers.
408 SetArgsInRegisters();
409 __ IncrementCounter(
410 masm->isolate()->counters()->generic_binary_stub_calls_regs(), 1);
411 }
412
413 // Call the stub.
414 __ CallStub(this);
415 }
416
417
418 void GenericBinaryOpStub::GenerateCall(
419 MacroAssembler* masm,
420 Smi* left,
421 Register right) {
422 if (!ArgsInRegistersSupported()) {
423 // Pass arguments on the stack.
424 __ push(Immediate(left));
425 __ push(right);
426 } else {
427 // The register calling convention puts left in edx and right in eax.
428 Register left_arg = edx;
429 Register right_arg = eax;
430 if (right.is(right_arg)) {
431 __ mov(left_arg, Immediate(left));
432 } else if (right.is(left_arg) && IsOperationCommutative()) {
433 __ mov(right_arg, Immediate(left));
434 SetArgsReversed();
435 } else {
436 // For non-commutative operations, right and left_arg might be
437 // the same register. Therefore, the order of the moves is
438 // important here in order to not overwrite right before moving
439 // it to right_arg.
440 __ mov(right_arg, right);
441 __ mov(left_arg, Immediate(left));
442 }
443 // Update flags to indicate that arguments are in registers.
444 SetArgsInRegisters();
445 Counters* counters = masm->isolate()->counters();
446 __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1);
447 }
448
449 // Call the stub.
450 __ CallStub(this);
451 }
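
All three GenerateCall overloads guard against the same hazard spelled out in the comments: when one operand currently occupies the slot the other operand must end up in, the move order decides whether a value survives. Modeling the two registers as plain ints makes the hazard concrete (a sketch, not V8 code; the value 5 stands in for the Smi immediate):

    #include <cassert>

    int main() {
      int edx = 0, eax = 42;  // pretend `left` currently lives in eax (right_arg)
      // Wrong order would clobber left:
      //   eax = 5; edx = eax;   // edx ends up 5, not 42
      // Correct order, as emitted by the stub:
      edx = eax;                // __ mov(left_arg, left)
      eax = 5;                  // __ mov(right_arg, Immediate(right))
      assert(edx == 42 && eax == 5);
      return 0;
    }
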
452
453
454 class FloatingPointHelper : public AllStatic {
455 public:
456
457 enum ArgLocation {
458 ARGS_ON_STACK,
459 ARGS_IN_REGISTERS
460 };
461
462 // Code pattern for loading a floating point value. Input value must
463 // be either a smi or a heap number object (fp value). Requirements:
(...skipping 63 matching lines...)
527 static void LoadSSE2Smis(MacroAssembler* masm, Register scratch);
528
529 // Checks that the two floating point numbers loaded into xmm0 and xmm1
530 // have int32 values.
531 static void CheckSSE2OperandsAreInt32(MacroAssembler* masm,
532 Label* non_int32,
533 Register scratch);
534 };
535
536
537 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) {
538 // 1. Move arguments into edx, eax except for DIV and MOD, which need the
539 // dividend in eax and edx free for the division. Use eax, ebx for those.
540 Comment load_comment(masm, "-- Load arguments");
541 Register left = edx;
542 Register right = eax;
543 if (op_ == Token::DIV || op_ == Token::MOD) {
544 left = eax;
545 right = ebx;
546 if (HasArgsInRegisters()) {
547 __ mov(ebx, eax);
548 __ mov(eax, edx);
549 }
550 }
551 if (!HasArgsInRegisters()) {
552 __ mov(right, Operand(esp, 1 * kPointerSize));
553 __ mov(left, Operand(esp, 2 * kPointerSize));
554 }
555
556 if (static_operands_type_.IsSmi()) {
557 if (FLAG_debug_code) {
558 __ AbortIfNotSmi(left);
559 __ AbortIfNotSmi(right);
560 }
561 if (op_ == Token::BIT_OR) {
562 __ or_(right, Operand(left));
563 GenerateReturn(masm);
564 return;
565 } else if (op_ == Token::BIT_AND) {
566 __ and_(right, Operand(left));
567 GenerateReturn(masm);
568 return;
569 } else if (op_ == Token::BIT_XOR) {
570 __ xor_(right, Operand(left));
571 GenerateReturn(masm);
572 return;
573 }
574 }
575
576 // 2. Prepare the smi check of both operands by ORing them together.
577 Comment smi_check_comment(masm, "-- Smi check arguments");
578 Label not_smis;
579 Register combined = ecx;
580 ASSERT(!left.is(combined) && !right.is(combined));
581 switch (op_) {
582 case Token::BIT_OR:
583 // Perform the operation into eax and smi check the result. Preserve
584 // eax in case the result is not a smi.
585 ASSERT(!left.is(ecx) && !right.is(ecx));
586 __ mov(ecx, right);
587 __ or_(right, Operand(left)); // Bitwise or is commutative.
588 combined = right;
589 break;
590
591 case Token::BIT_XOR:
592 case Token::BIT_AND:
593 case Token::ADD:
594 case Token::SUB:
595 case Token::MUL:
596 case Token::DIV:
597 case Token::MOD:
598 __ mov(combined, right);
599 __ or_(combined, Operand(left));
600 break;
601
602 case Token::SHL:
603 case Token::SAR:
604 case Token::SHR:
605 // Move the right operand into ecx for the shift operation, use eax
606 // for the smi check register.
607 ASSERT(!left.is(ecx) && !right.is(ecx));
608 __ mov(ecx, right);
609 __ or_(right, Operand(left));
610 combined = right;
611 break;
612
613 default:
614 break;
615 }
616
617 // 3. Perform the smi check of the operands.
618 STATIC_ASSERT(kSmiTag == 0); // Adjust zero check if not the case.
619 __ test(combined, Immediate(kSmiTagMask));
620 __ j(not_zero, &not_smis, not_taken);
621
622 // 4. Operands are both smis; perform the operation, leaving the result
623 // in eax, and check the result if necessary.
624 Comment perform_smi(masm, "-- Perform smi operation");
625 Label use_fp_on_smis;
626 switch (op_) {
627 case Token::BIT_OR:
628 // Nothing to do.
629 break;
630
631 case Token::BIT_XOR:
632 ASSERT(right.is(eax));
633 __ xor_(right, Operand(left)); // Bitwise xor is commutative.
634 break;
635
636 case Token::BIT_AND:
637 ASSERT(right.is(eax));
638 __ and_(right, Operand(left)); // Bitwise and is commutative.
639 break;
640
641 case Token::SHL:
642 // Remove tags from operands (but keep sign).
643 __ SmiUntag(left);
644 __ SmiUntag(ecx);
645 // Perform the operation.
646 __ shl_cl(left);
647 // Check that the *signed* result fits in a smi.
648 __ cmp(left, 0xc0000000);
649 __ j(sign, &use_fp_on_smis, not_taken);
650 // Tag the result and store it in register eax.
651 __ SmiTag(left);
652 __ mov(eax, left);
653 break;
654
655 case Token::SAR:
656 // Remove tags from operands (but keep sign).
657 __ SmiUntag(left);
658 __ SmiUntag(ecx);
659 // Perform the operation.
660 __ sar_cl(left);
661 // Tag the result and store it in register eax.
662 __ SmiTag(left);
663 __ mov(eax, left);
664 break;
665
666 case Token::SHR:
667 // Remove tags from operands (but keep sign).
668 __ SmiUntag(left);
669 __ SmiUntag(ecx);
670 // Perform the operation.
671 __ shr_cl(left);
672 // Check that the *unsigned* result fits in a smi.
673 // Neither of the two high-order bits can be set:
674 // - 0x80000000: high bit would be lost when smi tagging.
675 // - 0x40000000: this number would convert to negative when
676 // smi tagging. These two cases can only happen with shifts
677 // by 0 or 1 when handed a valid smi.
678 __ test(left, Immediate(0xc0000000));
679 __ j(not_zero, slow, not_taken);
680 // Tag the result and store it in register eax.
681 __ SmiTag(left);
682 __ mov(eax, left);
683 break;
684
685 case Token::ADD:
686 ASSERT(right.is(eax));
687 __ add(right, Operand(left)); // Addition is commutative.
688 __ j(overflow, &use_fp_on_smis, not_taken);
689 break;
690
691 case Token::SUB:
692 __ sub(left, Operand(right));
693 __ j(overflow, &use_fp_on_smis, not_taken);
694 __ mov(eax, left);
695 break;
696
697 case Token::MUL:
698 // If the smi tag is 0 we can just leave the tag on one operand.
699 STATIC_ASSERT(kSmiTag == 0); // Adjust code below if not the case.
700 // We can't revert the multiplication if the result is not a smi,
701 // so save the right operand.
702 __ mov(ebx, right);
703 // Remove tag from one of the operands (but keep sign).
704 __ SmiUntag(right);
705 // Do multiplication.
706 __ imul(right, Operand(left)); // Multiplication is commutative.
707 __ j(overflow, &use_fp_on_smis, not_taken);
708 // Check for negative zero result. Use combined = left | right.
709 __ NegativeZeroTest(right, combined, &use_fp_on_smis);
710 break;
711
712 case Token::DIV:
713 // We can't revert the division if the result is not a smi, so
714 // save the left operand.
715 __ mov(edi, left);
716 // Check for 0 divisor.
717 __ test(right, Operand(right));
718 __ j(zero, &use_fp_on_smis, not_taken);
719 // Sign extend left into edx:eax.
720 ASSERT(left.is(eax));
721 __ cdq();
722 // Divide edx:eax by right.
723 __ idiv(right);
724 // Check for the corner case of dividing the most negative smi by
725 // -1. We cannot use the overflow flag, since it is not set by the
726 // idiv instruction.
727 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
728 __ cmp(eax, 0x40000000);
729 __ j(equal, &use_fp_on_smis);
730 // Check for negative zero result. Use combined = left | right.
731 __ NegativeZeroTest(eax, combined, &use_fp_on_smis);
732 // Check that the remainder is zero.
733 __ test(edx, Operand(edx));
734 __ j(not_zero, &use_fp_on_smis);
735 // Tag the result and store it in register eax.
736 __ SmiTag(eax);
737 break;
738
739 case Token::MOD:
740 // Check for 0 divisor.
741 __ test(right, Operand(right));
742 __ j(zero, &not_smis, not_taken);
743
744 // Sign extend left into edx:eax.
745 ASSERT(left.is(eax));
746 __ cdq();
747 // Divide edx:eax by right.
748 __ idiv(right);
749 // Check for negative zero result. Use combined = left | right.
750 __ NegativeZeroTest(edx, combined, slow);
751 // Move remainder to register eax.
752 __ mov(eax, edx);
753 break;
754
755 default:
756 UNREACHABLE();
757 }
758
759 // 5. Emit return of result in eax.
760 GenerateReturn(masm);
761
762 // 6. For some operations emit inline code to perform floating point
763 // operations on known smis (e.g., if the result of the operation
764 // overflowed the smi range).
765 switch (op_) {
766 case Token::SHL: {
767 Comment perform_float(masm, "-- Perform float operation on smis");
768 __ bind(&use_fp_on_smis);
769 if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
770 // The result we want is in left (== edx), so we can put the allocated
771 // heap number in eax.
772 __ AllocateHeapNumber(eax, ecx, ebx, slow);
773 // Store the result in the HeapNumber and return.
774 if (CpuFeatures::IsSupported(SSE2)) {
775 CpuFeatures::Scope use_sse2(SSE2);
776 __ cvtsi2sd(xmm0, Operand(left));
777 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
778 } else {
779 // It's OK to overwrite the right argument on the stack because we
780 // are about to return.
781 __ mov(Operand(esp, 1 * kPointerSize), left);
782 __ fild_s(Operand(esp, 1 * kPointerSize));
783 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
784 }
785 GenerateReturn(masm);
786 } else {
787 ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
788 __ jmp(slow);
789 }
790 break;
791 }
792
793 case Token::ADD:
794 case Token::SUB:
795 case Token::MUL:
796 case Token::DIV: {
797 Comment perform_float(masm, "-- Perform float operation on smis");
798 __ bind(&use_fp_on_smis);
799 // Restore arguments to edx, eax.
800 switch (op_) {
801 case Token::ADD:
802 // Revert right = right + left.
803 __ sub(right, Operand(left));
804 break;
805 case Token::SUB:
806 // Revert left = left - right.
807 __ add(left, Operand(right));
808 break;
809 case Token::MUL:
810 // Right was clobbered but a copy is in ebx.
811 __ mov(right, ebx);
812 break;
813 case Token::DIV:
814 // Left was clobbered but a copy is in edi. Right is in ebx for
815 // division.
816 __ mov(edx, edi);
817 __ mov(eax, right);
818 break;
819 default: UNREACHABLE();
820 break;
821 }
822 if (runtime_operands_type_ != BinaryOpIC::UNINIT_OR_SMI) {
823 __ AllocateHeapNumber(ecx, ebx, no_reg, slow);
824 if (CpuFeatures::IsSupported(SSE2)) {
825 CpuFeatures::Scope use_sse2(SSE2);
826 FloatingPointHelper::LoadSSE2Smis(masm, ebx);
827 switch (op_) {
828 case Token::ADD: __ addsd(xmm0, xmm1); break;
829 case Token::SUB: __ subsd(xmm0, xmm1); break;
830 case Token::MUL: __ mulsd(xmm0, xmm1); break;
831 case Token::DIV: __ divsd(xmm0, xmm1); break;
832 default: UNREACHABLE();
833 }
834 __ movdbl(FieldOperand(ecx, HeapNumber::kValueOffset), xmm0);
835 } else { // SSE2 not available, use FPU.
836 FloatingPointHelper::LoadFloatSmis(masm, ebx);
837 switch (op_) {
838 case Token::ADD: __ faddp(1); break;
839 case Token::SUB: __ fsubp(1); break;
840 case Token::MUL: __ fmulp(1); break;
841 case Token::DIV: __ fdivp(1); break;
842 default: UNREACHABLE();
843 }
844 __ fstp_d(FieldOperand(ecx, HeapNumber::kValueOffset));
845 }
846 __ mov(eax, ecx);
847 GenerateReturn(masm);
848 } else {
849 ASSERT(runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI);
850 __ jmp(slow);
851 }
852 break;
853 }
854
855 default:
856 break;
857 }
858
859 // 7. Non-smi operands: fall out to the non-smi code with the operands
860 // in edx and eax.
861 Comment done_comment(masm, "-- Enter non-smi code");
862 __ bind(&not_smis);
863 switch (op_) {
864 case Token::BIT_OR:
865 case Token::SHL:
866 case Token::SAR:
867 case Token::SHR:
868 // Right operand is saved in ecx and eax was destroyed by the smi
869 // check.
870 __ mov(eax, ecx);
871 break;
872
873 case Token::DIV:
874 case Token::MOD:
875 // Operands are in eax, ebx at this point.
876 __ mov(edx, eax);
877 __ mov(eax, ebx);
878 break;
879
880 default:
881 break;
882 }
883 }
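
Three facts carry GenerateSmiCode. With kSmiTag == 0 and kSmiTagSize == 1, a smi is a 31-bit integer stored shifted left one bit; the bitwise OR of two words is a smi exactly when both are smis, which is why a single test of the combined register checks both operands at once; and the 0xc0000000 constants test whether an untagged 32-bit result still fits in 31 bits. A standalone sketch of that arithmetic (not V8 code; the helpers mirror what __ SmiTag and __ SmiUntag emit):

    #include <cassert>
    #include <cstdint>

    // ia32 smi layout: a 31-bit integer shifted left one bit, tag bit 0.
    int32_t SmiTag(int32_t v) {       // what `__ SmiTag(reg)` emits: shl reg, 1
      return static_cast<int32_t>(static_cast<uint32_t>(v) << 1);
    }
    int32_t SmiUntag(int32_t smi) {   // `__ SmiUntag(reg)`: sar reg, 1
      return smi >> 1;                // arithmetic shift, as on x86
    }
    bool IsSmi(int32_t word) { return (word & 1) == 0; }

    int main() {
      assert(IsSmi(SmiTag(123)) && SmiUntag(SmiTag(123)) == 123);
      assert(SmiUntag(SmiTag(-7)) == -7);

      // OR-combined check: the OR of two words has tag bit 0 only if both do.
      assert(!IsSmi(SmiTag(3) | 0x7));   // non-smi detected via a single test

      // SHL path: `cmp left, 0xc0000000; j(sign, ...)`. A signed result fits
      // in a smi iff it lies in [-0x40000000, 0x3fffffff], i.e. iff
      // (left + 0x40000000) computed in 32 bits has its sign bit clear.
      uint32_t too_big = 0x40000000u;              // one past the largest smi
      assert(((too_big + 0x40000000u) & 0x80000000u) != 0);  // sign set: slow

      // SHR path: `test left, 0xc0000000`. An unsigned result fits only if
      // both high bits are clear (0x80000000 loses the bit when tagging,
      // 0x40000000 turns negative).
      assert(((0x80000000u >> 1) & 0xc0000000u) != 0);       // go slow
      return 0;
    }
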
884
885
886 void GenericBinaryOpStub::Generate(MacroAssembler* masm) {
887 Label call_runtime;
888
889 Counters* counters = masm->isolate()->counters();
890 __ IncrementCounter(counters->generic_binary_stub_calls(), 1);
891
892 if (runtime_operands_type_ == BinaryOpIC::UNINIT_OR_SMI) {
893 Label slow;
894 if (ShouldGenerateSmiCode()) GenerateSmiCode(masm, &slow);
895 __ bind(&slow);
896 GenerateTypeTransition(masm);
897 }
898
899 // Generate fast case smi code if requested. This flag is set when the fast
900 // case smi code is not generated by the caller. Generating it here will speed
901 // up common operations.
902 if (ShouldGenerateSmiCode()) {
903 GenerateSmiCode(masm, &call_runtime);
904 } else if (op_ != Token::MOD) { // MOD goes straight to runtime.
905 if (!HasArgsInRegisters()) {
906 GenerateLoadArguments(masm);
907 }
908 }
909
910 // Floating point case.
911 if (ShouldGenerateFPCode()) {
912 switch (op_) {
913 case Token::ADD:
914 case Token::SUB:
915 case Token::MUL:
916 case Token::DIV: {
917 if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
918 HasSmiCodeInStub()) {
919 // Execution reaches this point when the first non-smi argument occurs
920 // (and only if smi code is generated). This is the right moment to
921 // patch to HEAP_NUMBERS state. The transition is attempted only for
922 // the four basic operations. The stub stays in the DEFAULT state
923 // forever for all other operations (also if smi code is skipped).
924 GenerateTypeTransition(masm);
925 break;
926 }
927
928 Label not_floats;
929 if (CpuFeatures::IsSupported(SSE2)) {
930 CpuFeatures::Scope use_sse2(SSE2);
931 if (static_operands_type_.IsNumber()) {
932 if (FLAG_debug_code) {
933 // Assert at runtime that inputs are only numbers.
934 __ AbortIfNotNumber(edx);
935 __ AbortIfNotNumber(eax);
936 }
937 if (static_operands_type_.IsSmi()) {
938 if (FLAG_debug_code) {
939 __ AbortIfNotSmi(edx);
940 __ AbortIfNotSmi(eax);
941 }
942 FloatingPointHelper::LoadSSE2Smis(masm, ecx);
943 } else {
944 FloatingPointHelper::LoadSSE2Operands(masm);
945 }
946 } else {
947 FloatingPointHelper::LoadSSE2Operands(masm, &not_floats);
948 }
949
950 switch (op_) {
951 case Token::ADD: __ addsd(xmm0, xmm1); break;
952 case Token::SUB: __ subsd(xmm0, xmm1); break;
953 case Token::MUL: __ mulsd(xmm0, xmm1); break;
954 case Token::DIV: __ divsd(xmm0, xmm1); break;
955 default: UNREACHABLE();
956 }
957 GenerateHeapResultAllocation(masm, &call_runtime);
958 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
959 GenerateReturn(masm);
960 } else { // SSE2 not available, use FPU.
961 if (static_operands_type_.IsNumber()) {
962 if (FLAG_debug_code) {
963 // Assert at runtime that inputs are only numbers.
964 __ AbortIfNotNumber(edx);
965 __ AbortIfNotNumber(eax);
966 }
967 } else {
968 FloatingPointHelper::CheckFloatOperands(masm, &not_floats, ebx);
969 }
970 FloatingPointHelper::LoadFloatOperands(
971 masm,
972 ecx,
973 FloatingPointHelper::ARGS_IN_REGISTERS);
974 switch (op_) {
975 case Token::ADD: __ faddp(1); break;
976 case Token::SUB: __ fsubp(1); break;
977 case Token::MUL: __ fmulp(1); break;
978 case Token::DIV: __ fdivp(1); break;
979 default: UNREACHABLE();
980 }
981 Label after_alloc_failure;
982 GenerateHeapResultAllocation(masm, &after_alloc_failure);
983 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
984 GenerateReturn(masm);
985 __ bind(&after_alloc_failure);
986 __ ffree();
987 __ jmp(&call_runtime);
988 }
989 __ bind(&not_floats);
990 if (runtime_operands_type_ == BinaryOpIC::DEFAULT &&
991 !HasSmiCodeInStub()) {
992 // Execution reaches this point when the first non-number argument
993 // occurs (and only if smi code is skipped from the stub, otherwise
994 // the patching has already been done earlier in this case branch).
995 // Try patching to STRINGS for the ADD operation.
996 if (op_ == Token::ADD) {
997 GenerateTypeTransition(masm);
998 }
999 }
1000 break;
1001 }
1002 case Token::MOD: {
1003 // For MOD we go directly to runtime in the non-smi case.
1004 break;
1005 }
1006 case Token::BIT_OR:
1007 case Token::BIT_AND:
1008 case Token::BIT_XOR:
1009 case Token::SAR:
1010 case Token::SHL:
1011 case Token::SHR: {
1012 Label non_smi_result;
1013 FloatingPointHelper::LoadAsIntegers(masm,
1014 static_operands_type_,
1015 use_sse3_,
1016 &call_runtime);
1017 switch (op_) {
1018 case Token::BIT_OR: __ or_(eax, Operand(ecx)); break;
1019 case Token::BIT_AND: __ and_(eax, Operand(ecx)); break;
1020 case Token::BIT_XOR: __ xor_(eax, Operand(ecx)); break;
1021 case Token::SAR: __ sar_cl(eax); break;
1022 case Token::SHL: __ shl_cl(eax); break;
1023 case Token::SHR: __ shr_cl(eax); break;
1024 default: UNREACHABLE();
1025 }
1026 if (op_ == Token::SHR) {
1027 // Check if result is non-negative and fits in a smi.
1028 __ test(eax, Immediate(0xc0000000));
1029 __ j(not_zero, &call_runtime);
1030 } else {
1031 // Check if result fits in a smi.
1032 __ cmp(eax, 0xc0000000);
1033 __ j(negative, &non_smi_result);
1034 }
1035 // Tag smi result and return.
1036 __ SmiTag(eax);
1037 GenerateReturn(masm);
1038
1039 // All ops except SHR return a signed int32 that we load in
1040 // a HeapNumber.
1041 if (op_ != Token::SHR) {
1042 __ bind(&non_smi_result);
1043 // Allocate a heap number if needed.
1044 __ mov(ebx, Operand(eax)); // ebx: result
1045 NearLabel skip_allocation;
1046 switch (mode_) {
1047 case OVERWRITE_LEFT:
1048 case OVERWRITE_RIGHT:
1049 // If the operand was an object, we skip the
1050 // allocation of a heap number.
1051 __ mov(eax, Operand(esp, mode_ == OVERWRITE_RIGHT ?
1052 1 * kPointerSize : 2 * kPointerSize));
1053 __ test(eax, Immediate(kSmiTagMask));
1054 __ j(not_zero, &skip_allocation, not_taken);
1055 // Fall through!
1056 case NO_OVERWRITE:
1057 __ AllocateHeapNumber(eax, ecx, edx, &call_runtime);
1058 __ bind(&skip_allocation);
1059 break;
1060 default: UNREACHABLE();
1061 }
1062 // Store the result in the HeapNumber and return.
1063 if (CpuFeatures::IsSupported(SSE2)) {
1064 CpuFeatures::Scope use_sse2(SSE2);
1065 __ cvtsi2sd(xmm0, Operand(ebx));
1066 __ movdbl(FieldOperand(eax, HeapNumber::kValueOffset), xmm0);
1067 } else {
1068 __ mov(Operand(esp, 1 * kPointerSize), ebx);
1069 __ fild_s(Operand(esp, 1 * kPointerSize));
1070 __ fstp_d(FieldOperand(eax, HeapNumber::kValueOffset));
1071 }
1072 GenerateReturn(masm);
1073 }
1074 break;
1075 }
1076 default: UNREACHABLE(); break;
1077 }
1078 }
1079
1080 // If all else fails, use the runtime system to get the correct
1081 // result. If arguments were passed in registers, place them on the
1082 // stack in the correct order below the return address.
1083
1084 // Avoid hitting the string ADD code below when allocation fails in
1085 // the floating point code above.
1086 if (op_ != Token::ADD) {
1087 __ bind(&call_runtime);
1088 }
1089
1090 if (HasArgsInRegisters()) {
1091 GenerateRegisterArgsPush(masm);
1092 }
1093
1094 switch (op_) {
1095 case Token::ADD: {
1096 // Test for string arguments before calling runtime.
1097
1098 // If this stub has already generated FP-specific code then the arguments
1099 // are already in edx and eax.
1100 if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) {
1101 GenerateLoadArguments(masm);
1102 }
1103
1104 // Registers containing left and right operands respectively.
1105 Register lhs, rhs;
1106 if (HasArgsReversed()) {
1107 lhs = eax;
1108 rhs = edx;
1109 } else {
1110 lhs = edx;
1111 rhs = eax;
1112 }
1113
1114 // Test if left operand is a string.
1115 NearLabel lhs_not_string;
1116 __ test(lhs, Immediate(kSmiTagMask));
1117 __ j(zero, &lhs_not_string);
1118 __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, ecx);
1119 __ j(above_equal, &lhs_not_string);
1120
1121 StringAddStub string_add_left_stub(NO_STRING_CHECK_LEFT_IN_STUB);
1122 __ TailCallStub(&string_add_left_stub);
1123
1124 NearLabel call_runtime_with_args;
1125 // Left operand is not a string, test right.
1126 __ bind(&lhs_not_string);
1127 __ test(rhs, Immediate(kSmiTagMask));
1128 __ j(zero, &call_runtime_with_args);
1129 __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, ecx);
1130 __ j(above_equal, &call_runtime_with_args);
1131
1132 StringAddStub string_add_right_stub(NO_STRING_CHECK_RIGHT_IN_STUB);
1133 __ TailCallStub(&string_add_right_stub);
1134
1135 // Neither argument is a string.
1136 __ bind(&call_runtime);
1137 if (HasArgsInRegisters()) {
1138 GenerateRegisterArgsPush(masm);
1139 }
1140 __ bind(&call_runtime_with_args);
1141 __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION);
1142 break;
1143 }
1144 case Token::SUB:
1145 __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION);
1146 break;
1147 case Token::MUL:
1148 __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION);
1149 break;
1150 case Token::DIV:
1151 __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION);
1152 break;
1153 case Token::MOD:
1154 __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION);
1155 break;
1156 case Token::BIT_OR:
1157 __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION);
1158 break;
1159 case Token::BIT_AND:
1160 __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION);
1161 break;
1162 case Token::BIT_XOR:
1163 __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION);
1164 break;
1165 case Token::SAR:
1166 __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION);
1167 break;
1168 case Token::SHL:
1169 __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION);
1170 break;
1171 case Token::SHR:
1172 __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION);
1173 break;
1174 default:
1175 UNREACHABLE();
1176 }
1177 }
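
Stripped of register bookkeeping, Generate() is a chain of progressively slower tiers, each falling through to the next when a type check fails: inline smi code, then SSE2/FPU code, then the string-add stubs for ADD, and finally the runtime builtins. A sketch of that dispatch order (not V8 code; the boolean parameters are placeholders for the inline type checks):

    #include <cassert>

    // Sketch (not V8 code) of the fallback chain in Generate(): each tier
    // jumps to the next when its type check fails.
    enum Tier { SMI_CODE, FP_CODE, STRING_ADD, RUNTIME_BUILTIN };

    Tier Dispatch(bool both_smis, bool both_numbers, bool either_string,
                  bool is_add) {
      if (both_smis) return SMI_CODE;                    // GenerateSmiCode
      if (both_numbers) return FP_CODE;                  // SSE2/FPU section
      if (is_add && either_string) return STRING_ADD;    // StringAddStub
      return RUNTIME_BUILTIN;                            // __ InvokeBuiltin
    }

    int main() {
      assert(Dispatch(false, false, true, true) == STRING_ADD);       // "a" + 1
      assert(Dispatch(false, false, true, false) == RUNTIME_BUILTIN); // "a" * 1
      return 0;
    }
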
1178
1179
1180 void GenericBinaryOpStub::GenerateHeapResultAllocation(MacroAssembler* masm,
1181 Label* alloc_failure) {
1182 Label skip_allocation;
1183 OverwriteMode mode = mode_;
1184 if (HasArgsReversed()) {
1185 if (mode == OVERWRITE_RIGHT) {
1186 mode = OVERWRITE_LEFT;
1187 } else if (mode == OVERWRITE_LEFT) {
1188 mode = OVERWRITE_RIGHT;
1189 }
1190 }
1191 switch (mode) {
1192 case OVERWRITE_LEFT: {
1193 // If the argument in edx is already an object, we skip the
1194 // allocation of a heap number.
1195 __ test(edx, Immediate(kSmiTagMask));
1196 __ j(not_zero, &skip_allocation, not_taken);
1197 // Allocate a heap number for the result. Keep eax and edx intact
1198 // for the possible runtime call.
1199 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1200 // Now edx can be overwritten, losing one of the arguments, since we
1201 // are done and will not need it any more.
1202 __ mov(edx, Operand(ebx));
1203 __ bind(&skip_allocation);
1204 // Use the object in edx as the result holder.
1205 __ mov(eax, Operand(edx));
1206 break;
1207 }
1208 case OVERWRITE_RIGHT:
1209 // If the argument in eax is already an object, we skip the
1210 // allocation of a heap number.
1211 __ test(eax, Immediate(kSmiTagMask));
1212 __ j(not_zero, &skip_allocation, not_taken);
1213 // Fall through!
1214 case NO_OVERWRITE:
1215 // Allocate a heap number for the result. Keep eax and edx intact
1216 // for the possible runtime call.
1217 __ AllocateHeapNumber(ebx, ecx, no_reg, alloc_failure);
1218 // Now eax can be overwritten, losing one of the arguments, since we
1219 // are done and will not need it any more.
1220 __ mov(eax, ebx);
1221 __ bind(&skip_allocation);
1222 break;
1223 default: UNREACHABLE();
1224 }
1225 }
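
The mode fix-up at the top of GenerateHeapResultAllocation is the subtle part: once args_reversed_ has been recorded, the value in edx is really the right operand, so the overwrite target flips. A sketch of the remapping (not V8 code):

    #include <cassert>

    enum OverwriteMode { NO_OVERWRITE, OVERWRITE_LEFT, OVERWRITE_RIGHT };

    // Mirror of the mode swap above: with reversed args, "overwrite left"
    // and "overwrite right" trade places.
    OverwriteMode EffectiveMode(OverwriteMode mode, bool args_reversed) {
      if (!args_reversed) return mode;
      if (mode == OVERWRITE_RIGHT) return OVERWRITE_LEFT;
      if (mode == OVERWRITE_LEFT) return OVERWRITE_RIGHT;
      return mode;
    }

    int main() {
      assert(EffectiveMode(OVERWRITE_LEFT, true) == OVERWRITE_RIGHT);
      assert(EffectiveMode(NO_OVERWRITE, true) == NO_OVERWRITE);
      return 0;
    }
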
1226
1227
1228 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) {
1229 // If arguments are not passed in registers, read them from the stack.
1230 ASSERT(!HasArgsInRegisters());
1231 __ mov(eax, Operand(esp, 1 * kPointerSize));
1232 __ mov(edx, Operand(esp, 2 * kPointerSize));
1233 }
1234
1235
1236 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) {
1237 // If arguments are not passed in registers, remove them from the stack
1238 // before returning.
1239 if (!HasArgsInRegisters()) {
1240 __ ret(2 * kPointerSize); // Remove both operands
1241 } else {
1242 __ ret(0);
1243 }
1244 }
1245
1246
1247 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) {
1248 ASSERT(HasArgsInRegisters());
1249 __ pop(ecx);
1250 if (HasArgsReversed()) {
1251 __ push(eax);
1252 __ push(edx);
1253 } else {
1254 __ push(edx);
1255 __ push(eax);
1256 }
1257 __ push(ecx);
1258 }
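
GenerateRegisterArgsPush materializes register arguments beneath the return address so that runtime callees see the standard stack layout. Modeling the stack as a vector shows the effect (a sketch, not V8 code; 111 stands in for the return address):

    #include <cassert>
    #include <vector>

    int main() {
      std::vector<int> stack = {111};    // 111 stands for the return address
      int edx = 1 /* left */, eax = 2 /* right */;
      bool args_reversed = false;
      int ret = stack.back(); stack.pop_back();        // __ pop(ecx)
      if (args_reversed) { stack.push_back(eax); stack.push_back(edx); }
      else               { stack.push_back(edx); stack.push_back(eax); }
      stack.push_back(ret);                            // __ push(ecx)
      assert((stack == std::vector<int>{1, 2, 111}));  // left, right, ret addr
      return 0;
    }
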
1259
1260
1261 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1262 // Ensure the operands are on the stack.
1263 if (HasArgsInRegisters()) {
1264 GenerateRegisterArgsPush(masm);
1265 }
1266
1267 __ pop(ecx); // Save return address.
1268
1269 // Left and right arguments are now on top.
1270 // Push this stub's key. Although the operation and the type info are
1271 // encoded into the key, the encoding is opaque, so push them too.
1272 __ push(Immediate(Smi::FromInt(MinorKey())));
1273 __ push(Immediate(Smi::FromInt(op_)));
1274 __ push(Immediate(Smi::FromInt(runtime_operands_type_)));
1275
1276 __ push(ecx); // Push return address.
1277
1278 // Patch the caller to an appropriate specialized stub and return the
1279 // operation result to the caller of the stub.
1280 __ TailCallExternalReference(
1281 ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()),
1282 5,
1283 1);
1284 }
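
The tail call hands IC_Utility(IC::kBinaryOp_Patch) five arguments: the two operands the caller left on the stack plus the three smis pushed above. Sketched as data, the stack just before the tail call looks like this (not V8 code; the labels are descriptive only):

    #include <cstdio>

    int main() {
      // Stack just before __ TailCallExternalReference(...), bottom first:
      const char* stack[] = {
        "left operand",                          // pushed by the caller
        "right operand",                         // pushed by the caller
        "Smi::FromInt(MinorKey())",              // opaque stub key
        "Smi::FromInt(op_)",                     // operation, pushed redundantly
        "Smi::FromInt(runtime_operands_type_)",  // current BinaryOpIC state
        "return address",                        // ecx, re-pushed last
      };
      for (const char* slot : stack) std::printf("%s\n", slot);
      return 0;  // IC::kBinaryOp_Patch consumes the five arguments
    }
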
1285
1286
1287 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) {
1288 GenericBinaryOpStub stub(key, type_info);
1289 return stub.GetCode();
1290 }
1291
1292
1293 Handle<Code> GetTypeRecordingBinaryOpStub(int key,
1294 TRBinaryOpIC::TypeInfo type_info,
1295 TRBinaryOpIC::TypeInfo result_type_info) {
1296 TypeRecordingBinaryOpStub stub(key, type_info, result_type_info);
1297 return stub.GetCode();
1298 }
1299
1300
1301 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) {
1302 __ pop(ecx); // Save return address.
(...skipping 5237 matching lines...)
6540 // Do a tail call to the rewritten stub.
6541 __ jmp(Operand(edi));
6542 }
6543
6544
6545 #undef __
6546
6547 } } // namespace v8::internal
6548
6549 #endif // V8_TARGET_ARCH_IA32