| OLD | NEW | 
|     1 // Copyright 2011 the V8 project authors. All rights reserved. |     1 // Copyright 2011 the V8 project authors. All rights reserved. | 
|     2 // Redistribution and use in source and binary forms, with or without |     2 // Redistribution and use in source and binary forms, with or without | 
|     3 // modification, are permitted provided that the following conditions are |     3 // modification, are permitted provided that the following conditions are | 
|     4 // met: |     4 // met: | 
|     5 // |     5 // | 
|     6 //     * Redistributions of source code must retain the above copyright |     6 //     * Redistributions of source code must retain the above copyright | 
|     7 //       notice, this list of conditions and the following disclaimer. |     7 //       notice, this list of conditions and the following disclaimer. | 
|     8 //     * Redistributions in binary form must reproduce the above |     8 //     * Redistributions in binary form must reproduce the above | 
|     9 //       copyright notice, this list of conditions and the following |     9 //       copyright notice, this list of conditions and the following | 
|    10 //       disclaimer in the documentation and/or other materials provided |    10 //       disclaimer in the documentation and/or other materials provided | 
| (...skipping 263 matching lines...) |
|   274   // Return 1/0 for true/false in rax. |   274   // Return 1/0 for true/false in rax. | 
|   275   __ bind(&true_result); |   275   __ bind(&true_result); | 
|   276   __ movq(rax, Immediate(1)); |   276   __ movq(rax, Immediate(1)); | 
|   277   __ ret(1 * kPointerSize); |   277   __ ret(1 * kPointerSize); | 
|   278   __ bind(&false_result); |   278   __ bind(&false_result); | 
|   279   __ Set(rax, 0); |   279   __ Set(rax, 0); | 
|   280   __ ret(1 * kPointerSize); |   280   __ ret(1 * kPointerSize); | 
|   281 } |   281 } | 
|   282  |   282  | 
|   283  |   283  | 
|   284 const char* GenericBinaryOpStub::GetName() { |  | 
|   285   if (name_ != NULL) return name_; |  | 
|   286   const int kMaxNameLength = 100; |  | 
|   287   name_ = Isolate::Current()->bootstrapper()->AllocateAutoDeletedArray( |  | 
|   288       kMaxNameLength); |  | 
|   289   if (name_ == NULL) return "OOM"; |  | 
|   290   const char* op_name = Token::Name(op_); |  | 
|   291   const char* overwrite_name; |  | 
|   292   switch (mode_) { |  | 
|   293     case NO_OVERWRITE: overwrite_name = "Alloc"; break; |  | 
|   294     case OVERWRITE_RIGHT: overwrite_name = "OverwriteRight"; break; |  | 
|   295     case OVERWRITE_LEFT: overwrite_name = "OverwriteLeft"; break; |  | 
|   296     default: overwrite_name = "UnknownOverwrite"; break; |  | 
|   297   } |  | 
|   298  |  | 
|   299   OS::SNPrintF(Vector<char>(name_, kMaxNameLength), |  | 
|   300                "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s", |  | 
|   301                op_name, |  | 
|   302                overwrite_name, |  | 
|   303                (flags_ & NO_SMI_CODE_IN_STUB) ? "_NoSmiInStub" : "", |  | 
|   304                args_in_registers_ ? "RegArgs" : "StackArgs", |  | 
|   305                args_reversed_ ? "_R" : "", |  | 
|   306                static_operands_type_.ToString(), |  | 
|   307                BinaryOpIC::GetName(runtime_operands_type_)); |  | 
|   308   return name_; |  | 
|   309 } |  | 
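
The format string above fully determines the debug name. As a rough standalone sketch (only the format string is taken from the code; the argument strings for the type and IC state are assumptions for illustration), a plain ADD stub would be named like this:

    // Hypothetical reconstruction of a GetName() result.
    #include <cstdio>

    int main() {
      char name[100];  // kMaxNameLength
      std::snprintf(name, sizeof(name),
                    "GenericBinaryOpStub_%s_%s%s_%s%s_%s_%s",
                    "ADD",       // Token::Name(op_)
                    "Alloc",     // overwrite_name for NO_OVERWRITE
                    "",          // NO_SMI_CODE_IN_STUB not set
                    "RegArgs",   // arguments passed in registers
                    "",          // arguments not reversed
                    "Unknown",   // static_operands_type_.ToString() (assumed)
                    "DEFAULT");  // BinaryOpIC::GetName(...) (assumed)
      std::printf("%s\n", name);  // GenericBinaryOpStub_ADD_Alloc_RegArgs_Unknown_DEFAULT
      return 0;
    }
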
|   310  |  | 
|   311  |  | 
|   312 void GenericBinaryOpStub::GenerateCall( |  | 
|   313     MacroAssembler* masm, |  | 
|   314     Register left, |  | 
|   315     Register right) { |  | 
|   316   if (!ArgsInRegistersSupported()) { |  | 
|   317     // Pass arguments on the stack. |  | 
|   318     __ push(left); |  | 
|   319     __ push(right); |  | 
|   320   } else { |  | 
|   321     // The calling convention with registers is left in rdx and right in rax. |  | 
|   322     Register left_arg = rdx; |  | 
|   323     Register right_arg = rax; |  | 
|   324     if (!(left.is(left_arg) && right.is(right_arg))) { |  | 
|   325       if (left.is(right_arg) && right.is(left_arg)) { |  | 
|   326         if (IsOperationCommutative()) { |  | 
|   327           SetArgsReversed(); |  | 
|   328         } else { |  | 
|   329           __ xchg(left, right); |  | 
|   330         } |  | 
|   331       } else if (left.is(left_arg)) { |  | 
|   332         __ movq(right_arg, right); |  | 
|   333       } else if (right.is(right_arg)) { |  | 
|   334         __ movq(left_arg, left); |  | 
|   335       } else if (left.is(right_arg)) { |  | 
|   336         if (IsOperationCommutative()) { |  | 
|   337           __ movq(left_arg, right); |  | 
|   338           SetArgsReversed(); |  | 
|   339         } else { |  | 
|   340           // Order of moves is important to avoid destroying left argument. |  |
|   341           __ movq(left_arg, left); |  | 
|   342           __ movq(right_arg, right); |  | 
|   343         } |  | 
|   344       } else if (right.is(left_arg)) { |  | 
|   345         if (IsOperationCommutative()) { |  | 
|   346           __ movq(right_arg, left); |  | 
|   347           SetArgsReversed(); |  | 
|   348         } else { |  | 
|   349           // Order of moves is important to avoid destroying right argument. |  |
|   350           __ movq(right_arg, right); |  | 
|   351           __ movq(left_arg, left); |  | 
|   352         } |  | 
|   353       } else { |  | 
|   354         // Order of moves is not important. |  | 
|   355         __ movq(left_arg, left); |  | 
|   356         __ movq(right_arg, right); |  | 
|   357       } |  | 
|   358     } |  | 
|   359  |  | 
|   360     // Update flags to indicate that arguments are in registers. |  | 
|   361     SetArgsInRegisters(); |  | 
|   362     Counters* counters = masm->isolate()->counters(); |  | 
|   363     __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1); |  | 
|   364   } |  | 
|   365  |  | 
|   366   // Call the stub. |  | 
|   367   __ CallStub(this); |  | 
|   368 } |  | 
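
The five-way case analysis above is easy to lose track of. Here is a minimal standalone model of the same decision table (illustrative only, not V8 code; the register names are plain labels), targeting the convention left in rdx, right in rax:

    #include <cstdio>

    enum Reg { RAX, RBX, RCX, RDX };
    static const char* RegName(Reg r) {
      static const char* names[] = {"rax", "rbx", "rcx", "rdx"};
      return names[r];
    }

    // Emit the moves that place (left, right) into (rdx, rax); for
    // commutative operations a "reversed" flag can replace moves entirely.
    void Shuffle(Reg left, Reg right, bool commutative) {
      const Reg left_arg = RDX, right_arg = RAX;
      bool reversed = false;
      if (left == left_arg && right == right_arg) {
        // Already in place: nothing to emit.
      } else if (left == right_arg && right == left_arg) {
        if (commutative) reversed = true;  // SetArgsReversed()
        else std::printf("xchg rax, rdx\n");
      } else if (left == left_arg) {
        std::printf("movq rax, %s\n", RegName(right));
      } else if (right == right_arg) {
        std::printf("movq rdx, %s\n", RegName(left));
      } else if (left == right_arg) {      // left occupies rax, the right slot
        if (commutative) { std::printf("movq rdx, %s\n", RegName(right)); reversed = true; }
        else {
          std::printf("movq rdx, %s\n", RegName(left));   // rescue left first
          std::printf("movq rax, %s\n", RegName(right));
        }
      } else if (right == left_arg) {      // right occupies rdx, the left slot
        if (commutative) { std::printf("movq rax, %s\n", RegName(left)); reversed = true; }
        else {
          std::printf("movq rax, %s\n", RegName(right));  // rescue right first
          std::printf("movq rdx, %s\n", RegName(left));
        }
      } else {
        std::printf("movq rdx, %s\n", RegName(left));     // order irrelevant
        std::printf("movq rax, %s\n", RegName(right));
      }
      if (reversed) std::printf("(args reversed)\n");
    }

    int main() {
      Shuffle(RAX, RDX, /*commutative=*/false);  // swapped non-commutative: xchg
      Shuffle(RBX, RCX, true);                   // disjoint: two plain moves
      return 0;
    }

The two "rescue" orderings matter because one source register doubles as the other operand's destination; the commutative cases avoid the extra move by recording SetArgsReversed() instead. The two Smi overloads below repeat the same analysis with one operand known at compile time.
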
|   369  |  | 
|   370  |  | 
|   371 void GenericBinaryOpStub::GenerateCall( |  | 
|   372     MacroAssembler* masm, |  | 
|   373     Register left, |  | 
|   374     Smi* right) { |  | 
|   375   if (!ArgsInRegistersSupported()) { |  | 
|   376     // Pass arguments on the stack. |  | 
|   377     __ push(left); |  | 
|   378     __ Push(right); |  | 
|   379   } else { |  | 
|   380     // The calling convention with registers is left in rdx and right in rax. |  | 
|   381     Register left_arg = rdx; |  | 
|   382     Register right_arg = rax; |  | 
|   383     if (left.is(left_arg)) { |  | 
|   384       __ Move(right_arg, right); |  | 
|   385     } else if (left.is(right_arg) && IsOperationCommutative()) { |  | 
|   386       __ Move(left_arg, right); |  | 
|   387       SetArgsReversed(); |  | 
|   388     } else { |  | 
|   389       // For non-commutative operations, left and right_arg might be |  | 
|   390       // the same register.  Therefore, the order of the moves is |  | 
|   391       // important here in order to not overwrite left before moving |  | 
|   392       // it to left_arg. |  | 
|   393       __ movq(left_arg, left); |  | 
|   394       __ Move(right_arg, right); |  | 
|   395     } |  | 
|   396  |  | 
|   397     // Update flags to indicate that arguments are in registers. |  | 
|   398     SetArgsInRegisters(); |  | 
|   399     Counters* counters = masm->isolate()->counters(); |  |
|   400     __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1); |  | 
|   401   } |  | 
|   402  |  | 
|   403   // Call the stub. |  | 
|   404   __ CallStub(this); |  | 
|   405 } |  | 
|   406  |  | 
|   407  |  | 
|   408 void GenericBinaryOpStub::GenerateCall( |  | 
|   409     MacroAssembler* masm, |  | 
|   410     Smi* left, |  | 
|   411     Register right) { |  | 
|   412   if (!ArgsInRegistersSupported()) { |  | 
|   413     // Pass arguments on the stack. |  | 
|   414     __ Push(left); |  | 
|   415     __ push(right); |  | 
|   416   } else { |  | 
|   417     // The calling convention with registers is left in rdx and right in rax. |  | 
|   418     Register left_arg = rdx; |  | 
|   419     Register right_arg = rax; |  | 
|   420     if (right.is(right_arg)) { |  | 
|   421       __ Move(left_arg, left); |  | 
|   422     } else if (right.is(left_arg) && IsOperationCommutative()) { |  | 
|   423       __ Move(right_arg, left); |  | 
|   424       SetArgsReversed(); |  | 
|   425     } else { |  | 
|   426       // For non-commutative operations, right and left_arg might be |  | 
|   427       // the same register.  Therefore, the order of the moves is |  | 
|   428       // important here in order to not overwrite right before moving |  | 
|   429       // it to right_arg. |  | 
|   430       __ movq(right_arg, right); |  | 
|   431       __ Move(left_arg, left); |  | 
|   432     } |  | 
|   433     // Update flags to indicate that arguments are in registers. |  | 
|   434     SetArgsInRegisters(); |  | 
|   435     Counters* counters = masm->isolate()->counters(); |  |
|   436     __ IncrementCounter(counters->generic_binary_stub_calls_regs(), 1); |  | 
|   437   } |  | 
|   438  |  | 
|   439   // Call the stub. |  | 
|   440   __ CallStub(this); |  | 
|   441 } |  | 
|   442  |  | 
|   443  |  | 
|   444 class FloatingPointHelper : public AllStatic { |   284 class FloatingPointHelper : public AllStatic { | 
|   445  public: |   285  public: | 
|   446   // Load the operands from rdx and rax into xmm0 and xmm1, as doubles. |   286   // Load the operands from rdx and rax into xmm0 and xmm1, as doubles. | 
|   447   // If the operands are not both numbers, jump to not_numbers. |   287   // If the operands are not both numbers, jump to not_numbers. | 
|   448   // Leaves rdx and rax unchanged.  SmiOperands assumes both are smis. |   288   // Leaves rdx and rax unchanged.  SmiOperands assumes both are smis. | 
|   449   // NumberOperands assumes both are smis or heap numbers. |   289   // NumberOperands assumes both are smis or heap numbers. | 
|   450   static void LoadSSE2SmiOperands(MacroAssembler* masm); |   290   static void LoadSSE2SmiOperands(MacroAssembler* masm); | 
|   451   static void LoadSSE2NumberOperands(MacroAssembler* masm); |   291   static void LoadSSE2NumberOperands(MacroAssembler* masm); | 
|   452   static void LoadSSE2UnknownOperands(MacroAssembler* masm, |   292   static void LoadSSE2UnknownOperands(MacroAssembler* masm, | 
|   453                                       Label* not_numbers); |   293                                       Label* not_numbers); | 
|   454  |   294  | 
|   455   // Takes the operands in rdx and rax and loads them as integers in rax |   295   // Takes the operands in rdx and rax and loads them as integers in rax | 
|   456   // and rcx. |   296   // and rcx. | 
|   457   static void LoadAsIntegers(MacroAssembler* masm, |   297   static void LoadAsIntegers(MacroAssembler* masm, | 
|   458                              Label* operand_conversion_failure, |   298                              Label* operand_conversion_failure, | 
|   459                              Register heap_number_map); |   299                              Register heap_number_map); | 
|   460   // As above, but we know the operands to be numbers. In that case, |   300   // As above, but we know the operands to be numbers. In that case, | 
|   461   // conversion can't fail. |   301   // conversion can't fail. | 
|   462   static void LoadNumbersAsIntegers(MacroAssembler* masm); |   302   static void LoadNumbersAsIntegers(MacroAssembler* masm); | 
|   463 }; |   303 }; | 
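
Before these helpers are used below, a rough standalone model of what "loading an operand as a double" amounts to may help (illustrative only, not the V8 API; the struct is a stand-in for a tagged value):

    #include <cassert>
    #include <cstdint>

    struct Tagged {                  // stand-in for a tagged JS value
      bool is_smi;
      int32_t smi_payload;           // valid when is_smi
      double heap_number_value;      // valid when !is_smi
    };

    double LoadAsDouble(const Tagged& v) {
      return v.is_smi ? static_cast<double>(v.smi_payload)  // cvtlsi2sd-style
                      : v.heap_number_value;                // movsd from the HeapNumber
    }

    int main() {
      Tagged smi{true, 7, 0.0}, heap{false, 0, 2.5};
      assert(LoadAsDouble(smi) + LoadAsDouble(heap) == 9.5);  // addsd xmm0, xmm1
      return 0;
    }

LoadSSE2SmiOperands can skip the heap-number branch outright, while LoadSSE2UnknownOperands must additionally bail out to not_numbers when an operand is neither case.
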
|   464  |   304  | 
|   465  |   305  | 
|   466 void GenericBinaryOpStub::GenerateSmiCode(MacroAssembler* masm, Label* slow) { |  | 
|   467   // 1. Move arguments into rdx, rax except for DIV and MOD, which need the |  | 
|   468   // dividend in rax and rdx free for the division.  Use rax, rbx for those. |  | 
|   469   Comment load_comment(masm, "-- Load arguments"); |  | 
|   470   Register left = rdx; |  | 
|   471   Register right = rax; |  | 
|   472   if (op_ == Token::DIV || op_ == Token::MOD) { |  | 
|   473     left = rax; |  | 
|   474     right = rbx; |  | 
|   475     if (HasArgsInRegisters()) { |  | 
|   476       __ movq(rbx, rax); |  | 
|   477       __ movq(rax, rdx); |  | 
|   478     } |  | 
|   479   } |  | 
|   480   if (!HasArgsInRegisters()) { |  | 
|   481     __ movq(right, Operand(rsp, 1 * kPointerSize)); |  | 
|   482     __ movq(left, Operand(rsp, 2 * kPointerSize)); |  | 
|   483   } |  | 
|   484  |  | 
|   485   Label not_smis; |  | 
|   486   // 2. Smi check both operands. |  | 
|   487   if (static_operands_type_.IsSmi()) { |  | 
|   488     // Skip smi check if we know that both arguments are smis. |  | 
|   489     if (FLAG_debug_code) { |  | 
|   490       __ AbortIfNotSmi(left); |  | 
|   491       __ AbortIfNotSmi(right); |  | 
|   492     } |  | 
|   493     if (op_ == Token::BIT_OR) { |  | 
|   494       // Handle OR here, since we do extra smi-checking in the or code below. |  | 
|   495       __ SmiOr(right, right, left); |  | 
|   496       GenerateReturn(masm); |  | 
|   497       return; |  | 
|   498     } |  | 
|   499   } else { |  | 
|   500     if (op_ != Token::BIT_OR) { |  | 
|   501       // Skip the check for OR as it is better combined with the |  | 
|   502       // actual operation. |  | 
|   503       Comment smi_check_comment(masm, "-- Smi check arguments"); |  | 
|   504       __ JumpIfNotBothSmi(left, right, &not_smis); |  |
|   505     } |  | 
|   506   } |  | 
|   507  |  | 
|   508   // 3. Operands are both smis (except for OR), perform the operation leaving |  | 
|   509   // the result in rax and check the result if necessary. |  | 
|   510   Comment perform_smi(masm, "-- Perform smi operation"); |  | 
|   511   Label use_fp_on_smis; |  | 
|   512   switch (op_) { |  | 
|   513     case Token::ADD: { |  | 
|   514       ASSERT(right.is(rax)); |  | 
|   515       __ SmiAdd(right, right, left, &use_fp_on_smis);  // ADD is commutative. |  | 
|   516       break; |  | 
|   517     } |  | 
|   518  |  | 
|   519     case Token::SUB: { |  | 
|   520       __ SmiSub(left, left, right, &use_fp_on_smis); |  | 
|   521       __ movq(rax, left); |  | 
|   522       break; |  | 
|   523     } |  | 
|   524  |  | 
|   525     case Token::MUL: |  | 
|   526       ASSERT(right.is(rax)); |  | 
|   527       __ SmiMul(right, right, left, &use_fp_on_smis);  // MUL is commutative. |  | 
|   528       break; |  | 
|   529  |  | 
|   530     case Token::DIV: |  | 
|   531       ASSERT(left.is(rax)); |  | 
|   532       __ SmiDiv(left, left, right, &use_fp_on_smis); |  | 
|   533       break; |  | 
|   534  |  | 
|   535     case Token::MOD: |  | 
|   536       ASSERT(left.is(rax)); |  | 
|   537       __ SmiMod(left, left, right, slow); |  | 
|   538       break; |  | 
|   539  |  | 
|   540     case Token::BIT_OR: |  | 
|   541       ASSERT(right.is(rax)); |  | 
|   542       __ movq(rcx, right);  // Save the right operand. |  | 
|   543       __ SmiOr(right, right, left);  // BIT_OR is commutative. |  | 
|   544       __ testb(right, Immediate(kSmiTagMask)); |  | 
|   545       __ j(not_zero, &not_smis); |  |
|   546       break; |  | 
|   547  |  | 
|   548     case Token::BIT_AND: |  | 
|   549       ASSERT(right.is(rax)); |  | 
|   550       __ SmiAnd(right, right, left);  // BIT_AND is commutative. |  | 
|   551       break; |  | 
|   552  |  | 
|   553     case Token::BIT_XOR: |  | 
|   554       ASSERT(right.is(rax)); |  | 
|   555       __ SmiXor(right, right, left);  // BIT_XOR is commutative. |  | 
|   556       break; |  | 
|   557  |  | 
|   558     case Token::SHL: |  | 
|   559     case Token::SHR: |  | 
|   560     case Token::SAR: |  | 
|   561       switch (op_) { |  | 
|   562         case Token::SAR: |  | 
|   563           __ SmiShiftArithmeticRight(left, left, right); |  | 
|   564           break; |  | 
|   565         case Token::SHR: |  | 
|   566           __ SmiShiftLogicalRight(left, left, right, slow); |  | 
|   567           break; |  | 
|   568         case Token::SHL: |  | 
|   569           __ SmiShiftLeft(left, left, right); |  | 
|   570           break; |  | 
|   571         default: |  | 
|   572           UNREACHABLE(); |  | 
|   573       } |  | 
|   574       __ movq(rax, left); |  | 
|   575       break; |  | 
|   576  |  | 
|   577     default: |  | 
|   578       UNREACHABLE(); |  | 
|   579       break; |  | 
|   580   } |  | 
|   581  |  | 
|   582   // 4. Emit return of result in rax. |  | 
|   583   GenerateReturn(masm); |  | 
|   584  |  | 
|   585   // 5. For some operations emit inline code to perform floating point |  | 
|   586   // operations on known smis (e.g., if the result of the operation |  | 
|   587   // overflowed the smi range). |  | 
|   588   switch (op_) { |  | 
|   589     case Token::ADD: |  | 
|   590     case Token::SUB: |  | 
|   591     case Token::MUL: |  | 
|   592     case Token::DIV: { |  | 
|   593       ASSERT(use_fp_on_smis.is_linked()); |  | 
|   594       __ bind(&use_fp_on_smis); |  | 
|   595       if (op_ == Token::DIV) { |  | 
|   596         __ movq(rdx, rax); |  | 
|   597         __ movq(rax, rbx); |  | 
|   598       } |  | 
|   599       // left is rdx, right is rax. |  | 
|   600       __ AllocateHeapNumber(rbx, rcx, slow); |  | 
|   601       FloatingPointHelper::LoadSSE2SmiOperands(masm); |  | 
|   602       switch (op_) { |  | 
|   603         case Token::ADD: __ addsd(xmm0, xmm1); break; |  | 
|   604         case Token::SUB: __ subsd(xmm0, xmm1); break; |  | 
|   605         case Token::MUL: __ mulsd(xmm0, xmm1); break; |  | 
|   606         case Token::DIV: __ divsd(xmm0, xmm1); break; |  | 
|   607         default: UNREACHABLE(); |  | 
|   608       } |  | 
|   609       __ movsd(FieldOperand(rbx, HeapNumber::kValueOffset), xmm0); |  | 
|   610       __ movq(rax, rbx); |  | 
|   611       GenerateReturn(masm); |  | 
|   612     } |  | 
|   613     default: |  | 
|   614       break; |  | 
|   615   } |  | 
|   616  |  | 
|   617   // 6. Non-smi operands, fall out to the non-smi code with the operands in |  | 
|   618   // rdx and rax. |  | 
|   619   Comment done_comment(masm, "-- Enter non-smi code"); |  | 
|   620   __ bind(&not_smis); |  |
|   621  |  | 
|   622   switch (op_) { |  | 
|   623     case Token::DIV: |  | 
|   624     case Token::MOD: |  | 
|   625       // Operands are in rax, rbx at this point. |  | 
|   626       __ movq(rdx, rax); |  | 
|   627       __ movq(rax, rbx); |  | 
|   628       break; |  | 
|   629  |  | 
|   630     case Token::BIT_OR: |  | 
|   631       // Right operand is saved in rcx and rax was destroyed by the smi |  | 
|   632       // operation. |  | 
|   633       __ movq(rax, rcx); |  | 
|   634       break; |  | 
|   635  |  | 
|   636     default: |  | 
|   637       break; |  | 
|   638   } |  | 
|   639 } |  | 
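
GenerateSmiCode leans on the x64 smi representation: STATIC_ASSERT(kSmiValueSize == 32) further down and the kSmiTagMask test above both assume a 32-bit payload stored in the upper half of the word, with zero tag bits below (heap object pointers have the low bit set). A rough sketch of that assumed encoding:

    #include <cassert>
    #include <cstdint>

    const int kSmiShift = 32;        // payload lives in the upper 32 bits
    const uint64_t kSmiTagMask = 1;  // low bit 0 = smi, 1 = heap object

    uint64_t Integer32ToSmi(int32_t value) {
      return static_cast<uint64_t>(static_cast<int64_t>(value)) << kSmiShift;
    }
    int32_t SmiToInteger32(uint64_t smi) {
      return static_cast<int32_t>(static_cast<int64_t>(smi) >> kSmiShift);
    }
    bool IsSmi(uint64_t word) { return (word & kSmiTagMask) == 0; }

    int main() {
      uint64_t a = Integer32ToSmi(7), b = Integer32ToSmi(-3);
      assert(IsSmi(a) && IsSmi(b));
      // Tagged words can be added directly; the payloads add in place.
      assert(SmiToInteger32(a + b) == 4);
      // The real SmiAdd also checks the CPU overflow flag and branches to
      // use_fp_on_smis instead of returning when the payload overflows.
      return 0;
    }
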
|   640  |  | 
|   641  |  | 
|   642 void GenericBinaryOpStub::Generate(MacroAssembler* masm) { |  | 
|   643   Label call_runtime; |  | 
|   644  |  | 
|   645   if (ShouldGenerateSmiCode()) { |  | 
|   646     GenerateSmiCode(masm, &call_runtime); |  | 
|   647   } else if (op_ != Token::MOD) { |  | 
|   648     if (!HasArgsInRegisters()) { |  | 
|   649       GenerateLoadArguments(masm); |  | 
|   650     } |  | 
|   651   } |  | 
|   652   // Floating point case. |  | 
|   653   if (ShouldGenerateFPCode()) { |  | 
|   654     switch (op_) { |  | 
|   655       case Token::ADD: |  | 
|   656       case Token::SUB: |  | 
|   657       case Token::MUL: |  | 
|   658       case Token::DIV: { |  | 
|   659         if (runtime_operands_type_ == BinaryOpIC::DEFAULT && |  | 
|   660             HasSmiCodeInStub()) { |  | 
|   661           // Execution reaches this point when the first non-smi argument occurs |  | 
|   662           // (and only if smi code is generated). This is the right moment to |  | 
|   663           // patch to HEAP_NUMBERS state. The transition is attempted only for |  | 
|   664           // the four basic operations. The stub stays in the DEFAULT state |  | 
|   665           // forever for all other operations (also if smi code is skipped). |  | 
|   666           GenerateTypeTransition(masm); |  | 
|   667           break; |  | 
|   668         } |  | 
|   669  |  | 
|   670         Label not_floats; |  | 
|   671         // rax: y |  | 
|   672         // rdx: x |  | 
|   673         if (static_operands_type_.IsNumber()) { |  | 
|   674           if (FLAG_debug_code) { |  | 
|   675             // Assert at runtime that inputs are only numbers. |  | 
|   676             __ AbortIfNotNumber(rdx); |  | 
|   677             __ AbortIfNotNumber(rax); |  | 
|   678           } |  | 
|   679           FloatingPointHelper::LoadSSE2NumberOperands(masm); |  | 
|   680         } else { |  | 
|   681           FloatingPointHelper::LoadSSE2UnknownOperands(masm, &call_runtime); |  | 
|   682         } |  | 
|   683  |  | 
|   684         switch (op_) { |  | 
|   685           case Token::ADD: __ addsd(xmm0, xmm1); break; |  | 
|   686           case Token::SUB: __ subsd(xmm0, xmm1); break; |  | 
|   687           case Token::MUL: __ mulsd(xmm0, xmm1); break; |  | 
|   688           case Token::DIV: __ divsd(xmm0, xmm1); break; |  | 
|   689           default: UNREACHABLE(); |  | 
|   690         } |  | 
|   691         // Allocate a heap number, if needed. |  | 
|   692         Label skip_allocation; |  | 
|   693         OverwriteMode mode = mode_; |  | 
|   694         if (HasArgsReversed()) { |  | 
|   695           if (mode == OVERWRITE_RIGHT) { |  | 
|   696             mode = OVERWRITE_LEFT; |  | 
|   697           } else if (mode == OVERWRITE_LEFT) { |  | 
|   698             mode = OVERWRITE_RIGHT; |  | 
|   699           } |  | 
|   700         } |  | 
|   701         switch (mode) { |  | 
|   702           case OVERWRITE_LEFT: |  | 
|   703             __ JumpIfNotSmi(rdx, &skip_allocation); |  | 
|   704             __ AllocateHeapNumber(rbx, rcx, &call_runtime); |  | 
|   705             __ movq(rdx, rbx); |  | 
|   706             __ bind(&skip_allocation); |  | 
|   707             __ movq(rax, rdx); |  | 
|   708             break; |  | 
|   709           case OVERWRITE_RIGHT: |  | 
|   710             // If the argument in rax is already an object, we skip the |  | 
|   711             // allocation of a heap number. |  | 
|   712             __ JumpIfNotSmi(rax, &skip_allocation); |  | 
|   713             // Fall through! |  | 
|   714           case NO_OVERWRITE: |  | 
|   715             // Allocate a heap number for the result. Keep rax and rdx intact |  | 
|   716             // for the possible runtime call. |  | 
|   717             __ AllocateHeapNumber(rbx, rcx, &call_runtime); |  | 
|   718             __ movq(rax, rbx); |  | 
|   719             __ bind(&skip_allocation); |  | 
|   720             break; |  | 
|   721           default: UNREACHABLE(); |  | 
|   722         } |  | 
|   723         __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |  | 
|   724         GenerateReturn(masm); |  | 
|   725         __ bind(&not_floats); |  |
|   726         if (runtime_operands_type_ == BinaryOpIC::DEFAULT && |  | 
|   727             !HasSmiCodeInStub()) { |  | 
|   728             // Execution reaches this point when the first non-number argument |  | 
|   729             // occurs (and only if smi code is skipped from the stub, otherwise |  | 
|   730             // the patching has already been done earlier in this case branch). |  | 
|   731             // A perfect moment to try patching to STRINGS for ADD operation. |  | 
|   732             if (op_ == Token::ADD) { |  | 
|   733               GenerateTypeTransition(masm); |  | 
|   734             } |  | 
|   735         } |  | 
|   736         break; |  | 
|   737       } |  | 
|   738       case Token::MOD: { |  | 
|   739         // For MOD we go directly to runtime in the non-smi case. |  | 
|   740         break; |  | 
|   741       } |  | 
|   742       case Token::BIT_OR: |  | 
|   743       case Token::BIT_AND: |  | 
|   744       case Token::BIT_XOR: |  | 
|   745       case Token::SAR: |  | 
|   746       case Token::SHL: |  | 
|   747       case Token::SHR: { |  | 
|   748         Label skip_allocation, non_smi_shr_result; |  | 
|   749         Register heap_number_map = r9; |  | 
|   750         __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |  | 
|   751         if (static_operands_type_.IsNumber()) { |  | 
|   752           if (FLAG_debug_code) { |  | 
|   753             // Assert at runtime that inputs are only numbers. |  | 
|   754             __ AbortIfNotNumber(rdx); |  | 
|   755             __ AbortIfNotNumber(rax); |  | 
|   756           } |  | 
|   757           FloatingPointHelper::LoadNumbersAsIntegers(masm); |  | 
|   758         } else { |  | 
|   759           FloatingPointHelper::LoadAsIntegers(masm, |  | 
|   760                                               &call_runtime, |  | 
|   761                                               heap_number_map); |  | 
|   762         } |  | 
|   763         switch (op_) { |  | 
|   764           case Token::BIT_OR:  __ orl(rax, rcx); break; |  | 
|   765           case Token::BIT_AND: __ andl(rax, rcx); break; |  | 
|   766           case Token::BIT_XOR: __ xorl(rax, rcx); break; |  | 
|   767           case Token::SAR: __ sarl_cl(rax); break; |  | 
|   768           case Token::SHL: __ shll_cl(rax); break; |  | 
|   769           case Token::SHR: { |  | 
|   770             __ shrl_cl(rax); |  | 
|   771             // Check if result is negative. This can only happen for a shift |  | 
|   772             // by zero. |  | 
|   773             __ testl(rax, rax); |  | 
|   774             __ j(negative, &non_smi_shr_result); |  | 
|   775             break; |  | 
|   776           } |  | 
|   777           default: UNREACHABLE(); |  | 
|   778         } |  | 
|   779  |  | 
|   780         STATIC_ASSERT(kSmiValueSize == 32); |  | 
|   781         // Tag smi result and return. |  | 
|   782         __ Integer32ToSmi(rax, rax); |  | 
|   783         GenerateReturn(masm); |  | 
|   784  |  | 
|   785         // All bit-ops except SHR return a signed int32 that can be |  | 
|   786         // returned immediately as a smi. |  | 
|   787         // We might need to allocate a HeapNumber if we shift a negative |  | 
|   788         // number right by zero (i.e., convert to UInt32). |  | 
|   789         if (op_ == Token::SHR) { |  | 
|   790           ASSERT(non_smi_shr_result.is_linked()); |  | 
|   791           __ bind(&non_smi_shr_result); |  | 
|   792           // Allocate a heap number if needed. |  | 
|   793           __ movl(rbx, rax);  // rbx holds result value (uint32 value as int64). |  | 
|   794           switch (mode_) { |  | 
|   795             case OVERWRITE_LEFT: |  | 
|   796             case OVERWRITE_RIGHT: |  | 
|   797               // If the operand was an object, we skip the |  | 
|   798               // allocation of a heap number. |  | 
|   799               __ movq(rax, Operand(rsp, mode_ == OVERWRITE_RIGHT ? |  | 
|   800                                    1 * kPointerSize : 2 * kPointerSize)); |  | 
|   801               __ JumpIfNotSmi(rax, &skip_allocation); |  | 
|   802               // Fall through! |  | 
|   803             case NO_OVERWRITE: |  | 
|   804               // Allocate heap number in new space. |  | 
|   805               // Not using AllocateHeapNumber macro in order to reuse |  | 
|   806               // already loaded heap_number_map. |  | 
|   807               __ AllocateInNewSpace(HeapNumber::kSize, |  | 
|   808                                     rax, |  | 
|   809                                     rcx, |  | 
|   810                                     no_reg, |  | 
|   811                                     &call_runtime, |  | 
|   812                                     TAG_OBJECT); |  | 
|   813               // Set the map. |  | 
|   814               if (FLAG_debug_code) { |  | 
|   815                 __ AbortIfNotRootValue(heap_number_map, |  | 
|   816                                        Heap::kHeapNumberMapRootIndex, |  | 
|   817                                        "HeapNumberMap register clobbered."); |  | 
|   818               } |  | 
|   819               __ movq(FieldOperand(rax, HeapObject::kMapOffset), |  | 
|   820                       heap_number_map); |  | 
|   821               __ bind(&skip_allocation); |  | 
|   822               break; |  | 
|   823             default: UNREACHABLE(); |  | 
|   824           } |  | 
|   825           // Store the result in the HeapNumber and return. |  | 
|   826           __ cvtqsi2sd(xmm0, rbx); |  | 
|   827           __ movsd(FieldOperand(rax, HeapNumber::kValueOffset), xmm0); |  | 
|   828           GenerateReturn(masm); |  | 
|   829         } |  | 
|   830  |  | 
|   831         break; |  | 
|   832       } |  | 
|   833       default: UNREACHABLE(); break; |  | 
|   834     } |  | 
|   835   } |  | 
|   836  |  | 
|   837   // If all else fails, use the runtime system to get the correct |  | 
|   838   // result. If arguments were passed in registers, place them on the |  |
|   839   // stack in the correct order below the return address. |  | 
|   840   __ bind(&call_runtime); |  | 
|   841  |  | 
|   842   if (HasArgsInRegisters()) { |  | 
|   843     GenerateRegisterArgsPush(masm); |  | 
|   844   } |  | 
|   845  |  | 
|   846   switch (op_) { |  | 
|   847     case Token::ADD: { |  | 
|   848       // Registers containing left and right operands respectively. |  | 
|   849       Register lhs, rhs; |  | 
|   850  |  | 
|   851       if (HasArgsReversed()) { |  | 
|   852         lhs = rax; |  | 
|   853         rhs = rdx; |  | 
|   854       } else { |  | 
|   855         lhs = rdx; |  | 
|   856         rhs = rax; |  | 
|   857       } |  | 
|   858  |  | 
|   859       // Test for string arguments before calling runtime. |  | 
|   860       Label not_strings, both_strings, not_string1, string1, string1_smi2; |  | 
|   861  |  | 
|   862       // If this stub has already generated FP-specific code then the arguments |  | 
|   863       // are already in rdx and rax. |  | 
|   864       if (!ShouldGenerateFPCode() && !HasArgsInRegisters()) { |  | 
|   865         GenerateLoadArguments(masm); |  | 
|   866       } |  | 
|   867  |  | 
|   868       Condition is_smi; |  | 
|   869       is_smi = masm->CheckSmi(lhs); |  | 
|   870       __ j(is_smi, &not_string1); |  |
|   871       __ CmpObjectType(lhs, FIRST_NONSTRING_TYPE, r8); |  |
|   872       __ j(above_equal, &not_string1); |  |
|   873  |  | 
|   874       // First argument is a string, test second. |  |
|   875       is_smi = masm->CheckSmi(rhs); |  | 
|   876       __ j(is_smi, &string1_smi2); |  | 
|   877       __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, r9); |  | 
|   878       __ j(above_equal, &string1); |  | 
|   879  |  | 
|   880       // First and second argument are strings. |  | 
|   881       StringAddStub string_add_stub(NO_STRING_CHECK_IN_STUB); |  | 
|   882       __ TailCallStub(&string_add_stub); |  | 
|   883  |  | 
|   884       __ bind(&string1_smi2); |  | 
|   885       // First argument is a string, second is a smi. Try to lookup the number |  | 
|   886       // string for the smi in the number string cache. |  | 
|   887       NumberToStringStub::GenerateLookupNumberStringCache( |  | 
|   888           masm, rhs, rbx, rcx, r8, true, &string1); |  | 
|   889  |  | 
|   890       // Replace second argument on stack and tailcall string add stub to make |  | 
|   891       // the result. |  | 
|   892       __ movq(Operand(rsp, 1 * kPointerSize), rbx); |  | 
|   893       __ TailCallStub(&string_add_stub); |  | 
|   894  |  | 
|   895       // Only first argument is a string. |  | 
|   896       __ bind(&string1); |  | 
|   897       __ InvokeBuiltin(Builtins::STRING_ADD_LEFT, JUMP_FUNCTION); |  | 
|   898  |  | 
|   899       // First argument was not a string, test second. |  | 
|   900       __ bind(&not_string1); |  |
|   901       is_smi = masm->CheckSmi(rhs); |  |
|   902       __ j(is_smi, &not_strings); |  |
|   903       __ CmpObjectType(rhs, FIRST_NONSTRING_TYPE, rhs); |  |
|   904       __ j(above_equal, &not_strings); |  |
|   905  |  | 
|   906       // Only second argument is a string. |  | 
|   907       __ InvokeBuiltin(Builtins::STRING_ADD_RIGHT, JUMP_FUNCTION); |  | 
|   908  |  | 
|   909       __ bind(&not_strings); |  |
|   910       // Neither argument is a string. |  | 
|   911       __ InvokeBuiltin(Builtins::ADD, JUMP_FUNCTION); |  | 
|   912       break; |  | 
|   913     } |  | 
|   914     case Token::SUB: |  | 
|   915       __ InvokeBuiltin(Builtins::SUB, JUMP_FUNCTION); |  | 
|   916       break; |  | 
|   917     case Token::MUL: |  | 
|   918       __ InvokeBuiltin(Builtins::MUL, JUMP_FUNCTION); |  | 
|   919       break; |  | 
|   920     case Token::DIV: |  | 
|   921       __ InvokeBuiltin(Builtins::DIV, JUMP_FUNCTION); |  | 
|   922       break; |  | 
|   923     case Token::MOD: |  | 
|   924       __ InvokeBuiltin(Builtins::MOD, JUMP_FUNCTION); |  | 
|   925       break; |  | 
|   926     case Token::BIT_OR: |  | 
|   927       __ InvokeBuiltin(Builtins::BIT_OR, JUMP_FUNCTION); |  | 
|   928       break; |  | 
|   929     case Token::BIT_AND: |  | 
|   930       __ InvokeBuiltin(Builtins::BIT_AND, JUMP_FUNCTION); |  | 
|   931       break; |  | 
|   932     case Token::BIT_XOR: |  | 
|   933       __ InvokeBuiltin(Builtins::BIT_XOR, JUMP_FUNCTION); |  | 
|   934       break; |  | 
|   935     case Token::SAR: |  | 
|   936       __ InvokeBuiltin(Builtins::SAR, JUMP_FUNCTION); |  | 
|   937       break; |  | 
|   938     case Token::SHL: |  | 
|   939       __ InvokeBuiltin(Builtins::SHL, JUMP_FUNCTION); |  | 
|   940       break; |  | 
|   941     case Token::SHR: |  | 
|   942       __ InvokeBuiltin(Builtins::SHR, JUMP_FUNCTION); |  | 
|   943       break; |  | 
|   944     default: |  | 
|   945       UNREACHABLE(); |  | 
|   946   } |  | 
|   947 } |  | 
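
One subtlety in the SHR path above: the smi payload is a signed 32-bit value, so a logical right shift can only produce an unrepresentable result (at or above 2^31) when the shift count is zero; any shift of one or more clears bit 31. That is exactly what the negative-result test catches. A small illustration:

    #include <cassert>
    #include <cstdint>

    int main() {
      uint32_t x = 0x80000000u;          // int32 operand -2^31, seen as uint32
      assert((x >> 1) <= 0x7fffffffu);   // any shift >= 1 clears bit 31
      assert((x >> 0) > 0x7fffffffu);    // shift by zero leaves a value above
                                         // INT32_MAX, so a HeapNumber is needed
      return 0;
    }
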
|   948  |  | 
|   949  |  | 
|   950 void GenericBinaryOpStub::GenerateLoadArguments(MacroAssembler* masm) { |  | 
|   951   ASSERT(!HasArgsInRegisters()); |  | 
|   952   __ movq(rax, Operand(rsp, 1 * kPointerSize)); |  | 
|   953   __ movq(rdx, Operand(rsp, 2 * kPointerSize)); |  | 
|   954 } |  | 
|   955  |  | 
|   956  |  | 
|   957 void GenericBinaryOpStub::GenerateReturn(MacroAssembler* masm) { |  | 
|   958   // If arguments are not passed in registers remove them from the stack before |  | 
|   959   // returning. |  | 
|   960   if (!HasArgsInRegisters()) { |  | 
|   961     __ ret(2 * kPointerSize);  // Remove both operands |  | 
|   962   } else { |  | 
|   963     __ ret(0); |  | 
|   964   } |  | 
|   965 } |  | 
|   966  |  | 
|   967  |  | 
|   968 void GenericBinaryOpStub::GenerateRegisterArgsPush(MacroAssembler* masm) { |  | 
|   969   ASSERT(HasArgsInRegisters()); |  | 
|   970   __ pop(rcx); |  | 
|   971   if (HasArgsReversed()) { |  | 
|   972     __ push(rax); |  | 
|   973     __ push(rdx); |  | 
|   974   } else { |  | 
|   975     __ push(rdx); |  | 
|   976     __ push(rax); |  | 
|   977   } |  | 
|   978   __ push(rcx); |  | 
|   979 } |  | 
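
The pop/push sequence above is the standard trick for inserting arguments underneath a return address that is already on top of the stack. Schematically (a toy model, with the back of the vector as the stack top):

    #include <cassert>
    #include <string>
    #include <vector>

    int main() {
      std::vector<std::string> stack = {"caller frame", "ret addr"};
      std::string ret = stack.back();  // pop rcx: take the return address off
      stack.pop_back();
      stack.push_back("left (rdx)");   // push the operands...
      stack.push_back("right (rax)");
      stack.push_back(ret);            // ...and put the return address back
      assert(stack.back() == "ret addr");  // args now sit below the return
      return 0;
    }
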
|   980  |  | 
|   981  |  | 
|   982 void GenericBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |  | 
|   983   Label get_result; |  | 
|   984  |  | 
|   985   // Ensure the operands are on the stack. |  | 
|   986   if (HasArgsInRegisters()) { |  | 
|   987     GenerateRegisterArgsPush(masm); |  | 
|   988   } |  | 
|   989  |  | 
|   990   // Left and right arguments are already on stack. |  | 
|   991   __ pop(rcx);  // Save the return address. |  | 
|   992  |  | 
|   993   // Push this stub's key. |  | 
|   994   __ Push(Smi::FromInt(MinorKey())); |  | 
|   995  |  | 
|   996   // Although the operation and the type info are encoded into the key, |  | 
|   997   // the encoding is opaque, so push them too. |  | 
|   998   __ Push(Smi::FromInt(op_)); |  | 
|   999  |  | 
|  1000   __ Push(Smi::FromInt(runtime_operands_type_)); |  | 
|  1001  |  | 
|  1002   __ push(rcx);  // The return address. |  | 
|  1003  |  | 
|  1004   // Perform patching to an appropriate fast case and return the result. |  | 
|  1005   __ TailCallExternalReference( |  | 
|  1006       ExternalReference(IC_Utility(IC::kBinaryOp_Patch), masm->isolate()), |  | 
|  1007       5, |  | 
|  1008       1); |  | 
|  1009 } |  | 
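
For orientation, the pushes above leave the patch helper a five-argument frame; a trivial standalone mock of the resulting layout (the labels are descriptive only):

    #include <cstdio>

    int main() {
      const char* stack[] = {    // listed bottom of stack first
        "left operand",                          // already on the stack
        "right operand",                         // already on the stack
        "Smi::FromInt(MinorKey())",              // this stub's key
        "Smi::FromInt(op_)",                     // pushed again, as the key is opaque
        "Smi::FromInt(runtime_operands_type_)",  // likewise
        "return address",                        // rcx pushed back on top
      };
      for (const char* slot : stack) std::printf("%s\n", slot);
      // Matches TailCallExternalReference(..., 5, 1): five arguments
      // below the return address, one word of result.
      return 0;
    }
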
|  1010  |  | 
|  1011  |  | 
|  1012 Handle<Code> GetBinaryOpStub(int key, BinaryOpIC::TypeInfo type_info) { |  | 
|  1013   GenericBinaryOpStub stub(key, type_info); |  | 
|  1014   return stub.GetCode(); |  | 
|  1015 } |  | 
|  1016  |  | 
|  1017  |  | 
|  1018 Handle<Code> GetTypeRecordingBinaryOpStub(int key, |   306 Handle<Code> GetTypeRecordingBinaryOpStub(int key, | 
|  1019     TRBinaryOpIC::TypeInfo type_info, |   307     TRBinaryOpIC::TypeInfo type_info, | 
|  1020     TRBinaryOpIC::TypeInfo result_type_info) { |   308     TRBinaryOpIC::TypeInfo result_type_info) { | 
|  1021   TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); |   309   TypeRecordingBinaryOpStub stub(key, type_info, result_type_info); | 
|  1022   return stub.GetCode(); |   310   return stub.GetCode(); | 
|  1023 } |   311 } | 
|  1024  |   312  | 
|  1025  |   313  | 
|  1026 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { |   314 void TypeRecordingBinaryOpStub::GenerateTypeTransition(MacroAssembler* masm) { | 
|  1027   __ pop(rcx);  // Save return address. |   315   __ pop(rcx);  // Save return address. | 
| (...skipping 4114 matching lines...) |
|  5142   // Do a tail call to the rewritten stub. |  4430   // Do a tail call to the rewritten stub. | 
|  5143   __ jmp(rdi); |  4431   __ jmp(rdi); | 
|  5144 } |  4432 } | 
|  5145  |  4433  | 
|  5146  |  4434  | 
|  5147 #undef __ |  4435 #undef __ | 
|  5148  |  4436  | 
|  5149 } }  // namespace v8::internal |  4437 } }  // namespace v8::internal | 
|  5150  |  4438  | 
|  5151 #endif  // V8_TARGET_ARCH_X64 |  4439 #endif  // V8_TARGET_ARCH_X64 | 