| OLD | NEW | 
|---|---|
| 1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. | 
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be | 
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. | 
| 4 | 4 | 
| 5 #if V8_TARGET_ARCH_PPC | 5 #if V8_TARGET_ARCH_S390 | 
| 6 | 6 | 
|  | 7 #include "src/code-stubs.h" | 
| 7 #include "src/base/bits.h" | 8 #include "src/base/bits.h" | 
| 8 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" | 
| 9 #include "src/code-stubs.h" |  | 
| 10 #include "src/codegen.h" | 10 #include "src/codegen.h" | 
| 11 #include "src/ic/handler-compiler.h" | 11 #include "src/ic/handler-compiler.h" | 
| 12 #include "src/ic/ic.h" | 12 #include "src/ic/ic.h" | 
| 13 #include "src/ic/stub-cache.h" | 13 #include "src/ic/stub-cache.h" | 
| 14 #include "src/isolate.h" | 14 #include "src/isolate.h" | 
| 15 #include "src/ppc/code-stubs-ppc.h" |  | 
| 16 #include "src/regexp/jsregexp.h" | 15 #include "src/regexp/jsregexp.h" | 
| 17 #include "src/regexp/regexp-macro-assembler.h" | 16 #include "src/regexp/regexp-macro-assembler.h" | 
| 18 #include "src/runtime/runtime.h" | 17 #include "src/runtime/runtime.h" | 
|  | 18 #include "src/s390/code-stubs-s390.h" | 
| 19 | 19 | 
| 20 namespace v8 { | 20 namespace v8 { | 
| 21 namespace internal { | 21 namespace internal { | 
| 22 | 22 | 
| 23 |  | 
| 24 static void InitializeArrayConstructorDescriptor( | 23 static void InitializeArrayConstructorDescriptor( | 
| 25     Isolate* isolate, CodeStubDescriptor* descriptor, | 24     Isolate* isolate, CodeStubDescriptor* descriptor, | 
| 26     int constant_stack_parameter_count) { | 25     int constant_stack_parameter_count) { | 
| 27   Address deopt_handler = | 26   Address deopt_handler = | 
| 28       Runtime::FunctionForId(Runtime::kArrayConstructor)->entry; | 27       Runtime::FunctionForId(Runtime::kArrayConstructor)->entry; | 
| 29 | 28 | 
| 30   if (constant_stack_parameter_count == 0) { | 29   if (constant_stack_parameter_count == 0) { | 
| 31     descriptor->Initialize(deopt_handler, constant_stack_parameter_count, | 30     descriptor->Initialize(deopt_handler, constant_stack_parameter_count, | 
| 32                            JS_FUNCTION_STUB_MODE); | 31                            JS_FUNCTION_STUB_MODE); | 
| 33   } else { | 32   } else { | 
| 34     descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count, | 33     descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count, | 
| 35                            JS_FUNCTION_STUB_MODE); | 34                            JS_FUNCTION_STUB_MODE); | 
| 36   } | 35   } | 
| 37 } | 36 } | 
| 38 | 37 | 
| 39 |  | 
| 40 static void InitializeInternalArrayConstructorDescriptor( | 38 static void InitializeInternalArrayConstructorDescriptor( | 
| 41     Isolate* isolate, CodeStubDescriptor* descriptor, | 39     Isolate* isolate, CodeStubDescriptor* descriptor, | 
| 42     int constant_stack_parameter_count) { | 40     int constant_stack_parameter_count) { | 
| 43   Address deopt_handler = | 41   Address deopt_handler = | 
| 44       Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry; | 42       Runtime::FunctionForId(Runtime::kInternalArrayConstructor)->entry; | 
| 45 | 43 | 
| 46   if (constant_stack_parameter_count == 0) { | 44   if (constant_stack_parameter_count == 0) { | 
| 47     descriptor->Initialize(deopt_handler, constant_stack_parameter_count, | 45     descriptor->Initialize(deopt_handler, constant_stack_parameter_count, | 
| 48                            JS_FUNCTION_STUB_MODE); | 46                            JS_FUNCTION_STUB_MODE); | 
| 49   } else { | 47   } else { | 
| 50     descriptor->Initialize(r3, deopt_handler, constant_stack_parameter_count, | 48     descriptor->Initialize(r2, deopt_handler, constant_stack_parameter_count, | 
| 51                            JS_FUNCTION_STUB_MODE); | 49                            JS_FUNCTION_STUB_MODE); | 
| 52   } | 50   } | 
| 53 } | 51 } | 
| 54 | 52 | 
| 55 |  | 
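
Throughout this port the general-purpose register numbers shift down by one (r3 becomes r2, r4 becomes r3, and so on) because the S390 Linux ABI passes the first argument and the return value in r2, whereas the PPC ABI starts at r3. Below is a minimal standalone C++ sketch of that renaming rule, for illustration only (the helper is hypothetical, not part of V8):

```cpp
// Hypothetical helper illustrating the register renaming visible in this
// diff: S390 numbers its argument/return registers from r2, PPC from r3.
#include <cstdio>
#include <string>

std::string PpcToS390(const std::string& ppc_reg) {
  // "r3" -> "r2", "r4" -> "r3", ... (assumes a PPC argument register r3..r10).
  int n = std::stoi(ppc_reg.substr(1));
  return "r" + std::to_string(n - 1);
}

int main() {
  for (const char* reg : {"r3", "r4", "r5", "r6", "r7"}) {
    std::printf("PPC %s -> S390 %s\n", reg, PpcToS390(reg).c_str());
  }
  return 0;
}
```

The same one-register shift explains every register substitution in the hunks that follow.

| OLD | NEW |
|---|---|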
| 56 void ArrayNoArgumentConstructorStub::InitializeDescriptor( | 53 void ArrayNoArgumentConstructorStub::InitializeDescriptor( | 
| 57     CodeStubDescriptor* descriptor) { | 54     CodeStubDescriptor* descriptor) { | 
| 58   InitializeArrayConstructorDescriptor(isolate(), descriptor, 0); | 55   InitializeArrayConstructorDescriptor(isolate(), descriptor, 0); | 
| 59 } | 56 } | 
| 60 | 57 | 
| 61 |  | 
| 62 void ArraySingleArgumentConstructorStub::InitializeDescriptor( | 58 void ArraySingleArgumentConstructorStub::InitializeDescriptor( | 
| 63     CodeStubDescriptor* descriptor) { | 59     CodeStubDescriptor* descriptor) { | 
| 64   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1); | 60   InitializeArrayConstructorDescriptor(isolate(), descriptor, 1); | 
| 65 } | 61 } | 
| 66 | 62 | 
| 67 |  | 
| 68 void ArrayNArgumentsConstructorStub::InitializeDescriptor( | 63 void ArrayNArgumentsConstructorStub::InitializeDescriptor( | 
| 69     CodeStubDescriptor* descriptor) { | 64     CodeStubDescriptor* descriptor) { | 
| 70   InitializeArrayConstructorDescriptor(isolate(), descriptor, -1); | 65   InitializeArrayConstructorDescriptor(isolate(), descriptor, -1); | 
| 71 } | 66 } | 
| 72 | 67 | 
| 73 |  | 
| 74 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor( | 68 void InternalArrayNoArgumentConstructorStub::InitializeDescriptor( | 
| 75     CodeStubDescriptor* descriptor) { | 69     CodeStubDescriptor* descriptor) { | 
| 76   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0); | 70   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 0); | 
| 77 } | 71 } | 
| 78 | 72 | 
| 79 |  | 
| 80 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor( | 73 void InternalArraySingleArgumentConstructorStub::InitializeDescriptor( | 
| 81     CodeStubDescriptor* descriptor) { | 74     CodeStubDescriptor* descriptor) { | 
| 82   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1); | 75   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, 1); | 
| 83 } | 76 } | 
| 84 | 77 | 
| 85 |  | 
| 86 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor( | 78 void InternalArrayNArgumentsConstructorStub::InitializeDescriptor( | 
| 87     CodeStubDescriptor* descriptor) { | 79     CodeStubDescriptor* descriptor) { | 
| 88   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1); | 80   InitializeInternalArrayConstructorDescriptor(isolate(), descriptor, -1); | 
| 89 } | 81 } | 
| 90 | 82 | 
| 91 |  | 
| 92 #define __ ACCESS_MASM(masm) | 83 #define __ ACCESS_MASM(masm) | 
| 93 | 84 | 
| 94 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, | 85 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, | 
| 95                                           Condition cond); | 86                                           Condition cond); | 
| 96 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, | 87 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, | 
| 97                                     Register rhs, Label* lhs_not_nan, | 88                                     Register rhs, Label* lhs_not_nan, | 
| 98                                     Label* slow, bool strict); | 89                                     Label* slow, bool strict); | 
| 99 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs, | 90 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs, | 
| 100                                            Register rhs); | 91                                            Register rhs); | 
| 101 | 92 | 
| 102 |  | 
| 103 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm, | 93 void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm, | 
| 104                                                ExternalReference miss) { | 94                                                ExternalReference miss) { | 
| 105   // Update the static counter each time a new code stub is generated. | 95   // Update the static counter each time a new code stub is generated. | 
| 106   isolate()->counters()->code_stubs()->Increment(); | 96   isolate()->counters()->code_stubs()->Increment(); | 
| 107 | 97 | 
| 108   CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor(); | 98   CallInterfaceDescriptor descriptor = GetCallInterfaceDescriptor(); | 
| 109   int param_count = descriptor.GetRegisterParameterCount(); | 99   int param_count = descriptor.GetRegisterParameterCount(); | 
| 110   { | 100   { | 
| 111     // Call the runtime system in a fresh internal frame. | 101     // Call the runtime system in a fresh internal frame. | 
| 112     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 102     FrameScope scope(masm, StackFrame::INTERNAL); | 
| 113     DCHECK(param_count == 0 || | 103     DCHECK(param_count == 0 || | 
| 114            r3.is(descriptor.GetRegisterParameter(param_count - 1))); | 104            r2.is(descriptor.GetRegisterParameter(param_count - 1))); | 
| 115     // Push arguments | 105     // Push arguments | 
| 116     for (int i = 0; i < param_count; ++i) { | 106     for (int i = 0; i < param_count; ++i) { | 
| 117       __ push(descriptor.GetRegisterParameter(i)); | 107       __ push(descriptor.GetRegisterParameter(i)); | 
| 118     } | 108     } | 
| 119     __ CallExternalReference(miss, param_count); | 109     __ CallExternalReference(miss, param_count); | 
| 120   } | 110   } | 
| 121 | 111 | 
| 122   __ Ret(); | 112   __ Ret(); | 
| 123 } | 113 } | 
| 124 | 114 | 
| 125 |  | 
| 126 void DoubleToIStub::Generate(MacroAssembler* masm) { | 115 void DoubleToIStub::Generate(MacroAssembler* masm) { | 
| 127   Label out_of_range, only_low, negate, done, fastpath_done; | 116   Label out_of_range, only_low, negate, done, fastpath_done; | 
| 128   Register input_reg = source(); | 117   Register input_reg = source(); | 
| 129   Register result_reg = destination(); | 118   Register result_reg = destination(); | 
| 130   DCHECK(is_truncating()); | 119   DCHECK(is_truncating()); | 
| 131 | 120 | 
| 132   int double_offset = offset(); | 121   int double_offset = offset(); | 
| 133 | 122 | 
| 134   // Immediate values for this stub fit in instructions, so it's safe to use ip. | 123   // Immediate values for this stub fit in instructions, so it's safe to use ip. | 
| 135   Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg); | 124   Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg); | 
| 136   Register scratch_low = | 125   Register scratch_low = | 
| 137       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); | 126       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch); | 
| 138   Register scratch_high = | 127   Register scratch_high = | 
| 139       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low); | 128       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low); | 
| 140   DoubleRegister double_scratch = kScratchDoubleReg; | 129   DoubleRegister double_scratch = kScratchDoubleReg; | 
| 141 | 130 | 
| 142   __ push(scratch); | 131   __ push(scratch); | 
| 143   // Account for saved regs if input is sp. | 132   // Account for saved regs if input is sp. | 
| 144   if (input_reg.is(sp)) double_offset += kPointerSize; | 133   if (input_reg.is(sp)) double_offset += kPointerSize; | 
| 145 | 134 | 
| 146   if (!skip_fastpath()) { | 135   if (!skip_fastpath()) { | 
| 147     // Load double input. | 136     // Load double input. | 
| 148     __ lfd(double_scratch, MemOperand(input_reg, double_offset)); | 137     __ LoadDouble(double_scratch, MemOperand(input_reg, double_offset)); | 
| 149 | 138 | 
| 150     // Do fast-path convert from double to int. | 139     // Do fast-path convert from double to int. | 
| 151     __ ConvertDoubleToInt64(double_scratch, | 140     __ ConvertDoubleToInt64(double_scratch, | 
| 152 #if !V8_TARGET_ARCH_PPC64 | 141 #if !V8_TARGET_ARCH_S390X | 
| 153                             scratch, | 142                             scratch, | 
| 154 #endif | 143 #endif | 
| 155                             result_reg, d0); | 144                             result_reg, d0); | 
| 156 | 145 | 
| 157 // Test for overflow | 146 // Test for overflow | 
| 158 #if V8_TARGET_ARCH_PPC64 | 147 #if V8_TARGET_ARCH_S390X | 
| 159     __ TestIfInt32(result_reg, r0); | 148     __ TestIfInt32(result_reg, r0); | 
| 160 #else | 149 #else | 
| 161     __ TestIfInt32(scratch, result_reg, r0); | 150     __ TestIfInt32(scratch, result_reg, r0); | 
| 162 #endif | 151 #endif | 
| 163     __ beq(&fastpath_done); | 152     __ beq(&fastpath_done, Label::kNear); | 
| 164   } | 153   } | 
| 165 | 154 | 
| 166   __ Push(scratch_high, scratch_low); | 155   __ Push(scratch_high, scratch_low); | 
| 167   // Account for saved regs if input is sp. | 156   // Account for saved regs if input is sp. | 
| 168   if (input_reg.is(sp)) double_offset += 2 * kPointerSize; | 157   if (input_reg.is(sp)) double_offset += 2 * kPointerSize; | 
| 169 | 158 | 
| 170   __ lwz(scratch_high, | 159   __ LoadlW(scratch_high, | 
| 171          MemOperand(input_reg, double_offset + Register::kExponentOffset)); | 160             MemOperand(input_reg, double_offset + Register::kExponentOffset)); | 
| 172   __ lwz(scratch_low, | 161   __ LoadlW(scratch_low, | 
| 173          MemOperand(input_reg, double_offset + Register::kMantissaOffset)); | 162             MemOperand(input_reg, double_offset + Register::kMantissaOffset)); | 
| 174 | 163 | 
| 175   __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask); | 164   __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask); | 
| 176   // Load scratch with exponent - 1. This is faster than loading | 165   // Load scratch with exponent - 1. This is faster than loading | 
| 177   // with exponent because Bias + 1 = 1024 which is a *PPC* immediate value. | 166   // with exponent because Bias + 1 = 1024 which is an *S390* immediate value. | 
| 178   STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024); | 167   STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024); | 
| 179   __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1)); | 168   __ SubP(scratch, Operand(HeapNumber::kExponentBias + 1)); | 
| 180   // If exponent is greater than or equal to 84, the 32 least significant | 169   // If exponent is greater than or equal to 84, the 32 least significant | 
| 181   // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), | 170   // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits), | 
| 182   // the result is 0. | 171   // the result is 0. | 
| 183   // Compare exponent with 84 (compare exponent - 1 with 83). | 172   // Compare exponent with 84 (compare exponent - 1 with 83). | 
| 184   __ cmpi(scratch, Operand(83)); | 173   __ CmpP(scratch, Operand(83)); | 
| 185   __ bge(&out_of_range); | 174   __ bge(&out_of_range, Label::kNear); | 
| 186 | 175 | 
| 187   // If we reach this code, 31 <= exponent <= 83. | 176   // If we reach this code, 31 <= exponent <= 83. | 
| 188   // So, we don't have to handle cases where 0 <= exponent <= 20 for | 177   // So, we don't have to handle cases where 0 <= exponent <= 20 for | 
| 189   // which we would need to shift right the high part of the mantissa. | 178   // which we would need to shift right the high part of the mantissa. | 
| 190   // Scratch contains exponent - 1. | 179   // Scratch contains exponent - 1. | 
| 191   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). | 180   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)). | 
| 192   __ subfic(scratch, scratch, Operand(51)); | 181   __ Load(r0, Operand(51)); | 
| 193   __ cmpi(scratch, Operand::Zero()); | 182   __ SubP(scratch, r0, scratch); | 
| 194   __ ble(&only_low); | 183   __ CmpP(scratch, Operand::Zero()); | 
|  | 184   __ ble(&only_low, Label::kNear); | 
| 195   // 21 <= exponent <= 51, shift scratch_low and scratch_high | 185   // 21 <= exponent <= 51, shift scratch_low and scratch_high | 
| 196   // to generate the result. | 186   // to generate the result. | 
| 197   __ srw(scratch_low, scratch_low, scratch); | 187   __ ShiftRight(scratch_low, scratch_low, scratch); | 
| 198   // Scratch contains: 52 - exponent. | 188   // Scratch contains: 52 - exponent. | 
| 199   // We need: exponent - 20. | 189   // We need: exponent - 20. | 
| 200   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. | 190   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20. | 
| 201   __ subfic(scratch, scratch, Operand(32)); | 191   __ Load(r0, Operand(32)); | 
|  | 192   __ SubP(scratch, r0, scratch); | 
| 202   __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask); | 193   __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask); | 
| 203   // Set the implicit 1 before the mantissa part in scratch_high. | 194   // Set the implicit 1 before the mantissa part in scratch_high. | 
| 204   STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16); | 195   STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16); | 
| 205   __ oris(result_reg, result_reg, | 196   __ Load(r0, Operand(1 << ((HeapNumber::kMantissaBitsInTopWord)-16))); | 
| 206           Operand(1 << ((HeapNumber::kMantissaBitsInTopWord) - 16))); | 197   __ ShiftLeftP(r0, r0, Operand(16)); | 
| 207   __ slw(r0, result_reg, scratch); | 198   __ OrP(result_reg, result_reg, r0); | 
| 208   __ orx(result_reg, scratch_low, r0); | 199   __ ShiftLeft(r0, result_reg, scratch); | 
| 209   __ b(&negate); | 200   __ OrP(result_reg, scratch_low, r0); | 
|  | 201   __ b(&negate, Label::kNear); | 
| 210 | 202 | 
| 211   __ bind(&out_of_range); | 203   __ bind(&out_of_range); | 
| 212   __ mov(result_reg, Operand::Zero()); | 204   __ mov(result_reg, Operand::Zero()); | 
| 213   __ b(&done); | 205   __ b(&done, Label::kNear); | 
| 214 | 206 | 
| 215   __ bind(&only_low); | 207   __ bind(&only_low); | 
| 216   // 52 <= exponent <= 83, shift only scratch_low. | 208   // 52 <= exponent <= 83, shift only scratch_low. | 
| 217   // On entry, scratch contains: 52 - exponent. | 209   // On entry, scratch contains: 52 - exponent. | 
| 218   __ neg(scratch, scratch); | 210   __ LoadComplementRR(scratch, scratch); | 
| 219   __ slw(result_reg, scratch_low, scratch); | 211   __ ShiftLeft(result_reg, scratch_low, scratch); | 
| 220 | 212 | 
| 221   __ bind(&negate); | 213   __ bind(&negate); | 
| 222   // If input was positive, scratch_high ASR 31 equals 0 and | 214   // If input was positive, scratch_high ASR 31 equals 0 and | 
| 223   // scratch_high LSR 31 equals zero. | 215   // scratch_high LSR 31 equals zero. | 
| 224   // New result = (result eor 0) + 0 = result. | 216   // New result = (result eor 0) + 0 = result. | 
| 225   // If the input was negative, we have to negate the result. | 217   // If the input was negative, we have to negate the result. | 
| 226   // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1. | 218   // scratch_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1. | 
| 227   // New result = (result eor 0xffffffff) + 1 = 0 - result. | 219   // New result = (result eor 0xffffffff) + 1 = 0 - result. | 
| 228   __ srawi(r0, scratch_high, 31); | 220   __ ShiftRightArith(r0, scratch_high, Operand(31)); | 
| 229 #if V8_TARGET_ARCH_PPC64 | 221 #if V8_TARGET_ARCH_S390X | 
| 230   __ srdi(r0, r0, Operand(32)); | 222   __ lgfr(r0, r0); | 
|  | 223   __ ShiftRightP(r0, r0, Operand(32)); | 
| 231 #endif | 224 #endif | 
| 232   __ xor_(result_reg, result_reg, r0); | 225   __ XorP(result_reg, r0); | 
| 233   __ srwi(r0, scratch_high, Operand(31)); | 226   __ ShiftRight(r0, scratch_high, Operand(31)); | 
| 234   __ add(result_reg, result_reg, r0); | 227   __ AddP(result_reg, r0); | 
| 235 | 228 | 
| 236   __ bind(&done); | 229   __ bind(&done); | 
| 237   __ Pop(scratch_high, scratch_low); | 230   __ Pop(scratch_high, scratch_low); | 
| 238 | 231 | 
| 239   __ bind(&fastpath_done); | 232   __ bind(&fastpath_done); | 
| 240   __ pop(scratch); | 233   __ pop(scratch); | 
| 241 | 234 | 
| 242   __ Ret(); | 235   __ Ret(); | 
| 243 } | 236 } | 
| 244 | 237 | 
| 245 |  | 
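
DoubleToIStub::Generate above hand-implements the truncating double-to-int32 conversion for values the fast hardware path cannot handle: it splits the IEEE-754 value into exponent and mantissa words, shifts the mantissa into place according to the exponent ranges called out in the comments, and negates via the xor/add trick. The following plain C++ restatement of that slow path is a sketch assuming IEEE-754 doubles, an illustration rather than the V8 code:

```cpp
#include <cstdint>
#include <cstdio>
#include <cstring>

// Truncate a double to its low 32 integer bits, mirroring the stub's slow
// path: exponent arithmetic, mantissa shifting, then the sign fix-up.
int32_t TruncateDoubleToInt32(double input) {
  uint64_t bits;
  std::memcpy(&bits, &input, sizeof(bits));
  uint32_t high = static_cast<uint32_t>(bits >> 32);  // sign, exponent, top mantissa
  uint32_t low = static_cast<uint32_t>(bits);         // low mantissa word

  int exponent = static_cast<int>((high >> 20) & 0x7ff) - 1023;
  if (exponent < 0) return 0;    // |input| < 1 truncates to 0
  if (exponent >= 84) return 0;  // out_of_range: low 32 result bits are all 0

  uint32_t mantissa_high = (high & 0xfffffu) | (1u << 20);  // implicit leading 1
  uint32_t result;
  if (exponent > 51) {
    result = low << (exponent - 52);               // only_low path
  } else if (exponent > 20) {
    result = (mantissa_high << (exponent - 20)) |  // high part of mantissa...
             (low >> (52 - exponent));             // ...combined with the low word
  } else {
    result = mantissa_high >> (20 - exponent);     // covered by V8's fast path
  }

  // negate path: (result ^ sign_mask) + sign_bit is two's-complement negation.
  uint32_t sign_mask = (high & 0x80000000u) ? 0xffffffffu : 0u;
  return static_cast<int32_t>((result ^ sign_mask) + (sign_mask & 1u));
}

int main() {
  std::printf("%d\n", TruncateDoubleToInt32(2147483648.75));  // -2147483648
  std::printf("%d\n", TruncateDoubleToInt32(-1.9));           // -1
  return 0;
}
```

| OLD | NEW |
|---|---|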
| 246 // Handle the case where the lhs and rhs are the same object. | 238 // Handle the case where the lhs and rhs are the same object. | 
| 247 // Equality is almost reflexive (everything but NaN), so this is a test | 239 // Equality is almost reflexive (everything but NaN), so this is a test | 
| 248 // for "identity and not NaN". | 240 // for "identity and not NaN". | 
| 249 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, | 241 static void EmitIdenticalObjectComparison(MacroAssembler* masm, Label* slow, | 
| 250                                           Condition cond) { | 242                                           Condition cond) { | 
| 251   Label not_identical; | 243   Label not_identical; | 
| 252   Label heap_number, return_equal; | 244   Label heap_number, return_equal; | 
| 253   __ cmp(r3, r4); | 245   __ CmpP(r2, r3); | 
| 254   __ bne(¬_identical); | 246   __ bne(¬_identical); | 
| 255 | 247 | 
| 256   // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), | 248   // Test for NaN. Sadly, we can't just compare to Factory::nan_value(), | 
| 257   // so we do the second best thing - test it ourselves. | 249   // so we do the second best thing - test it ourselves. | 
| 258   // They are both equal and they are not both Smis so both of them are not | 250   // They are both equal and they are not both Smis so both of them are not | 
| 259   // Smis.  If it's not a heap number, then return equal. | 251   // Smis.  If it's not a heap number, then return equal. | 
| 260   if (cond == lt || cond == gt) { | 252   if (cond == lt || cond == gt) { | 
| 261     // Call runtime on identical JSObjects. | 253     // Call runtime on identical JSObjects. | 
| 262     __ CompareObjectType(r3, r7, r7, FIRST_JS_RECEIVER_TYPE); | 254     __ CompareObjectType(r2, r6, r6, FIRST_JS_RECEIVER_TYPE); | 
| 263     __ bge(slow); | 255     __ bge(slow); | 
| 264     // Call runtime on identical symbols since we need to throw a TypeError. | 256     // Call runtime on identical symbols since we need to throw a TypeError. | 
| 265     __ cmpi(r7, Operand(SYMBOL_TYPE)); | 257     __ CmpP(r6, Operand(SYMBOL_TYPE)); | 
| 266     __ beq(slow); | 258     __ beq(slow); | 
| 267     // Call runtime on identical SIMD values since we must throw a TypeError. | 259     // Call runtime on identical SIMD values since we must throw a TypeError. | 
| 268     __ cmpi(r7, Operand(SIMD128_VALUE_TYPE)); | 260     __ CmpP(r6, Operand(SIMD128_VALUE_TYPE)); | 
| 269     __ beq(slow); | 261     __ beq(slow); | 
| 270   } else { | 262   } else { | 
| 271     __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE); | 263     __ CompareObjectType(r2, r6, r6, HEAP_NUMBER_TYPE); | 
| 272     __ beq(&heap_number); | 264     __ beq(&heap_number); | 
| 273     // Comparing JS objects with <=, >= is complicated. | 265     // Comparing JS objects with <=, >= is complicated. | 
| 274     if (cond != eq) { | 266     if (cond != eq) { | 
| 275       __ cmpi(r7, Operand(FIRST_JS_RECEIVER_TYPE)); | 267       __ CmpP(r6, Operand(FIRST_JS_RECEIVER_TYPE)); | 
| 276       __ bge(slow); | 268       __ bge(slow); | 
| 277       // Call runtime on identical symbols since we need to throw a TypeError. | 269       // Call runtime on identical symbols since we need to throw a TypeError. | 
| 278       __ cmpi(r7, Operand(SYMBOL_TYPE)); | 270       __ CmpP(r6, Operand(SYMBOL_TYPE)); | 
| 279       __ beq(slow); | 271       __ beq(slow); | 
| 280       // Call runtime on identical SIMD values since we must throw a TypeError. | 272       // Call runtime on identical SIMD values since we must throw a TypeError. | 
| 281       __ cmpi(r7, Operand(SIMD128_VALUE_TYPE)); | 273       __ CmpP(r6, Operand(SIMD128_VALUE_TYPE)); | 
| 282       __ beq(slow); | 274       __ beq(slow); | 
| 283       // Normally here we fall through to return_equal, but undefined is | 275       // Normally here we fall through to return_equal, but undefined is | 
| 284       // special: (undefined == undefined) == true, but | 276       // special: (undefined == undefined) == true, but | 
| 285       // (undefined <= undefined) == false!  See ECMAScript 11.8.5. | 277       // (undefined <= undefined) == false!  See ECMAScript 11.8.5. | 
| 286       if (cond == le || cond == ge) { | 278       if (cond == le || cond == ge) { | 
| 287         __ cmpi(r7, Operand(ODDBALL_TYPE)); | 279         __ CmpP(r6, Operand(ODDBALL_TYPE)); | 
| 288         __ bne(&return_equal); | 280         __ bne(&return_equal); | 
| 289         __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); | 281         __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); | 
| 290         __ cmp(r3, r5); |  | 
| 291         __ bne(&return_equal); | 282         __ bne(&return_equal); | 
| 292         if (cond == le) { | 283         if (cond == le) { | 
| 293           // undefined <= undefined should fail. | 284           // undefined <= undefined should fail. | 
| 294           __ li(r3, Operand(GREATER)); | 285           __ LoadImmP(r2, Operand(GREATER)); | 
| 295         } else { | 286         } else { | 
| 296           // undefined >= undefined should fail. | 287           // undefined >= undefined should fail. | 
| 297           __ li(r3, Operand(LESS)); | 288           __ LoadImmP(r2, Operand(LESS)); | 
| 298         } | 289         } | 
| 299         __ Ret(); | 290         __ Ret(); | 
| 300       } | 291       } | 
| 301     } | 292     } | 
| 302   } | 293   } | 
| 303 | 294 | 
| 304   __ bind(&return_equal); | 295   __ bind(&return_equal); | 
| 305   if (cond == lt) { | 296   if (cond == lt) { | 
| 306     __ li(r3, Operand(GREATER));  // Things aren't less than themselves. | 297     __ LoadImmP(r2, Operand(GREATER));  // Things aren't less than themselves. | 
| 307   } else if (cond == gt) { | 298   } else if (cond == gt) { | 
| 308     __ li(r3, Operand(LESS));  // Things aren't greater than themselves. | 299     __ LoadImmP(r2, Operand(LESS));  // Things aren't greater than themselves. | 
| 309   } else { | 300   } else { | 
| 310     __ li(r3, Operand(EQUAL));  // Things are <=, >=, ==, === themselves. | 301     __ LoadImmP(r2, Operand(EQUAL));  // Things are <=, >=, ==, === themselves | 
| 311   } | 302   } | 
| 312   __ Ret(); | 303   __ Ret(); | 
| 313 | 304 | 
| 314   // For less and greater we don't have to check for NaN since the result of | 305   // For less and greater we don't have to check for NaN since the result of | 
| 315   // x < x is false regardless.  For the others, here is some code to check | 306   // x < x is false regardless.  For the others, here is some code to check | 
| 316   // for NaN. | 307   // for NaN. | 
| 317   if (cond != lt && cond != gt) { | 308   if (cond != lt && cond != gt) { | 
| 318     __ bind(&heap_number); | 309     __ bind(&heap_number); | 
| 319     // It is a heap number, so return non-equal if it's NaN and equal if it's | 310     // It is a heap number, so return non-equal if it's NaN and equal if it's | 
| 320     // not NaN. | 311     // not NaN. | 
| 321 | 312 | 
| 322     // The representation of NaN values has all exponent bits (52..62) set, | 313     // The representation of NaN values has all exponent bits (52..62) set, | 
| 323     // and not all mantissa bits (0..51) clear. | 314     // and not all mantissa bits (0..51) clear. | 
| 324     // Read top bits of double representation (second word of value). | 315     // Read top bits of double representation (second word of value). | 
| 325     __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset)); | 316     __ LoadlW(r4, FieldMemOperand(r2, HeapNumber::kExponentOffset)); | 
| 326     // Test that exponent bits are all set. | 317     // Test that exponent bits are all set. | 
| 327     STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u); | 318     STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u); | 
| 328     __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask); | 319     __ ExtractBitMask(r5, r4, HeapNumber::kExponentMask); | 
| 329     __ cmpli(r6, Operand(0x7ff)); | 320     __ CmpLogicalP(r5, Operand(0x7ff)); | 
| 330     __ bne(&return_equal); | 321     __ bne(&return_equal); | 
| 331 | 322 | 
| 332     // Shift out flag and all exponent bits, retaining only mantissa. | 323     // Shift out flag and all exponent bits, retaining only mantissa. | 
| 333     __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord)); | 324     __ sll(r4, Operand(HeapNumber::kNonMantissaBitsInTopWord)); | 
| 334     // Or with all low-bits of mantissa. | 325     // Or with all low-bits of mantissa. | 
| 335     __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); | 326     __ LoadlW(r5, FieldMemOperand(r2, HeapNumber::kMantissaOffset)); | 
| 336     __ orx(r3, r6, r5); | 327     __ OrP(r2, r5, r4); | 
| 337     __ cmpi(r3, Operand::Zero()); | 328     __ CmpP(r2, Operand::Zero()); | 
| 338     // For equal we already have the right value in r3:  Return zero (equal) | 329     // For equal we already have the right value in r2:  Return zero (equal) | 
| 339     // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 330     // if all bits in mantissa are zero (it's an Infinity) and non-zero if | 
| 340     // not (it's a NaN).  For <= and >= we need to load r0 with the failing | 331     // not (it's a NaN).  For <= and >= we need to load r0 with the failing | 
| 341     // value if it's a NaN. | 332     // value if it's a NaN. | 
| 342     if (cond != eq) { | 333     if (cond != eq) { | 
| 343       if (CpuFeatures::IsSupported(ISELECT)) { | 334       Label not_equal; | 
| 344         __ li(r4, Operand((cond == le) ? GREATER : LESS)); | 335       __ bne(¬_equal, Label::kNear); | 
| 345         __ isel(eq, r3, r3, r4); | 336       // All-zero means Infinity means equal. | 
|  | 337       __ Ret(); | 
|  | 338   __ bind(&not_equal); | 
|  | 339       if (cond == le) { | 
|  | 340         __ LoadImmP(r2, Operand(GREATER));  // NaN <= NaN should fail. | 
| 346       } else { | 341       } else { | 
| 347         // All-zero means Infinity means equal. | 342         __ LoadImmP(r2, Operand(LESS));  // NaN >= NaN should fail. | 
| 348         __ Ret(eq); |  | 
| 349         if (cond == le) { |  | 
| 350           __ li(r3, Operand(GREATER));  // NaN <= NaN should fail. |  | 
| 351         } else { |  | 
| 352           __ li(r3, Operand(LESS));  // NaN >= NaN should fail. |  | 
| 353         } |  | 
| 354       } | 343       } | 
| 355     } | 344     } | 
| 356     __ Ret(); | 345     __ Ret(); | 
| 357   } | 346   } | 
| 358   // No fall through here. | 347   // No fall through here. | 
| 359 | 348 | 
| 360   __ bind(¬_identical); | 349   __ bind(¬_identical); | 
| 361 } | 350 } | 
| 362 | 351 | 
| 363 |  | 
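
The heap-number arm of EmitIdenticalObjectComparison decides NaN-ness from the raw bit pattern: all eleven exponent bits set plus a non-zero mantissa means NaN (unequal), while a zero mantissa means an infinity (equal to itself). Here is the same test in portable C++, a sketch with the constants spelled out instead of taken from V8's headers:

```cpp
#include <cmath>
#include <cstdint>
#include <cstdio>
#include <cstring>
#include <limits>

// A double is NaN iff its exponent field is all ones (0x7ff) and its 52-bit
// mantissa is non-zero; an all-ones exponent with a zero mantissa is +/-Inf.
bool IsNaNByBits(double value) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  uint32_t high = static_cast<uint32_t>(bits >> 32);
  uint32_t low = static_cast<uint32_t>(bits);
  if (((high >> 20) & 0x7ff) != 0x7ff) return false;  // exponent not all set
  // Shift out the 12 non-mantissa bits of the top word (sign + exponent),
  // then OR with the low word, exactly as the stub does.
  return ((high << 12) | low) != 0;
}

int main() {
  std::printf("%d\n", IsNaNByBits(std::nan("")));                             // 1
  std::printf("%d\n", IsNaNByBits(std::numeric_limits<double>::infinity()));  // 0
  return 0;
}
```

| OLD | NEW |
|---|---|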
| 364 // See comment at call site. | 352 // See comment at call site. | 
| 365 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, | 353 static void EmitSmiNonsmiComparison(MacroAssembler* masm, Register lhs, | 
| 366                                     Register rhs, Label* lhs_not_nan, | 354                                     Register rhs, Label* lhs_not_nan, | 
| 367                                     Label* slow, bool strict) { | 355                                     Label* slow, bool strict) { | 
| 368   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3))); | 356   DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2))); | 
| 369 | 357 | 
| 370   Label rhs_is_smi; | 358   Label rhs_is_smi; | 
| 371   __ JumpIfSmi(rhs, &rhs_is_smi); | 359   __ JumpIfSmi(rhs, &rhs_is_smi); | 
| 372 | 360 | 
| 373   // Lhs is a Smi.  Check whether the rhs is a heap number. | 361   // Lhs is a Smi.  Check whether the rhs is a heap number. | 
| 374   __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE); | 362   __ CompareObjectType(rhs, r5, r6, HEAP_NUMBER_TYPE); | 
| 375   if (strict) { | 363   if (strict) { | 
| 376     // If rhs is not a number and lhs is a Smi then strict equality cannot | 364     // If rhs is not a number and lhs is a Smi then strict equality cannot | 
| 377     // succeed.  Return non-equal. | 365     // succeed.  Return non-equal. | 
| 378     // If rhs is r3 then there is already a non zero value in it. | 366     // If rhs is r2 then there is already a non zero value in it. | 
| 379     if (!rhs.is(r3)) { | 367     Label skip; | 
| 380       Label skip; | 368     __ beq(&skip, Label::kNear); | 
| 381       __ beq(&skip); | 369     if (!rhs.is(r2)) { | 
| 382       __ mov(r3, Operand(NOT_EQUAL)); | 370       __ mov(r2, Operand(NOT_EQUAL)); | 
| 383       __ Ret(); |  | 
| 384       __ bind(&skip); |  | 
| 385     } else { |  | 
| 386       __ Ret(ne); |  | 
| 387     } | 371     } | 
|  | 372     __ Ret(); | 
|  | 373     __ bind(&skip); | 
| 388   } else { | 374   } else { | 
| 389     // Smi compared non-strictly with a non-Smi non-heap-number.  Call | 375     // Smi compared non-strictly with a non-Smi non-heap-number.  Call | 
| 390     // the runtime. | 376     // the runtime. | 
| 391     __ bne(slow); | 377     __ bne(slow); | 
| 392   } | 378   } | 
| 393 | 379 | 
| 394   // Lhs is a smi, rhs is a number. | 380   // Lhs is a smi, rhs is a number. | 
| 395   // Convert lhs to a double in d7. | 381   // Convert lhs to a double in d7. | 
| 396   __ SmiToDouble(d7, lhs); | 382   __ SmiToDouble(d7, lhs); | 
| 397   // Load the double from rhs, tagged HeapNumber r3, to d6. | 383   // Load the double from rhs, tagged HeapNumber r2, to d6. | 
| 398   __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 384   __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 
| 399 | 385 | 
| 400   // We now have both loaded as doubles but we can skip the lhs nan check | 386   // We now have both loaded as doubles but we can skip the lhs nan check | 
| 401   // since it's a smi. | 387   // since it's a smi. | 
| 402   __ b(lhs_not_nan); | 388   __ b(lhs_not_nan); | 
| 403 | 389 | 
| 404   __ bind(&rhs_is_smi); | 390   __ bind(&rhs_is_smi); | 
| 405   // Rhs is a smi.  Check whether the non-smi lhs is a heap number. | 391   // Rhs is a smi.  Check whether the non-smi lhs is a heap number. | 
| 406   __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE); | 392   __ CompareObjectType(lhs, r6, r6, HEAP_NUMBER_TYPE); | 
| 407   if (strict) { | 393   if (strict) { | 
| 408     // If lhs is not a number and rhs is a smi then strict equality cannot | 394     // If lhs is not a number and rhs is a smi then strict equality cannot | 
| 409     // succeed.  Return non-equal. | 395     // succeed.  Return non-equal. | 
| 410     // If lhs is r3 then there is already a non zero value in it. | 396     // If lhs is r2 then there is already a non zero value in it. | 
| 411     if (!lhs.is(r3)) { | 397     Label skip; | 
| 412       Label skip; | 398     __ beq(&skip, Label::kNear); | 
| 413       __ beq(&skip); | 399     if (!lhs.is(r2)) { | 
| 414       __ mov(r3, Operand(NOT_EQUAL)); | 400       __ mov(r2, Operand(NOT_EQUAL)); | 
| 415       __ Ret(); |  | 
| 416       __ bind(&skip); |  | 
| 417     } else { |  | 
| 418       __ Ret(ne); |  | 
| 419     } | 401     } | 
|  | 402     __ Ret(); | 
|  | 403     __ bind(&skip); | 
| 420   } else { | 404   } else { | 
| 421     // Smi compared non-strictly with a non-smi non-heap-number.  Call | 405     // Smi compared non-strictly with a non-smi non-heap-number.  Call | 
| 422     // the runtime. | 406     // the runtime. | 
| 423     __ bne(slow); | 407     __ bne(slow); | 
| 424   } | 408   } | 
| 425 | 409 | 
| 426   // Rhs is a smi, lhs is a heap number. | 410   // Rhs is a smi, lhs is a heap number. | 
| 427   // Load the double from lhs, tagged HeapNumber r4, to d7. | 411   // Load the double from lhs, tagged HeapNumber r3, to d7. | 
| 428   __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 412   __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 
| 429   // Convert rhs to a double in d6. | 413   // Convert rhs to a double in d6. | 
| 430   __ SmiToDouble(d6, rhs); | 414   __ SmiToDouble(d6, rhs); | 
| 431   // Fall through to both_loaded_as_doubles. | 415   // Fall through to both_loaded_as_doubles. | 
| 432 } | 416 } | 
| 433 | 417 | 
| 434 |  | 
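
EmitSmiNonsmiComparison above widens the Smi side to a double so that one floating-point compare can decide mixed Smi/heap-number cases. A plain C++ analogue of that strategy (`TaggedValue` is an invented stand-in for V8's tagged pointers, used purely for illustration):

```cpp
#include <cstdio>

// One side is a Smi, the other a heap number: both get widened to double so
// a single floating-point comparison can decide the result.
struct TaggedValue {
  bool is_smi;
  int smi_value;       // valid when is_smi
  double heap_number;  // valid when !is_smi
};

double ToDouble(const TaggedValue& v) {
  // SmiToDouble in the stub; heap numbers are loaded directly from the object.
  return v.is_smi ? static_cast<double>(v.smi_value) : v.heap_number;
}

int Compare(const TaggedValue& lhs, const TaggedValue& rhs) {
  double a = ToDouble(lhs), b = ToDouble(rhs);
  if (a < b) return -1;  // LESS
  if (a > b) return 1;   // GREATER
  if (a == b) return 0;  // EQUAL
  return 2;              // unordered: at least one side is NaN
}

int main() {
  std::printf("%d\n", Compare({true, 3, 0.0}, {false, 0, 2.5}));  // 1
  return 0;
}
```

| OLD | NEW |
|---|---|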
| 435 // See comment at call site. | 418 // See comment at call site. | 
| 436 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs, | 419 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm, Register lhs, | 
| 437                                            Register rhs) { | 420                                            Register rhs) { | 
| 438   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3))); | 421   DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2))); | 
| 439 | 422 | 
| 440   // If either operand is a JS object or an oddball value, then they are | 423   // If either operand is a JS object or an oddball value, then they are | 
| 441   // not equal since their pointers are different. | 424   // not equal since their pointers are different. | 
| 442   // There is no test for undetectability in strict equality. | 425   // There is no test for undetectability in strict equality. | 
| 443   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); | 426   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); | 
| 444   Label first_non_object; | 427   Label first_non_object; | 
| 445   // Get the type of the first operand into r5 and compare it with | 428   // Get the type of the first operand into r4 and compare it with | 
| 446   // FIRST_JS_RECEIVER_TYPE. | 429   // FIRST_JS_RECEIVER_TYPE. | 
| 447   __ CompareObjectType(rhs, r5, r5, FIRST_JS_RECEIVER_TYPE); | 430   __ CompareObjectType(rhs, r4, r4, FIRST_JS_RECEIVER_TYPE); | 
| 448   __ blt(&first_non_object); | 431   __ blt(&first_non_object, Label::kNear); | 
| 449 | 432 | 
| 450   // Return non-zero (r3 is not zero) | 433   // Return non-zero (r2 is not zero) | 
| 451   Label return_not_equal; | 434   Label return_not_equal; | 
| 452   __ bind(&return_not_equal); | 435   __ bind(&return_not_equal); | 
| 453   __ Ret(); | 436   __ Ret(); | 
| 454 | 437 | 
| 455   __ bind(&first_non_object); | 438   __ bind(&first_non_object); | 
| 456   // Check for oddballs: true, false, null, undefined. | 439   // Check for oddballs: true, false, null, undefined. | 
| 457   __ cmpi(r5, Operand(ODDBALL_TYPE)); | 440   __ CmpP(r4, Operand(ODDBALL_TYPE)); | 
| 458   __ beq(&return_not_equal); | 441   __ beq(&return_not_equal); | 
| 459 | 442 | 
| 460   __ CompareObjectType(lhs, r6, r6, FIRST_JS_RECEIVER_TYPE); | 443   __ CompareObjectType(lhs, r5, r5, FIRST_JS_RECEIVER_TYPE); | 
| 461   __ bge(&return_not_equal); | 444   __ bge(&return_not_equal); | 
| 462 | 445 | 
| 463   // Check for oddballs: true, false, null, undefined. | 446   // Check for oddballs: true, false, null, undefined. | 
| 464   __ cmpi(r6, Operand(ODDBALL_TYPE)); | 447   __ CmpP(r5, Operand(ODDBALL_TYPE)); | 
| 465   __ beq(&return_not_equal); | 448   __ beq(&return_not_equal); | 
| 466 | 449 | 
| 467   // Now that we have the types we might as well check for | 450   // Now that we have the types we might as well check for | 
| 468   // internalized-internalized. | 451   // internalized-internalized. | 
| 469   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 452   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 
| 470   __ orx(r5, r5, r6); | 453   __ OrP(r4, r4, r5); | 
| 471   __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 454   __ AndP(r0, r4, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 
| 472   __ beq(&return_not_equal, cr0); | 455   __ beq(&return_not_equal); | 
| 473 } | 456 } | 
| 474 | 457 | 
| 475 |  | 
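
The tail of EmitStrictTwoHeapObjectCompare leans on kInternalizedTag == 0 and kStringTag == 0: ORing the two instance types and masking with (kIsNotStringMask | kIsNotInternalizedMask) yields zero exactly when both operands are internalized strings, so one branch covers both checks. A self-contained sketch of the trick follows; the mask values are assumptions for the demo, not quoted from V8's headers:

```cpp
#include <cstdint>
#include <cstdio>

// Assumed bit layout for the demo: a set bit means "not a string" or
// "not internalized"; internalized strings have both bits clear.
constexpr uint32_t kIsNotStringMask = 0x80;
constexpr uint32_t kIsNotInternalizedMask = 0x40;

bool BothInternalizedStrings(uint32_t type_a, uint32_t type_b) {
  // (a | b) has a mask bit set iff either operand has it set, so the AND is
  // zero only when both operands are internalized strings.
  return ((type_a | type_b) & (kIsNotStringMask | kIsNotInternalizedMask)) == 0;
}

int main() {
  std::printf("%d\n", BothInternalizedStrings(0x00, 0x04));  // 1: both internalized
  std::printf("%d\n", BothInternalizedStrings(0x00, 0x40));  // 0: one is not
  return 0;
}
```

| OLD | NEW |
|---|---|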
| 476 // See comment at call site. | 458 // See comment at call site. | 
| 477 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs, | 459 static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm, Register lhs, | 
| 478                                        Register rhs, | 460                                        Register rhs, | 
| 479                                        Label* both_loaded_as_doubles, | 461                                        Label* both_loaded_as_doubles, | 
| 480                                        Label* not_heap_numbers, Label* slow) { | 462                                        Label* not_heap_numbers, Label* slow) { | 
| 481   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3))); | 463   DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2))); | 
| 482 | 464 | 
| 483   __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE); | 465   __ CompareObjectType(rhs, r5, r4, HEAP_NUMBER_TYPE); | 
| 484   __ bne(not_heap_numbers); | 466   __ bne(not_heap_numbers); | 
| 485   __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 467   __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 
| 486   __ cmp(r5, r6); | 468   __ CmpP(r4, r5); | 
| 487   __ bne(slow);  // First was a heap number, second wasn't.  Go slow case. | 469   __ bne(slow);  // First was a heap number, second wasn't.  Go slow case. | 
| 488 | 470 | 
| 489   // Both are heap numbers.  Load them up then jump to the code we have | 471   // Both are heap numbers.  Load them up then jump to the code we have | 
| 490   // for that. | 472   // for that. | 
| 491   __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 473   __ LoadDouble(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset)); | 
| 492   __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 474   __ LoadDouble(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset)); | 
| 493 | 475 | 
| 494   __ b(both_loaded_as_doubles); | 476   __ b(both_loaded_as_doubles); | 
| 495 } | 477 } | 
| 496 | 478 | 
| 497 |  | 
| 498 // Fast negative check for internalized-to-internalized equality. | 479 // Fast negative check for internalized-to-internalized equality. | 
| 499 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 480 static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm, | 
| 500                                                      Register lhs, Register rhs, | 481                                                      Register lhs, Register rhs, | 
| 501                                                      Label* possible_strings, | 482                                                      Label* possible_strings, | 
| 502                                                      Label* runtime_call) { | 483                                                      Label* runtime_call) { | 
| 503   DCHECK((lhs.is(r3) && rhs.is(r4)) || (lhs.is(r4) && rhs.is(r3))); | 484   DCHECK((lhs.is(r2) && rhs.is(r3)) || (lhs.is(r3) && rhs.is(r2))); | 
| 504 | 485 | 
| 505   // r5 is object type of rhs. | 486   // r4 is object type of rhs. | 
| 506   Label object_test, return_unequal, undetectable; | 487   Label object_test, return_unequal, undetectable; | 
| 507   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 488   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 
| 508   __ andi(r0, r5, Operand(kIsNotStringMask)); | 489   __ mov(r0, Operand(kIsNotStringMask)); | 
| 509   __ bne(&object_test, cr0); | 490   __ AndP(r0, r4); | 
| 510   __ andi(r0, r5, Operand(kIsNotInternalizedMask)); | 491   __ bne(&object_test, Label::kNear); | 
| 511   __ bne(possible_strings, cr0); | 492   __ mov(r0, Operand(kIsNotInternalizedMask)); | 
| 512   __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE); | 493   __ AndP(r0, r4); | 
|  | 494   __ bne(possible_strings); | 
|  | 495   __ CompareObjectType(lhs, r5, r5, FIRST_NONSTRING_TYPE); | 
| 513   __ bge(runtime_call); | 496   __ bge(runtime_call); | 
| 514   __ andi(r0, r6, Operand(kIsNotInternalizedMask)); | 497   __ mov(r0, Operand(kIsNotInternalizedMask)); | 
| 515   __ bne(possible_strings, cr0); | 498   __ AndP(r0, r5); | 
|  | 499   __ bne(possible_strings); | 
| 516 | 500 | 
| 517   // Both are internalized. We already checked they weren't the same pointer so | 501   // Both are internalized. We already checked they weren't the same pointer so | 
| 518   // they are not equal. Return non-equal by returning the non-zero object | 502   // they are not equal. Return non-equal by returning the non-zero object | 
| 519   // pointer in r3. | 503   // pointer in r2. | 
| 520   __ Ret(); | 504   __ Ret(); | 
| 521 | 505 | 
| 522   __ bind(&object_test); | 506   __ bind(&object_test); | 
| 523   __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 507   __ LoadP(r4, FieldMemOperand(lhs, HeapObject::kMapOffset)); | 
| 524   __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset)); | 508   __ LoadP(r5, FieldMemOperand(rhs, HeapObject::kMapOffset)); | 
| 525   __ lbz(r7, FieldMemOperand(r5, Map::kBitFieldOffset)); | 509   __ LoadlB(r6, FieldMemOperand(r4, Map::kBitFieldOffset)); | 
| 526   __ lbz(r8, FieldMemOperand(r6, Map::kBitFieldOffset)); | 510   __ LoadlB(r7, FieldMemOperand(r5, Map::kBitFieldOffset)); | 
| 527   __ andi(r0, r7, Operand(1 << Map::kIsUndetectable)); | 511   __ AndP(r0, r6, Operand(1 << Map::kIsUndetectable)); | 
| 528   __ bne(&undetectable, cr0); | 512   __ bne(&undetectable); | 
| 529   __ andi(r0, r8, Operand(1 << Map::kIsUndetectable)); | 513   __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable)); | 
| 530   __ bne(&return_unequal, cr0); | 514   __ bne(&return_unequal); | 
| 531 | 515 | 
|  | 516   __ CompareInstanceType(r4, r4, FIRST_JS_RECEIVER_TYPE); | 
|  | 517   __ blt(runtime_call); | 
| 532   __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE); | 518   __ CompareInstanceType(r5, r5, FIRST_JS_RECEIVER_TYPE); | 
| 533   __ blt(runtime_call); | 519   __ blt(runtime_call); | 
| 534   __ CompareInstanceType(r6, r6, FIRST_JS_RECEIVER_TYPE); |  | 
| 535   __ blt(runtime_call); |  | 
| 536 | 520 | 
| 537   __ bind(&return_unequal); | 521   __ bind(&return_unequal); | 
| 538   // Return non-equal by returning the non-zero object pointer in r3. | 522   // Return non-equal by returning the non-zero object pointer in r2. | 
| 539   __ Ret(); | 523   __ Ret(); | 
| 540 | 524 | 
| 541   __ bind(&undetectable); | 525   __ bind(&undetectable); | 
| 542   __ andi(r0, r8, Operand(1 << Map::kIsUndetectable)); | 526   __ AndP(r0, r7, Operand(1 << Map::kIsUndetectable)); | 
| 543   __ beq(&return_unequal, cr0); | 527   __ beq(&return_unequal); | 
| 544   __ li(r3, Operand(EQUAL)); | 528   __ LoadImmP(r2, Operand(EQUAL)); | 
| 545   __ Ret(); | 529   __ Ret(); | 
| 546 } | 530 } | 
| 547 | 531 | 
| 548 |  | 
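
In the object_test path of EmitCheckForInternalizedStringsOrObjects, two distinct heap objects can still compare equal: when both maps carry the kIsUndetectable bit the stub loads EQUAL, matching the behaviour of `document.all`-style undetectable objects. A compact C++ restatement of that branch structure (the bit position and helper are assumptions for the sketch):

```cpp
#include <cstdint>
#include <cstdio>

constexpr int kIsUndetectableBit = 4;  // assumed position for this sketch

enum class LooseEquality { kEqual, kNotEqual, kNeedsRuntime };

// Decide equality for two distinct, non-internalized heap objects, mirroring
// the undetectable / return_unequal / runtime_call branches of the stub.
LooseEquality CompareHeapObjects(uint8_t lhs_bits, uint8_t rhs_bits,
                                 bool lhs_is_receiver, bool rhs_is_receiver) {
  bool lhs_undetectable = lhs_bits & (1 << kIsUndetectableBit);
  bool rhs_undetectable = rhs_bits & (1 << kIsUndetectableBit);
  if (lhs_undetectable) {
    // Undetectable vs. undetectable is EQUAL; vs. an ordinary object it isn't.
    return rhs_undetectable ? LooseEquality::kEqual : LooseEquality::kNotEqual;
  }
  if (rhs_undetectable) return LooseEquality::kNotEqual;
  // Neither is undetectable: only pairs of JS receivers can be decided here.
  if (!lhs_is_receiver || !rhs_is_receiver) return LooseEquality::kNeedsRuntime;
  return LooseEquality::kNotEqual;  // distinct receivers are never equal
}

int main() {
  bool eq = CompareHeapObjects(1 << kIsUndetectableBit, 1 << kIsUndetectableBit,
                               true, true) == LooseEquality::kEqual;
  std::printf("%d\n", eq);  // 1
  return 0;
}
```

| OLD | NEW |
|---|---|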
| 549 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input, | 532 static void CompareICStub_CheckInputType(MacroAssembler* masm, Register input, | 
| 550                                          Register scratch, | 533                                          Register scratch, | 
| 551                                          CompareICState::State expected, | 534                                          CompareICState::State expected, | 
| 552                                          Label* fail) { | 535                                          Label* fail) { | 
| 553   Label ok; | 536   Label ok; | 
| 554   if (expected == CompareICState::SMI) { | 537   if (expected == CompareICState::SMI) { | 
| 555     __ JumpIfNotSmi(input, fail); | 538     __ JumpIfNotSmi(input, fail); | 
| 556   } else if (expected == CompareICState::NUMBER) { | 539   } else if (expected == CompareICState::NUMBER) { | 
| 557     __ JumpIfSmi(input, &ok); | 540     __ JumpIfSmi(input, &ok); | 
| 558     __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, | 541     __ CheckMap(input, scratch, Heap::kHeapNumberMapRootIndex, fail, | 
| 559                 DONT_DO_SMI_CHECK); | 542                 DONT_DO_SMI_CHECK); | 
| 560   } | 543   } | 
| 561   // We could be strict about internalized/non-internalized here, but as long as | 544   // We could be strict about internalized/non-internalized here, but as long as | 
| 562   // hydrogen doesn't care, the stub doesn't have to care either. | 545   // hydrogen doesn't care, the stub doesn't have to care either. | 
| 563   __ bind(&ok); | 546   __ bind(&ok); | 
| 564 } | 547 } | 
| 565 | 548 | 
| 566 | 549 // On entry r3 and r2 are the values to be compared. | 
| 567 // On entry r4 and r3 are the values to be compared. | 550 // On exit r2 is 0, positive or negative to indicate the result of | 
| 568 // On exit r3 is 0, positive or negative to indicate the result of |  | 
| 569 // the comparison. | 551 // the comparison. | 
| 570 void CompareICStub::GenerateGeneric(MacroAssembler* masm) { | 552 void CompareICStub::GenerateGeneric(MacroAssembler* masm) { | 
| 571   Register lhs = r4; | 553   Register lhs = r3; | 
| 572   Register rhs = r3; | 554   Register rhs = r2; | 
| 573   Condition cc = GetCondition(); | 555   Condition cc = GetCondition(); | 
| 574 | 556 | 
| 575   Label miss; | 557   Label miss; | 
| 576   CompareICStub_CheckInputType(masm, lhs, r5, left(), &miss); | 558   CompareICStub_CheckInputType(masm, lhs, r4, left(), &miss); | 
| 577   CompareICStub_CheckInputType(masm, rhs, r6, right(), &miss); | 559   CompareICStub_CheckInputType(masm, rhs, r5, right(), &miss); | 
| 578 | 560 | 
| 579   Label slow;  // Call builtin. | 561   Label slow;  // Call builtin. | 
| 580   Label not_smis, both_loaded_as_doubles, lhs_not_nan; | 562   Label not_smis, both_loaded_as_doubles, lhs_not_nan; | 
| 581 | 563 | 
| 582   Label not_two_smis, smi_done; | 564   Label not_two_smis, smi_done; | 
| 583   __ orx(r5, r4, r3); | 565   __ OrP(r4, r3, r2); | 
| 584   __ JumpIfNotSmi(r5, ¬_two_smis); | 566   __ JumpIfNotSmi(r4, ¬_two_smis); | 
| 585   __ SmiUntag(r4); |  | 
| 586   __ SmiUntag(r3); | 567   __ SmiUntag(r3); | 
| 587   __ sub(r3, r4, r3); | 568   __ SmiUntag(r2); | 
|  | 569   __ SubP(r2, r3, r2); | 
| 588   __ Ret(); | 570   __ Ret(); | 
| 589   __ bind(¬_two_smis); | 571   __ bind(¬_two_smis); | 
| 590 | 572 | 
| 591   // NOTICE! This code is only reached after a smi-fast-case check, so | 573   // NOTICE! This code is only reached after a smi-fast-case check, so | 
| 592   // it is certain that at least one operand isn't a smi. | 574   // it is certain that at least one operand isn't a smi. | 
| 593 | 575 | 
| 594   // Handle the case where the objects are identical.  Either returns the answer | 576   // Handle the case where the objects are identical.  Either returns the answer | 
| 595   // or goes to slow.  Only falls through if the objects were not identical. | 577   // or goes to slow.  Only falls through if the objects were not identical. | 
| 596   EmitIdenticalObjectComparison(masm, &slow, cc); | 578   EmitIdenticalObjectComparison(masm, &slow, cc); | 
| 597 | 579 | 
| 598   // If either is a Smi (we know that not both are), then they can only | 580   // If either is a Smi (we know that not both are), then they can only | 
| 599   // be strictly equal if the other is a HeapNumber. | 581   // be strictly equal if the other is a HeapNumber. | 
| 600   STATIC_ASSERT(kSmiTag == 0); | 582   STATIC_ASSERT(kSmiTag == 0); | 
| 601   DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0)); | 583   DCHECK_EQ(static_cast<Smi*>(0), Smi::FromInt(0)); | 
| 602   __ and_(r5, lhs, rhs); | 584   __ AndP(r4, lhs, rhs); | 
| 603   __ JumpIfNotSmi(r5, ¬_smis); | 585   __ JumpIfNotSmi(r4, ¬_smis); | 
| 604   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can: | 586   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can: | 
| 605   // 1) Return the answer. | 587   // 1) Return the answer. | 
| 606   // 2) Go to slow. | 588   // 2) Go to slow. | 
| 607   // 3) Fall through to both_loaded_as_doubles. | 589   // 3) Fall through to both_loaded_as_doubles. | 
| 608   // 4) Jump to lhs_not_nan. | 590   // 4) Jump to lhs_not_nan. | 
| 609   // In cases 3 and 4 we have found out we were dealing with a number-number | 591   // In cases 3 and 4 we have found out we were dealing with a number-number | 
| 610   // comparison.  The double values of the numbers have been loaded | 592   // comparison.  The double values of the numbers have been loaded | 
| 611   // into d7 and d6. | 593   // into d7 and d6. | 
| 612   EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); | 594   EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict()); | 
| 613 | 595 | 
| 614   __ bind(&both_loaded_as_doubles); | 596   __ bind(&both_loaded_as_doubles); | 
| 615   // The arguments have been converted to doubles and stored in d6 and d7 | 597   // The arguments have been converted to doubles and stored in d6 and d7 | 
| 616   __ bind(&lhs_not_nan); | 598   __ bind(&lhs_not_nan); | 
| 617   Label no_nan; | 599   Label no_nan; | 
| 618   __ fcmpu(d7, d6); | 600   __ cdbr(d7, d6); | 
| 619 | 601 | 
| 620   Label nan, equal, less_than; | 602   Label nan, equal, less_than; | 
| 621   __ bunordered(&nan); | 603   __ bunordered(&nan); | 
| 622   if (CpuFeatures::IsSupported(ISELECT)) { | 604   __ beq(&equal, Label::kNear); | 
| 623     DCHECK(EQUAL == 0); | 605   __ blt(&less_than, Label::kNear); | 
| 624     __ li(r4, Operand(GREATER)); | 606   __ LoadImmP(r2, Operand(GREATER)); | 
| 625     __ li(r5, Operand(LESS)); | 607   __ Ret(); | 
| 626     __ isel(eq, r3, r0, r4); | 608   __ bind(&equal); | 
| 627     __ isel(lt, r3, r5, r3); | 609   __ LoadImmP(r2, Operand(EQUAL)); | 
| 628     __ Ret(); | 610   __ Ret(); | 
| 629   } else { | 611   __ bind(&less_than); | 
| 630     __ beq(&equal); | 612   __ LoadImmP(r2, Operand(LESS)); | 
| 631     __ blt(&less_than); | 613   __ Ret(); | 
| 632     __ li(r3, Operand(GREATER)); |  | 
| 633     __ Ret(); |  | 
| 634     __ bind(&equal); |  | 
| 635     __ li(r3, Operand(EQUAL)); |  | 
| 636     __ Ret(); |  | 
| 637     __ bind(&less_than); |  | 
| 638     __ li(r3, Operand(LESS)); |  | 
| 639     __ Ret(); |  | 
| 640   } |  | 
| 641 | 614 | 
| 642   __ bind(&nan); | 615   __ bind(&nan); | 
| 643   // If one of the sides was a NaN then the v flag is set.  Load r3 with | 616   // If one of the sides was a NaN then the v flag is set.  Load r2 with | 
| 644   // whatever it takes to make the comparison fail, since comparisons with NaN | 617   // whatever it takes to make the comparison fail, since comparisons with NaN | 
| 645   // always fail. | 618   // always fail. | 
| 646   if (cc == lt || cc == le) { | 619   if (cc == lt || cc == le) { | 
| 647     __ li(r3, Operand(GREATER)); | 620     __ LoadImmP(r2, Operand(GREATER)); | 
| 648   } else { | 621   } else { | 
| 649     __ li(r3, Operand(LESS)); | 622     __ LoadImmP(r2, Operand(LESS)); | 
| 650   } | 623   } | 
| 651   __ Ret(); | 624   __ Ret(); | 
| 652 | 625 | 
| 653   __ bind(¬_smis); | 626   __ bind(¬_smis); | 
| 654   // At this point we know we are dealing with two different objects, | 627   // At this point we know we are dealing with two different objects, | 
| 655   // and neither of them is a Smi.  The objects are in rhs_ and lhs_. | 628   // and neither of them is a Smi.  The objects are in rhs_ and lhs_. | 
| 656   if (strict()) { | 629   if (strict()) { | 
| 657     // This returns non-equal for some object types, or falls through if it | 630     // This returns non-equal for some object types, or falls through if it | 
| 658     // was not lucky. | 631     // was not lucky. | 
| 659     EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); | 632     EmitStrictTwoHeapObjectCompare(masm, lhs, rhs); | 
| 660   } | 633   } | 
| 661 | 634 | 
| 662   Label check_for_internalized_strings; | 635   Label check_for_internalized_strings; | 
| 663   Label flat_string_check; | 636   Label flat_string_check; | 
| 664   // Check for heap-number-heap-number comparison.  Can jump to slow case, | 637   // Check for heap-number-heap-number comparison.  Can jump to slow case, | 
| 665   // or load both doubles into r3, r4, r5, r6 and jump to the code that handles | 638   // or load both doubles into r2, r3, r4, r5 and jump to the code that handles | 
| 666   // that case.  If the inputs are not doubles then jumps to | 639   // that case.  If the inputs are not doubles then jumps to | 
| 667   // check_for_internalized_strings. | 640   // check_for_internalized_strings. | 
| 668   // In this case r5 will contain the type of rhs_.  Never falls through. | 641   // In this case r4 will contain the type of rhs_.  Never falls through. | 
| 669   EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles, | 642   EmitCheckForTwoHeapNumbers(masm, lhs, rhs, &both_loaded_as_doubles, | 
| 670                              &check_for_internalized_strings, | 643                              &check_for_internalized_strings, | 
| 671                              &flat_string_check); | 644                              &flat_string_check); | 
| 672 | 645 | 
| 673   __ bind(&check_for_internalized_strings); | 646   __ bind(&check_for_internalized_strings); | 
| 674   // In the strict case the EmitStrictTwoHeapObjectCompare already took care of | 647   // In the strict case the EmitStrictTwoHeapObjectCompare already took care of | 
| 675   // internalized strings. | 648   // internalized strings. | 
| 676   if (cc == eq && !strict()) { | 649   if (cc == eq && !strict()) { | 
| 677     // Returns an answer for two internalized strings or two detectable objects. | 650     // Returns an answer for two internalized strings or two detectable objects. | 
| 678     // Otherwise jumps to string case or not both strings case. | 651     // Otherwise jumps to string case or not both strings case. | 
| 679     // Assumes that r5 is the type of rhs_ on entry. | 652     // Assumes that r4 is the type of rhs_ on entry. | 
| 680     EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check, | 653     EmitCheckForInternalizedStringsOrObjects(masm, lhs, rhs, &flat_string_check, | 
| 681                                              &slow); | 654                                              &slow); | 
| 682   } | 655   } | 
| 683 | 656 | 
| 684   // Check for both being sequential one-byte strings, | 657   // Check for both being sequential one-byte strings, | 
| 685   // and inline if that is the case. | 658   // and inline if that is the case. | 
| 686   __ bind(&flat_string_check); | 659   __ bind(&flat_string_check); | 
| 687 | 660 | 
| 688   __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r5, r6, &slow); | 661   __ JumpIfNonSmisNotBothSequentialOneByteStrings(lhs, rhs, r4, r5, &slow); | 
| 689 | 662 | 
| 690   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5, | 663   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4, | 
| 691                       r6); | 664                       r5); | 
| 692   if (cc == eq) { | 665   if (cc == eq) { | 
| 693     StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r5, r6); | 666     StringHelper::GenerateFlatOneByteStringEquals(masm, lhs, rhs, r4, r5); | 
| 694   } else { | 667   } else { | 
| 695     StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r5, r6, r7); | 668     StringHelper::GenerateCompareFlatOneByteStrings(masm, lhs, rhs, r4, r5, r6); | 
| 696   } | 669   } | 
| 697   // Never falls through to here. | 670   // Never falls through to here. | 
| 698 | 671 | 
| 699   __ bind(&slow); | 672   __ bind(&slow); | 
| 700 | 673 | 
| 701   __ Push(lhs, rhs); | 674   __ Push(lhs, rhs); | 
| 702   // Figure out which native to call and setup the arguments. | 675   // Figure out which native to call and setup the arguments. | 
| 703   if (cc == eq) { | 676   if (cc == eq) { | 
| 704     __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals); | 677     __ TailCallRuntime(strict() ? Runtime::kStrictEquals : Runtime::kEquals); | 
| 705   } else { | 678   } else { | 
| 706     int ncr;  // NaN compare result | 679     int ncr;  // NaN compare result | 
| 707     if (cc == lt || cc == le) { | 680     if (cc == lt || cc == le) { | 
| 708       ncr = GREATER; | 681       ncr = GREATER; | 
| 709     } else { | 682     } else { | 
| 710       DCHECK(cc == gt || cc == ge);  // remaining cases | 683       DCHECK(cc == gt || cc == ge);  // remaining cases | 
| 711       ncr = LESS; | 684       ncr = LESS; | 
| 712     } | 685     } | 
| 713     __ LoadSmiLiteral(r3, Smi::FromInt(ncr)); | 686     __ LoadSmiLiteral(r2, Smi::FromInt(ncr)); | 
| 714     __ push(r3); | 687     __ push(r2); | 
| 715 | 688 | 
| 716     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 689     // Call the native; it returns -1 (less), 0 (equal), or 1 (greater) | 
| 717     // tagged as a small integer. | 690     // tagged as a small integer. | 
| 718     __ TailCallRuntime(Runtime::kCompare); | 691     __ TailCallRuntime(Runtime::kCompare); | 
| 719   } | 692   } | 
| 720 | 693 | 
| 721   __ bind(&miss); | 694   __ bind(&miss); | 
| 722   GenerateMiss(masm); | 695   GenerateMiss(masm); | 
| 723 } | 696 } | 
| 724 | 697 | 
| 725 |  | 
| 726 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 698 void StoreBufferOverflowStub::Generate(MacroAssembler* masm) { | 
| 727   // We don't allow a GC during a store buffer overflow so there is no need to | 699   // We don't allow a GC during a store buffer overflow so there is no need to | 
| 728   // store the registers in any particular way, but we do have to store and | 700   // store the registers in any particular way, but we do have to store and | 
| 729   // restore them. | 701   // restore them. | 
| 730   __ mflr(r0); | 702   __ MultiPush(kJSCallerSaved | r14.bit()); | 
| 731   __ MultiPush(kJSCallerSaved | r0.bit()); |  | 
| 732   if (save_doubles()) { | 703   if (save_doubles()) { | 
| 733     __ MultiPushDoubles(kCallerSavedDoubles); | 704     __ MultiPushDoubles(kCallerSavedDoubles); | 
| 734   } | 705   } | 
| 735   const int argument_count = 1; | 706   const int argument_count = 1; | 
| 736   const int fp_argument_count = 0; | 707   const int fp_argument_count = 0; | 
| 737   const Register scratch = r4; | 708   const Register scratch = r3; | 
| 738 | 709 | 
| 739   AllowExternalCallThatCantCauseGC scope(masm); | 710   AllowExternalCallThatCantCauseGC scope(masm); | 
| 740   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); | 711   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch); | 
| 741   __ mov(r3, Operand(ExternalReference::isolate_address(isolate()))); | 712   __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); | 
| 742   __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()), | 713   __ CallCFunction(ExternalReference::store_buffer_overflow_function(isolate()), | 
| 743                    argument_count); | 714                    argument_count); | 
| 744   if (save_doubles()) { | 715   if (save_doubles()) { | 
| 745     __ MultiPopDoubles(kCallerSavedDoubles); | 716     __ MultiPopDoubles(kCallerSavedDoubles); | 
| 746   } | 717   } | 
| 747   __ MultiPop(kJSCallerSaved | r0.bit()); | 718   __ MultiPop(kJSCallerSaved | r14.bit()); | 
| 748   __ mtlr(r0); |  | 
| 749   __ Ret(); | 719   __ Ret(); | 
| 750 } | 720 } | 
| 751 | 721 | 
| 752 |  | 
| 753 void StoreRegistersStateStub::Generate(MacroAssembler* masm) { | 722 void StoreRegistersStateStub::Generate(MacroAssembler* masm) { | 
| 754   __ PushSafepointRegisters(); | 723   __ PushSafepointRegisters(); | 
| 755   __ blr(); | 724   __ b(r14); | 
| 756 } | 725 } | 
| 757 | 726 | 
| 758 |  | 
| 759 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { | 727 void RestoreRegistersStateStub::Generate(MacroAssembler* masm) { | 
| 760   __ PopSafepointRegisters(); | 728   __ PopSafepointRegisters(); | 
| 761   __ blr(); | 729   __ b(r14); | 
| 762 } | 730 } | 
| 763 | 731 | 
| 764 |  | 
| 765 void MathPowStub::Generate(MacroAssembler* masm) { | 732 void MathPowStub::Generate(MacroAssembler* masm) { | 
| 766   const Register base = r4; | 733   const Register base = r3; | 
| 767   const Register exponent = MathPowTaggedDescriptor::exponent(); | 734   const Register exponent = MathPowTaggedDescriptor::exponent(); | 
| 768   DCHECK(exponent.is(r5)); | 735   DCHECK(exponent.is(r4)); | 
| 769   const Register heapnumbermap = r8; | 736   const Register heapnumbermap = r7; | 
| 770   const Register heapnumber = r3; | 737   const Register heapnumber = r2; | 
| 771   const DoubleRegister double_base = d1; | 738   const DoubleRegister double_base = d1; | 
| 772   const DoubleRegister double_exponent = d2; | 739   const DoubleRegister double_exponent = d2; | 
| 773   const DoubleRegister double_result = d3; | 740   const DoubleRegister double_result = d3; | 
| 774   const DoubleRegister double_scratch = d0; | 741   const DoubleRegister double_scratch = d0; | 
| 775   const Register scratch = r11; | 742   const Register scratch = r1; | 
| 776   const Register scratch2 = r10; | 743   const Register scratch2 = r9; | 
| 777 | 744 | 
| 778   Label call_runtime, done, int_exponent; | 745   Label call_runtime, done, int_exponent; | 
| 779   if (exponent_type() == ON_STACK) { | 746   if (exponent_type() == ON_STACK) { | 
| 780     Label base_is_smi, unpack_exponent; | 747     Label base_is_smi, unpack_exponent; | 
| 781     // The exponent and base are supplied as arguments on the stack. | 748     // The exponent and base are supplied as arguments on the stack. | 
| 782     // This can only happen if the stub is called from non-optimized code. | 749     // This can only happen if the stub is called from non-optimized code. | 
| 783     // Load input parameters from stack to double registers. | 750     // Load input parameters from stack to double registers. | 
| 784     __ LoadP(base, MemOperand(sp, 1 * kPointerSize)); | 751     __ LoadP(base, MemOperand(sp, 1 * kPointerSize)); | 
| 785     __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize)); | 752     __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize)); | 
| 786 | 753 | 
| 787     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); | 754     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex); | 
| 788 | 755 | 
| 789     __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); | 756     __ UntagAndJumpIfSmi(scratch, base, &base_is_smi); | 
| 790     __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset)); | 757     __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset)); | 
| 791     __ cmp(scratch, heapnumbermap); | 758     __ CmpP(scratch, heapnumbermap); | 
| 792     __ bne(&call_runtime); | 759     __ bne(&call_runtime); | 
| 793 | 760 | 
| 794     __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); | 761     __ LoadDouble(double_base, FieldMemOperand(base, HeapNumber::kValueOffset)); | 
| 795     __ b(&unpack_exponent); | 762     __ b(&unpack_exponent, Label::kNear); | 
| 796 | 763 | 
| 797     __ bind(&base_is_smi); | 764     __ bind(&base_is_smi); | 
| 798     __ ConvertIntToDouble(scratch, double_base); | 765     __ ConvertIntToDouble(scratch, double_base); | 
| 799     __ bind(&unpack_exponent); | 766     __ bind(&unpack_exponent); | 
| 800 | 767 | 
| 801     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 768     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 
| 802     __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | 769     __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset)); | 
| 803     __ cmp(scratch, heapnumbermap); | 770     __ CmpP(scratch, heapnumbermap); | 
| 804     __ bne(&call_runtime); | 771     __ bne(&call_runtime); | 
| 805 | 772 | 
| 806     __ lfd(double_exponent, | 773     __ LoadDouble(double_exponent, | 
| 807            FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 774                   FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 
| 808   } else if (exponent_type() == TAGGED) { | 775   } else if (exponent_type() == TAGGED) { | 
| 809     // Base is already in double_base. | 776     // Base is already in double_base. | 
| 810     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 777     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent); | 
| 811 | 778 | 
| 812     __ lfd(double_exponent, | 779     __ LoadDouble(double_exponent, | 
| 813            FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 780                   FieldMemOperand(exponent, HeapNumber::kValueOffset)); | 
| 814   } | 781   } | 
| 815 | 782 | 
| 816   if (exponent_type() != INTEGER) { | 783   if (exponent_type() != INTEGER) { | 
| 817     // Detect integer exponents stored as double. | 784     // Detect integer exponents stored as double. | 
| 818     __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, | 785     __ TryDoubleToInt32Exact(scratch, double_exponent, scratch2, | 
| 819                              double_scratch); | 786                              double_scratch); | 
| 820     __ beq(&int_exponent); | 787     __ beq(&int_exponent, Label::kNear); | 
| 821 | 788 | 
| 822     if (exponent_type() == ON_STACK) { | 789     if (exponent_type() == ON_STACK) { | 
| 823       // Detect square root case.  Crankshaft detects constant +/-0.5 at | 790       // Detect square root case.  Crankshaft detects constant +/-0.5 at | 
| 824       // compile time and uses DoMathPowHalf instead.  We then skip this check | 791       // compile time and uses DoMathPowHalf instead.  We then skip this check | 
| 825       // for non-constant cases of +/-0.5 as these hardly occur. | 792       // for non-constant cases of +/-0.5 as these hardly occur. | 
| 826       Label not_plus_half, not_minus_inf1, not_minus_inf2; | 793       Label not_plus_half, not_minus_inf1, not_minus_inf2; | 
| 827 | 794 | 
| 828       // Test for 0.5. | 795       // Test for 0.5. | 
| 829       __ LoadDoubleLiteral(double_scratch, 0.5, scratch); | 796       __ LoadDoubleLiteral(double_scratch, 0.5, scratch); | 
| 830       __ fcmpu(double_exponent, double_scratch); | 797       __ cdbr(double_exponent, double_scratch); | 
| 831       __ bne(&not_plus_half); | 798       __ bne(&not_plus_half, Label::kNear); | 
| 832 | 799 | 
| 833       // Calculates square root of base.  Check for the special case of | 800       // Calculates square root of base.  Check for the special case of | 
| 834       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). | 801       // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13). | 
| 835       __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch); | 802       __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch); | 
| 836       __ fcmpu(double_base, double_scratch); | 803       __ cdbr(double_base, double_scratch); | 
| 837       __ bne(&not_minus_inf1); | 804       __ bne(&not_minus_inf1, Label::kNear); | 
| 838       __ fneg(double_result, double_scratch); | 805       __ lcdbr(double_result, double_scratch); | 
| 839       __ b(&done); | 806       __ b(&done); | 
| 840       __ bind(&not_minus_inf1); | 807       __ bind(&not_minus_inf1); | 
| 841 | 808 | 
| 842       // Add +0 to convert -0 to +0. | 809       // Add +0 to convert -0 to +0. | 
| 843       __ fadd(double_scratch, double_base, kDoubleRegZero); | 810       __ ldr(double_scratch, double_base); | 
| 844       __ fsqrt(double_result, double_scratch); | 811       __ lzdr(kDoubleRegZero); | 
|  | 812       __ adbr(double_scratch, kDoubleRegZero); | 
|  | 813       __ sqdbr(double_result, double_scratch); | 
| 845       __ b(&done); | 814       __ b(&done); | 
| 846 | 815 | 
| 847       __ bind(&not_plus_half); | 816       __ bind(&not_plus_half); | 
| 848       __ LoadDoubleLiteral(double_scratch, -0.5, scratch); | 817       __ LoadDoubleLiteral(double_scratch, -0.5, scratch); | 
| 849       __ fcmpu(double_exponent, double_scratch); | 818       __ cdbr(double_exponent, double_scratch); | 
| 850       __ bne(&call_runtime); | 819       __ bne(&call_runtime); | 
| 851 | 820 | 
| 852       // Calculates square root of base.  Check for the special case of | 821       // Calculates square root of base.  Check for the special case of | 
| 853       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). | 822       // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13). | 
| 854       __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch); | 823       __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch); | 
| 855       __ fcmpu(double_base, double_scratch); | 824       __ cdbr(double_base, double_scratch); | 
| 856       __ bne(&not_minus_inf2); | 825       __ bne(&not_minus_inf2, Label::kNear); | 
| 857       __ fmr(double_result, kDoubleRegZero); | 826       __ ldr(double_result, kDoubleRegZero); | 
| 858       __ b(&done); | 827       __ b(&done); | 
| 859       __ bind(&not_minus_inf2); | 828       __ bind(&not_minus_inf2); | 
| 860 | 829 | 
| 861       // Add +0 to convert -0 to +0. | 830       // Add +0 to convert -0 to +0. | 
| 862       __ fadd(double_scratch, double_base, kDoubleRegZero); | 831       __ ldr(double_scratch, double_base); | 
|  | 832       __ lzdr(kDoubleRegZero); | 
|  | 833       __ adbr(double_scratch, kDoubleRegZero); | 
| 863       __ LoadDoubleLiteral(double_result, 1.0, scratch); | 834       __ LoadDoubleLiteral(double_result, 1.0, scratch); | 
| 864       __ fsqrt(double_scratch, double_scratch); | 835       __ sqdbr(double_scratch, double_scratch); | 
| 865       __ fdiv(double_result, double_result, double_scratch); | 836       __ ddbr(double_result, double_scratch); | 
| 866       __ b(&done); | 837       __ b(&done); | 
| 867     } | 838     } | 
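The two inlined cases above cover only the ±0.5 exponents given special treatment by ES5 15.8.2.13; finite negative bases and everything else fall through to the C pow call below. A hedged C equivalent (IEEE doubles assumed; note how adding +0.0 first keeps a -0.0 base from leaking a negative zero through the square root):

```cpp
#include <cmath>
#include <limits>

// Math.pow(base, 0.5): -Infinity yields +Infinity; base + 0.0 turns a
// -0.0 base into +0.0 before the square root.
double PowPlusHalf(double base) {
  if (base == -std::numeric_limits<double>::infinity())
    return std::numeric_limits<double>::infinity();
  return std::sqrt(base + 0.0);
}

// Math.pow(base, -0.5): -Infinity yields +0; otherwise 1 / sqrt(base + 0).
double PowMinusHalf(double base) {
  if (base == -std::numeric_limits<double>::infinity()) return 0.0;
  return 1.0 / std::sqrt(base + 0.0);
}
```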
| 868 | 839 | 
| 869     __ mflr(r0); | 840     __ push(r14); | 
| 870     __ push(r0); |  | 
| 871     { | 841     { | 
| 872       AllowExternalCallThatCantCauseGC scope(masm); | 842       AllowExternalCallThatCantCauseGC scope(masm); | 
| 873       __ PrepareCallCFunction(0, 2, scratch); | 843       __ PrepareCallCFunction(0, 2, scratch); | 
| 874       __ MovToFloatParameters(double_base, double_exponent); | 844       __ MovToFloatParameters(double_base, double_exponent); | 
| 875       __ CallCFunction( | 845       __ CallCFunction( | 
| 876           ExternalReference::power_double_double_function(isolate()), 0, 2); | 846           ExternalReference::power_double_double_function(isolate()), 0, 2); | 
| 877     } | 847     } | 
| 878     __ pop(r0); | 848     __ pop(r14); | 
| 879     __ mtlr(r0); |  | 
| 880     __ MovFromFloatResult(double_result); | 849     __ MovFromFloatResult(double_result); | 
| 881     __ b(&done); | 850     __ b(&done); | 
| 882   } | 851   } | 
| 883 | 852 | 
| 884   // Calculate power with integer exponent. | 853   // Calculate power with integer exponent. | 
| 885   __ bind(&int_exponent); | 854   __ bind(&int_exponent); | 
| 886 | 855 | 
| 887   // Get two copies of exponent in the registers scratch and exponent. | 856   // Get two copies of exponent in the registers scratch and exponent. | 
| 888   if (exponent_type() == INTEGER) { | 857   if (exponent_type() == INTEGER) { | 
| 889     __ mr(scratch, exponent); | 858     __ LoadRR(scratch, exponent); | 
| 890   } else { | 859   } else { | 
| 891     // Exponent has previously been stored into scratch as untagged integer. | 860     // Exponent has previously been stored into scratch as untagged integer. | 
| 892     __ mr(exponent, scratch); | 861     __ LoadRR(exponent, scratch); | 
| 893   } | 862   } | 
| 894   __ fmr(double_scratch, double_base);  // Back up base. | 863   __ ldr(double_scratch, double_base);  // Back up base. | 
| 895   __ li(scratch2, Operand(1)); | 864   __ LoadImmP(scratch2, Operand(1)); | 
| 896   __ ConvertIntToDouble(scratch2, double_result); | 865   __ ConvertIntToDouble(scratch2, double_result); | 
| 897 | 866 | 
| 898   // Get absolute value of exponent. | 867   // Get absolute value of exponent. | 
| 899   __ cmpi(scratch, Operand::Zero()); | 868   Label positive_exponent; | 
| 900   if (CpuFeatures::IsSupported(ISELECT)) { | 869   __ CmpP(scratch, Operand::Zero()); | 
| 901     __ neg(scratch2, scratch); | 870   __ bge(&positive_exponent, Label::kNear); | 
| 902     __ isel(lt, scratch, scratch2, scratch); | 871   __ LoadComplementRR(scratch, scratch); | 
| 903   } else { | 872   __ bind(&positive_exponent); | 
| 904     Label positive_exponent; |  | 
| 905     __ bge(&positive_exponent); |  | 
| 906     __ neg(scratch, scratch); |  | 
| 907     __ bind(&positive_exponent); |  | 
| 908   } |  | 
| 909 | 873 | 
| 910   Label while_true, no_carry, loop_end; | 874   Label while_true, no_carry, loop_end; | 
| 911   __ bind(&while_true); | 875   __ bind(&while_true); | 
| 912   __ andi(scratch2, scratch, Operand(1)); | 876   __ mov(scratch2, Operand(1)); | 
| 913   __ beq(&no_carry, cr0); | 877   __ AndP(scratch2, scratch); | 
| 914   __ fmul(double_result, double_result, double_scratch); | 878   __ beq(&no_carry, Label::kNear); | 
|  | 879   __ mdbr(double_result, double_scratch); | 
| 915   __ bind(&no_carry); | 880   __ bind(&no_carry); | 
| 916   __ ShiftRightArithImm(scratch, scratch, 1, SetRC); | 881   __ ShiftRightArithP(scratch, scratch, Operand(1)); | 
| 917   __ beq(&loop_end, cr0); | 882   __ beq(&loop_end, Label::kNear); | 
| 918   __ fmul(double_scratch, double_scratch, double_scratch); | 883   __ mdbr(double_scratch, double_scratch); | 
| 919   __ b(&while_true); | 884   __ b(&while_true); | 
| 920   __ bind(&loop_end); | 885   __ bind(&loop_end); | 
| 921 | 886 | 
| 922   __ cmpi(exponent, Operand::Zero()); | 887   __ CmpP(exponent, Operand::Zero()); | 
| 923   __ bge(&done); | 888   __ bge(&done); | 
| 924 | 889 | 
| 925   __ li(scratch2, Operand(1)); | 890   // get 1/double_result: | 
| 926   __ ConvertIntToDouble(scratch2, double_scratch); | 891   __ ldr(double_scratch, double_result); | 
| 927   __ fdiv(double_result, double_scratch, double_result); | 892   __ LoadImmP(scratch2, Operand(1)); | 
|  | 893   __ ConvertIntToDouble(scratch2, double_result); | 
|  | 894   __ ddbr(double_result, double_scratch); | 
|  | 895 | 
| 928   // Test whether result is zero.  Bail out to check for subnormal result. | 896   // Test whether result is zero.  Bail out to check for subnormal result. | 
| 929   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. | 897   // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. | 
| 930   __ fcmpu(double_result, kDoubleRegZero); | 898   __ lzdr(kDoubleRegZero); | 
| 931   __ bne(&done); | 899   __ cdbr(double_result, kDoubleRegZero); | 
|  | 900   __ bne(&done, Label::kNear); | 
| 932   // double_exponent may not contain the exponent value if the input was a | 901   // double_exponent may not contain the exponent value if the input was a | 
| 933   // smi.  We set it to the exponent value before bailing out. | 902   // smi.  We set it to the exponent value before bailing out. | 
| 934   __ ConvertIntToDouble(exponent, double_exponent); | 903   __ ConvertIntToDouble(exponent, double_exponent); | 
| 935 | 904 | 
| 936   // Returning or bailing out. | 905   // Returning or bailing out. | 
| 937   if (exponent_type() == ON_STACK) { | 906   if (exponent_type() == ON_STACK) { | 
| 938     // The arguments are still on the stack. | 907     // The arguments are still on the stack. | 
| 939     __ bind(&call_runtime); | 908     __ bind(&call_runtime); | 
| 940     __ TailCallRuntime(Runtime::kMathPowRT); | 909     __ TailCallRuntime(Runtime::kMathPowRT); | 
| 941 | 910 | 
| 942     // The stub is called from non-optimized code, which expects the result | 911     // The stub is called from non-optimized code, which expects the result | 
| 943     // as heap number in exponent. | 912     // as heap number in exponent. | 
| 944     __ bind(&done); | 913     __ bind(&done); | 
| 945     __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap, | 914     __ AllocateHeapNumber(heapnumber, scratch, scratch2, heapnumbermap, | 
| 946                           &call_runtime); | 915                           &call_runtime); | 
| 947     __ stfd(double_result, | 916     __ StoreDouble(double_result, | 
| 948             FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 917                    FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 
| 949     DCHECK(heapnumber.is(r3)); | 918     DCHECK(heapnumber.is(r2)); | 
| 950     __ Ret(2); | 919     __ Ret(2); | 
| 951   } else { | 920   } else { | 
| 952     __ mflr(r0); | 921     __ push(r14); | 
| 953     __ push(r0); |  | 
| 954     { | 922     { | 
| 955       AllowExternalCallThatCantCauseGC scope(masm); | 923       AllowExternalCallThatCantCauseGC scope(masm); | 
| 956       __ PrepareCallCFunction(0, 2, scratch); | 924       __ PrepareCallCFunction(0, 2, scratch); | 
| 957       __ MovToFloatParameters(double_base, double_exponent); | 925       __ MovToFloatParameters(double_base, double_exponent); | 
| 958       __ CallCFunction( | 926       __ CallCFunction( | 
| 959           ExternalReference::power_double_double_function(isolate()), 0, 2); | 927           ExternalReference::power_double_double_function(isolate()), 0, 2); | 
| 960     } | 928     } | 
| 961     __ pop(r0); | 929     __ pop(r14); | 
| 962     __ mtlr(r0); |  | 
| 963     __ MovFromFloatResult(double_result); | 930     __ MovFromFloatResult(double_result); | 
| 964 | 931 | 
| 965     __ bind(&done); | 932     __ bind(&done); | 
| 966     __ Ret(); | 933     __ Ret(); | 
| 967   } | 934   } | 
| 968 } | 935 } | 
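For integer exponents the stub emits a square-and-multiply loop over the bits of |exponent|, taking the reciprocal at the end for negative exponents; the zero-result check exists because x^-y == (1/x)^y breaks down once intermediate results go subnormal. A sketch of the equivalent C loop (illustrative, without the runtime bailout):

```cpp
// Binary exponentiation matching the stub's &while_true loop:
// 'running' plays the role of double_scratch, 'result' of double_result.
double PowInt(double base, int exponent) {
  double result = 1.0;
  double running = base;
  unsigned e = exponent < 0 ? 0u - static_cast<unsigned>(exponent)
                            : static_cast<unsigned>(exponent);
  while (e != 0) {
    if (e & 1u) result *= running;   // low bit set: multiply it in
    e >>= 1;
    if (e != 0) running *= running;  // square for the next bit
  }
  if (exponent < 0) result = 1.0 / result;  // reciprocal for negative y
  return result;
}
```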
| 969 | 936 | 
| 970 |  | 
| 971 bool CEntryStub::NeedsImmovableCode() { return true; } | 937 bool CEntryStub::NeedsImmovableCode() { return true; } | 
| 972 | 938 | 
| 973 |  | 
| 974 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 939 void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 
| 975   CEntryStub::GenerateAheadOfTime(isolate); | 940   CEntryStub::GenerateAheadOfTime(isolate); | 
| 976   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 941   StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 
| 977   StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 942   StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 
| 978   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | 943   ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | 
| 979   CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | 944   CreateAllocationSiteStub::GenerateAheadOfTime(isolate); | 
| 980   CreateWeakCellStub::GenerateAheadOfTime(isolate); | 945   CreateWeakCellStub::GenerateAheadOfTime(isolate); | 
| 981   BinaryOpICStub::GenerateAheadOfTime(isolate); | 946   BinaryOpICStub::GenerateAheadOfTime(isolate); | 
| 982   StoreRegistersStateStub::GenerateAheadOfTime(isolate); | 947   StoreRegistersStateStub::GenerateAheadOfTime(isolate); | 
| 983   RestoreRegistersStateStub::GenerateAheadOfTime(isolate); | 948   RestoreRegistersStateStub::GenerateAheadOfTime(isolate); | 
| 984   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); | 949   BinaryOpICWithAllocationSiteStub::GenerateAheadOfTime(isolate); | 
| 985   StoreFastElementStub::GenerateAheadOfTime(isolate); | 950   StoreFastElementStub::GenerateAheadOfTime(isolate); | 
| 986   TypeofStub::GenerateAheadOfTime(isolate); | 951   TypeofStub::GenerateAheadOfTime(isolate); | 
| 987 } | 952 } | 
| 988 | 953 | 
| 989 |  | 
| 990 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { | 954 void StoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { | 
| 991   StoreRegistersStateStub stub(isolate); | 955   StoreRegistersStateStub stub(isolate); | 
| 992   stub.GetCode(); | 956   stub.GetCode(); | 
| 993 } | 957 } | 
| 994 | 958 | 
| 995 |  | 
| 996 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { | 959 void RestoreRegistersStateStub::GenerateAheadOfTime(Isolate* isolate) { | 
| 997   RestoreRegistersStateStub stub(isolate); | 960   RestoreRegistersStateStub stub(isolate); | 
| 998   stub.GetCode(); | 961   stub.GetCode(); | 
| 999 } | 962 } | 
| 1000 | 963 | 
| 1001 |  | 
| 1002 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 964 void CodeStub::GenerateFPStubs(Isolate* isolate) { | 
| 1003   // Generate if not already in cache. |  | 
| 1004   SaveFPRegsMode mode = kSaveFPRegs; | 965   SaveFPRegsMode mode = kSaveFPRegs; | 
| 1005   CEntryStub(isolate, 1, mode).GetCode(); | 966   CEntryStub(isolate, 1, mode).GetCode(); | 
| 1006   StoreBufferOverflowStub(isolate, mode).GetCode(); | 967   StoreBufferOverflowStub(isolate, mode).GetCode(); | 
| 1007   isolate->set_fp_stubs_generated(true); | 968   isolate->set_fp_stubs_generated(true); | 
| 1008 } | 969 } | 
| 1009 | 970 | 
| 1010 |  | 
| 1011 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | 971 void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | 
| 1012   CEntryStub stub(isolate, 1, kDontSaveFPRegs); | 972   CEntryStub stub(isolate, 1, kDontSaveFPRegs); | 
| 1013   stub.GetCode(); | 973   stub.GetCode(); | 
| 1014 } | 974 } | 
| 1015 | 975 | 
| 1016 |  | 
| 1017 void CEntryStub::Generate(MacroAssembler* masm) { | 976 void CEntryStub::Generate(MacroAssembler* masm) { | 
| 1018   // Called from JavaScript; parameters are on stack as if calling JS function. | 977   // Called from JavaScript; parameters are on stack as if calling JS function. | 
| 1019   // r3: number of arguments including receiver | 978   // r2: number of arguments including receiver | 
| 1020   // r4: pointer to builtin function | 979   // r3: pointer to builtin function | 
| 1021   // fp: frame pointer  (restored after C call) | 980   // fp: frame pointer  (restored after C call) | 
| 1022   // sp: stack pointer  (restored as callee's sp after C call) | 981   // sp: stack pointer  (restored as callee's sp after C call) | 
| 1023   // cp: current context  (C callee-saved) | 982   // cp: current context  (C callee-saved) | 
| 1024   // | 983   // | 
| 1025   // If argv_in_register(): | 984   // If argv_in_register(): | 
| 1026   // r5: pointer to the first argument | 985   // r4: pointer to the first argument | 
| 1027   ProfileEntryHookStub::MaybeCallEntryHook(masm); | 986   ProfileEntryHookStub::MaybeCallEntryHook(masm); | 
| 1028 | 987 | 
| 1029   __ mr(r15, r4); | 988   __ LoadRR(r7, r3); | 
| 1030 | 989 | 
| 1031   if (argv_in_register()) { | 990   if (argv_in_register()) { | 
| 1032     // Move argv into the correct register. | 991     // Move argv into the correct register. | 
| 1033     __ mr(r4, r5); | 992     __ LoadRR(r3, r4); | 
| 1034   } else { | 993   } else { | 
| 1035     // Compute the argv pointer. | 994     // Compute the argv pointer. | 
| 1036     __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2)); | 995     __ ShiftLeftP(r3, r2, Operand(kPointerSizeLog2)); | 
| 1037     __ add(r4, r4, sp); | 996     __ lay(r3, MemOperand(r3, sp, -kPointerSize)); | 
| 1038     __ subi(r4, r4, Operand(kPointerSize)); |  | 
| 1039   } | 997   } | 
| 1040 | 998 | 
| 1041   // Enter the exit frame that transitions from JavaScript to C++. | 999   // Enter the exit frame that transitions from JavaScript to C++. | 
| 1042   FrameScope scope(masm, StackFrame::MANUAL); | 1000   FrameScope scope(masm, StackFrame::MANUAL); | 
| 1043 | 1001 | 
| 1044   // Need at least one extra slot for return address location. | 1002   // Need at least one extra slot for return address location. | 
| 1045   int arg_stack_space = 1; | 1003   int arg_stack_space = 1; | 
| 1046 | 1004 | 
| 1047   // Pass buffer for return value on stack if necessary | 1005   // Pass buffer for return value on stack if necessary | 
| 1048   bool needs_return_buffer = | 1006   bool needs_return_buffer = | 
| 1049       result_size() > 2 || | 1007       result_size() > 2 || | 
| 1050       (result_size() == 2 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS); | 1008       (result_size() == 2 && !ABI_RETURNS_OBJECTPAIR_IN_REGS); | 
| 1051   if (needs_return_buffer) { | 1009   if (needs_return_buffer) { | 
| 1052     arg_stack_space += result_size(); | 1010     arg_stack_space += result_size(); | 
| 1053   } | 1011   } | 
| 1054 | 1012 | 
|  | 1013 #if V8_TARGET_ARCH_S390X | 
|  | 1014   // 64-bit Linux passes the Argument object by reference, not by value | 
|  | 1015   arg_stack_space += 2; | 
|  | 1016 #endif | 
|  | 1017 | 
| 1055   __ EnterExitFrame(save_doubles(), arg_stack_space); | 1018   __ EnterExitFrame(save_doubles(), arg_stack_space); | 
| 1056 | 1019 | 
| 1057   // Store a copy of argc in callee-saved registers for later. | 1020   // Store a copy of argc, argv in callee-saved registers for later. | 
| 1058   __ mr(r14, r3); | 1021   __ LoadRR(r6, r2); | 
| 1059 | 1022   __ LoadRR(r8, r3); | 
| 1060   // r3, r14: number of arguments including receiver  (C callee-saved) | 1023   // r2, r6: number of arguments including receiver  (C callee-saved) | 
| 1061   // r4: pointer to the first argument | 1024   // r3, r8: pointer to the first argument | 
| 1062   // r15: pointer to builtin function  (C callee-saved) | 1025   // r7: pointer to builtin function  (C callee-saved) | 
| 1063 | 1026 | 
| 1064   // Result returned in registers or stack, depending on result size and ABI. | 1027   // Result returned in registers or stack, depending on result size and ABI. | 
| 1065 | 1028 | 
| 1066   Register isolate_reg = r5; | 1029   Register isolate_reg = r4; | 
| 1067   if (needs_return_buffer) { | 1030   if (needs_return_buffer) { | 
| 1068     // The return value is a non-scalar value. | 1031     // The return value is a 16-byte non-scalar value. | 
| 1069     // Use frame storage reserved by calling function to pass return | 1032     // Use frame storage reserved by calling function to pass return | 
| 1070     // buffer as implicit first argument. | 1033     // buffer as implicit first argument in R2.  Shift original parameters | 
| 1071     __ mr(r5, r4); | 1034     // by one register each. | 
| 1072     __ mr(r4, r3); | 1035     __ LoadRR(r4, r3); | 
| 1073     __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize)); | 1036     __ LoadRR(r3, r2); | 
| 1074     isolate_reg = r6; | 1037     __ la(r2, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); | 
|  | 1038     isolate_reg = r5; | 
| 1075   } | 1039   } | 
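The register shuffle above is the standard hidden-return-buffer convention: when a C function's aggregate return value does not fit in registers, the caller passes a pointer to reserved storage as an implicit first argument and every visible argument moves down one register; after the call the stub loads the buffer back into r2/r3 (and r4). A small self-contained illustration of the same rewrite, with hypothetical names:

```cpp
#include <cstdio>

// ObjectPair and MakePair are illustrative stand-ins, not V8's definitions.
struct ObjectPair { void* first; void* second; };  // two pointer-sized slots

ObjectPair MakePair(void* a, void* b) { return {a, b}; }

int main() {
  int x = 1, y = 2;
  // On ABIs that do not return pairs in registers, this call is compiled
  // as MakePair(&hidden_buffer, &x, &y): the visible arguments shift down
  // one register and the caller reads the result out of the buffer, which
  // is the r2->r3, r3->r4 shuffle the stub performs by hand.
  ObjectPair p = MakePair(&x, &y);
  std::printf("%p %p\n", p.first, p.second);
  return 0;
}
```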
| 1076 |  | 
| 1077   // Call C built-in. | 1040   // Call C built-in. | 
| 1078   __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate()))); | 1041   __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate()))); | 
| 1079 | 1042 | 
| 1080   Register target = r15; | 1043   Register target = r7; | 
| 1081   if (ABI_USES_FUNCTION_DESCRIPTORS) { |  | 
| 1082     // AIX/PPC64BE Linux use a function descriptor. |  | 
| 1083     __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize)); |  | 
| 1084     __ LoadP(ip, MemOperand(r15, 0));  // Instruction address |  | 
| 1085     target = ip; |  | 
| 1086   } else if (ABI_CALL_VIA_IP) { |  | 
| 1087     __ Move(ip, r15); |  | 
| 1088     target = ip; |  | 
| 1089   } |  | 
| 1090 | 1044 | 
| 1091   // To let the GC traverse the return address of the exit frames, we need to | 1045   // To let the GC traverse the return address of the exit frames, we need to | 
| 1092   // know where the return address is. The CEntryStub is unmovable, so | 1046   // know where the return address is. The CEntryStub is unmovable, so | 
| 1093   // we can store the address on the stack to be able to find it again and | 1047   // we can store the address on the stack to be able to find it again and | 
| 1094   // we never have to restore it, because it will not change. | 1048   // we never have to restore it, because it will not change. | 
| 1095   Label after_call; | 1049   { | 
| 1096   __ mov_label_addr(r0, &after_call); | 1050     Label return_label; | 
| 1097   __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); | 1051     __ larl(r14, &return_label);  // Generate the return addr of call later. | 
| 1098   __ Call(target); | 1052     __ StoreP(r14, MemOperand(sp, kStackFrameRASlot * kPointerSize)); | 
| 1099   __ bind(&after_call); | 1053 | 
|  | 1054     // zLinux ABI requires caller's frame to have sufficient space for callee | 
|  | 1055     // preserved register save area. | 
|  | 1056     // __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize)); | 
|  | 1057     __ positions_recorder()->WriteRecordedPositions(); | 
|  | 1058     __ b(target); | 
|  | 1059     __ bind(&return_label); | 
|  | 1060     // __ la(sp, MemOperand(sp, +kCalleeRegisterSaveAreaSize)); | 
|  | 1061   } | 
| 1100 | 1062 | 
| 1101   // If return value is on the stack, pop it to registers. | 1063   // If return value is on the stack, pop it to registers. | 
| 1102   if (needs_return_buffer) { | 1064   if (needs_return_buffer) { | 
| 1103     if (result_size() > 2) __ LoadP(r5, MemOperand(r3, 2 * kPointerSize)); | 1065     if (result_size() > 2) __ LoadP(r4, MemOperand(r2, 2 * kPointerSize)); | 
| 1104     __ LoadP(r4, MemOperand(r3, kPointerSize)); | 1066     __ LoadP(r3, MemOperand(r2, kPointerSize)); | 
| 1105     __ LoadP(r3, MemOperand(r3)); | 1067     __ LoadP(r2, MemOperand(r2)); | 
| 1106   } | 1068   } | 
| 1107 | 1069 | 
| 1108   // Check result for exception sentinel. | 1070   // Check result for exception sentinel. | 
| 1109   Label exception_returned; | 1071   Label exception_returned; | 
| 1110   __ CompareRoot(r3, Heap::kExceptionRootIndex); | 1072   __ CompareRoot(r2, Heap::kExceptionRootIndex); | 
| 1111   __ beq(&exception_returned); | 1073   __ beq(&exception_returned, Label::kNear); | 
| 1112 | 1074 | 
| 1113   // Check that there is no pending exception, otherwise we | 1075   // Check that there is no pending exception, otherwise we | 
| 1114   // should have returned the exception sentinel. | 1076   // should have returned the exception sentinel. | 
| 1115   if (FLAG_debug_code) { | 1077   if (FLAG_debug_code) { | 
| 1116     Label okay; | 1078     Label okay; | 
| 1117     ExternalReference pending_exception_address( | 1079     ExternalReference pending_exception_address( | 
| 1118         Isolate::kPendingExceptionAddress, isolate()); | 1080         Isolate::kPendingExceptionAddress, isolate()); | 
| 1119 | 1081     __ mov(r4, Operand(pending_exception_address)); | 
| 1120     __ mov(r6, Operand(pending_exception_address)); | 1082     __ LoadP(r4, MemOperand(r4)); | 
| 1121     __ LoadP(r6, MemOperand(r6)); | 1083     __ CompareRoot(r4, Heap::kTheHoleValueRootIndex); | 
| 1122     __ CompareRoot(r6, Heap::kTheHoleValueRootIndex); |  | 
| 1123     // Cannot use check here as it attempts to generate call into runtime. | 1084     // Cannot use check here as it attempts to generate call into runtime. | 
| 1124     __ beq(&okay); | 1085     __ beq(&okay, Label::kNear); | 
| 1125     __ stop("Unexpected pending exception"); | 1086     __ stop("Unexpected pending exception"); | 
| 1126     __ bind(&okay); | 1087     __ bind(&okay); | 
| 1127   } | 1088   } | 
| 1128 | 1089 | 
| 1129   // Exit C frame and return. | 1090   // Exit C frame and return. | 
| 1130   // r3:r4: result | 1091   // r2:r3: result | 
| 1131   // sp: stack pointer | 1092   // sp: stack pointer | 
| 1132   // fp: frame pointer | 1093   // fp: frame pointer | 
| 1133   Register argc; | 1094   Register argc; | 
| 1134   if (argv_in_register()) { | 1095   if (argv_in_register()) { | 
| 1135     // We don't want to pop arguments so set argc to no_reg. | 1096     // We don't want to pop arguments so set argc to no_reg. | 
| 1136     argc = no_reg; | 1097     argc = no_reg; | 
| 1137   } else { | 1098   } else { | 
| 1138     // r14: still holds argc (callee-saved). | 1099     // r6: still holds argc (callee-saved). | 
| 1139     argc = r14; | 1100     argc = r6; | 
| 1140   } | 1101   } | 
| 1141   __ LeaveExitFrame(save_doubles(), argc, true); | 1102   __ LeaveExitFrame(save_doubles(), argc, true); | 
| 1142   __ blr(); | 1103   __ b(r14); | 
| 1143 | 1104 | 
| 1144   // Handling of exception. | 1105   // Handling of exception. | 
| 1145   __ bind(&exception_returned); | 1106   __ bind(&exception_returned); | 
| 1146 | 1107 | 
| 1147   ExternalReference pending_handler_context_address( | 1108   ExternalReference pending_handler_context_address( | 
| 1148       Isolate::kPendingHandlerContextAddress, isolate()); | 1109       Isolate::kPendingHandlerContextAddress, isolate()); | 
| 1149   ExternalReference pending_handler_code_address( | 1110   ExternalReference pending_handler_code_address( | 
| 1150       Isolate::kPendingHandlerCodeAddress, isolate()); | 1111       Isolate::kPendingHandlerCodeAddress, isolate()); | 
| 1151   ExternalReference pending_handler_offset_address( | 1112   ExternalReference pending_handler_offset_address( | 
| 1152       Isolate::kPendingHandlerOffsetAddress, isolate()); | 1113       Isolate::kPendingHandlerOffsetAddress, isolate()); | 
| 1153   ExternalReference pending_handler_fp_address( | 1114   ExternalReference pending_handler_fp_address( | 
| 1154       Isolate::kPendingHandlerFPAddress, isolate()); | 1115       Isolate::kPendingHandlerFPAddress, isolate()); | 
| 1155   ExternalReference pending_handler_sp_address( | 1116   ExternalReference pending_handler_sp_address( | 
| 1156       Isolate::kPendingHandlerSPAddress, isolate()); | 1117       Isolate::kPendingHandlerSPAddress, isolate()); | 
| 1157 | 1118 | 
| 1158   // Ask the runtime for help to determine the handler. This will set r3 to | 1119   // Ask the runtime for help to determine the handler. This will set r2 to | 
| 1159   // contain the current pending exception, don't clobber it. | 1120   // contain the current pending exception, don't clobber it. | 
| 1160   ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler, | 1121   ExternalReference find_handler(Runtime::kUnwindAndFindExceptionHandler, | 
| 1161                                  isolate()); | 1122                                  isolate()); | 
| 1162   { | 1123   { | 
| 1163     FrameScope scope(masm, StackFrame::MANUAL); | 1124     FrameScope scope(masm, StackFrame::MANUAL); | 
| 1164     __ PrepareCallCFunction(3, 0, r3); | 1125     __ PrepareCallCFunction(3, 0, r2); | 
| 1165     __ li(r3, Operand::Zero()); | 1126     __ LoadImmP(r2, Operand::Zero()); | 
| 1166     __ li(r4, Operand::Zero()); | 1127     __ LoadImmP(r3, Operand::Zero()); | 
| 1167     __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); | 1128     __ mov(r4, Operand(ExternalReference::isolate_address(isolate()))); | 
| 1168     __ CallCFunction(find_handler, 3); | 1129     __ CallCFunction(find_handler, 3); | 
| 1169   } | 1130   } | 
| 1170 | 1131 | 
| 1171   // Retrieve the handler context, SP and FP. | 1132   // Retrieve the handler context, SP and FP. | 
| 1172   __ mov(cp, Operand(pending_handler_context_address)); | 1133   __ mov(cp, Operand(pending_handler_context_address)); | 
| 1173   __ LoadP(cp, MemOperand(cp)); | 1134   __ LoadP(cp, MemOperand(cp)); | 
| 1174   __ mov(sp, Operand(pending_handler_sp_address)); | 1135   __ mov(sp, Operand(pending_handler_sp_address)); | 
| 1175   __ LoadP(sp, MemOperand(sp)); | 1136   __ LoadP(sp, MemOperand(sp)); | 
| 1176   __ mov(fp, Operand(pending_handler_fp_address)); | 1137   __ mov(fp, Operand(pending_handler_fp_address)); | 
| 1177   __ LoadP(fp, MemOperand(fp)); | 1138   __ LoadP(fp, MemOperand(fp)); | 
| 1178 | 1139 | 
| 1179   // If the handler is a JS frame, restore the context to the frame. Note that | 1140   // If the handler is a JS frame, restore the context to the frame. Note that | 
| 1180   // the context will be set to (cp == 0) for non-JS frames. | 1141   // the context will be set to (cp == 0) for non-JS frames. | 
| 1181   Label skip; | 1142   Label skip; | 
| 1182   __ cmpi(cp, Operand::Zero()); | 1143   __ CmpP(cp, Operand::Zero()); | 
| 1183   __ beq(&skip); | 1144   __ beq(&skip, Label::kNear); | 
| 1184   __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 1145   __ StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); | 
| 1185   __ bind(&skip); | 1146   __ bind(&skip); | 
| 1186 | 1147 | 
| 1187   // Compute the handler entry address and jump to it. | 1148   // Compute the handler entry address and jump to it. | 
| 1188   ConstantPoolUnavailableScope constant_pool_unavailable(masm); | 1149   __ mov(r3, Operand(pending_handler_code_address)); | 
| 1189   __ mov(r4, Operand(pending_handler_code_address)); | 1150   __ LoadP(r3, MemOperand(r3)); | 
|  | 1151   __ mov(r4, Operand(pending_handler_offset_address)); | 
| 1190   __ LoadP(r4, MemOperand(r4)); | 1152   __ LoadP(r4, MemOperand(r4)); | 
| 1191   __ mov(r5, Operand(pending_handler_offset_address)); | 1153   __ AddP(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start | 
| 1192   __ LoadP(r5, MemOperand(r5)); | 1154   __ AddP(ip, r3, r4); | 
| 1193   __ addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag));  // Code start |  | 
| 1194   if (FLAG_enable_embedded_constant_pool) { |  | 
| 1195     __ LoadConstantPoolPointerRegisterFromCodeTargetAddress(r4); |  | 
| 1196   } |  | 
| 1197   __ add(ip, r4, r5); |  | 
| 1198   __ Jump(ip); | 1155   __ Jump(ip); | 
| 1199 } | 1156 } | 
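The final dispatch computes the handler's entry point from the tagged Code object plus the recorded handler offset. In pointer-arithmetic terms (a sketch; the constants are V8's):

```cpp
#include <cstdint>

// entry = (code - kHeapObjectTag + Code::kHeaderSize) + handler_offset,
// mirroring the AddP/AddP/Jump sequence at the end of the stub.
uintptr_t HandlerEntry(uintptr_t tagged_code, uintptr_t handler_offset,
                       uintptr_t header_size, uintptr_t heap_object_tag) {
  uintptr_t code_start = tagged_code + header_size - heap_object_tag;
  return code_start + handler_offset;
}
```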
| 1200 | 1157 | 
| 1201 |  | 
| 1202 void JSEntryStub::Generate(MacroAssembler* masm) { | 1158 void JSEntryStub::Generate(MacroAssembler* masm) { | 
| 1203   // r3: code entry | 1159   // r2: code entry | 
| 1204   // r4: function | 1160   // r3: function | 
| 1205   // r5: receiver | 1161   // r4: receiver | 
| 1206   // r6: argc | 1162   // r5: argc | 
| 1207   // [sp+0]: argv | 1163   // r6: argv | 
| 1208 | 1164 | 
| 1209   Label invoke, handler_entry, exit; | 1165   Label invoke, handler_entry, exit; | 
| 1210 | 1166 | 
| 1211 // Called from C |  | 
| 1212   __ function_descriptor(); |  | 
| 1213 |  | 
| 1214   ProfileEntryHookStub::MaybeCallEntryHook(masm); | 1167   ProfileEntryHookStub::MaybeCallEntryHook(masm); | 
| 1215 | 1168 | 
| 1216   // PPC LINUX ABI: | 1169 // saving floating point registers | 
| 1217   // preserve LR in pre-reserved slot in caller's frame | 1170 #if V8_HOST_ARCH_S390X | 
| 1218   __ mflr(r0); | 1171   // 64bit ABI requires f8 to f15 be saved | 
| 1219   __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize)); | 1172   __ lay(sp, MemOperand(sp, -8 * kDoubleSize)); | 
|  | 1173   __ std(d8, MemOperand(sp)); | 
|  | 1174   __ std(d9, MemOperand(sp, 1 * kDoubleSize)); | 
|  | 1175   __ std(d10, MemOperand(sp, 2 * kDoubleSize)); | 
|  | 1176   __ std(d11, MemOperand(sp, 3 * kDoubleSize)); | 
|  | 1177   __ std(d12, MemOperand(sp, 4 * kDoubleSize)); | 
|  | 1178   __ std(d13, MemOperand(sp, 5 * kDoubleSize)); | 
|  | 1179   __ std(d14, MemOperand(sp, 6 * kDoubleSize)); | 
|  | 1180   __ std(d15, MemOperand(sp, 7 * kDoubleSize)); | 
|  | 1181 #else | 
|  | 1182   // 31bit ABI requires you to store f4 and f6: | 
|  | 1183   // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417 | 
|  | 1184   __ lay(sp, MemOperand(sp, -2 * kDoubleSize)); | 
|  | 1185   __ std(d4, MemOperand(sp)); | 
|  | 1186   __ std(d6, MemOperand(sp, kDoubleSize)); | 
|  | 1187 #endif | 
| 1220 | 1188 | 
| 1221   // Save callee saved registers on the stack. | 1189   // zLinux ABI | 
| 1222   __ MultiPush(kCalleeSaved); | 1190   //    Incoming parameters: | 
|  | 1191   //          r2: code entry | 
|  | 1192   //          r3: function | 
|  | 1193   //          r4: receiver | 
|  | 1194   //          r5: argc | 
|  | 1195   //          r6: argv | 
|  | 1196   //    Requires us to save the callee-preserved registers r6-r13 | 
|  | 1197   //    General convention is to also save r14 (return addr) and | 
|  | 1198   //    sp/r15 as well in a single STM/STMG | 
|  | 1199   __ lay(sp, MemOperand(sp, -10 * kPointerSize)); | 
|  | 1200   __ StoreMultipleP(r6, sp, MemOperand(sp, 0)); | 
| 1223 | 1201 | 
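StoreMultipleP(r6, sp, ...) assembles to a single STM/STMG storing the contiguous register range r6..r15 (r15 is sp on zSeries), which accounts for the 10 * kPointerSize just reserved. A rough model, assuming 64-bit registers:

```cpp
#include <cstdint>

// Model of STMG r6,r15,0(sp): regs[6..15] go to mem[0..9] in order.
// In 31-bit mode the analogous STM stores 4-byte registers instead.
void StoreMultiple(const uint64_t regs[16], uint64_t* mem) {
  for (int r = 6; r <= 15; ++r) {
    mem[r - 6] = regs[r];  // ten pointer-sized slots, r6 first
  }
}
```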
| 1224   // Save callee-saved double registers. |  | 
| 1225   __ MultiPushDoubles(kCalleeSavedDoubles); |  | 
| 1226   // Set up the reserved register for 0.0. | 1202   // Set up the reserved register for 0.0. | 
| 1227   __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0); | 1203   // __ LoadDoubleLiteral(kDoubleRegZero, 0.0, r0); | 
| 1228 | 1204 | 
| 1229   // Push a frame with special values setup to mark it as an entry frame. | 1205   // Push a frame with special values setup to mark it as an entry frame. | 
| 1230   // r3: code entry | 1206   //   Bad FP (-1) | 
| 1231   // r4: function | 1207   //   SMI Marker | 
| 1232   // r5: receiver | 1208   //   SMI Marker | 
| 1233   // r6: argc | 1209   //   kCEntryFPAddress | 
| 1234   // r7: argv | 1210   //   Frame type | 
| 1235   __ li(r0, Operand(-1));  // Push a bad frame pointer to fail if it is used. | 1211   __ lay(sp, MemOperand(sp, -5 * kPointerSize)); | 
| 1236   __ push(r0); | 1212   // Push a bad frame pointer to fail if it is used. | 
| 1237   if (FLAG_enable_embedded_constant_pool) { | 1213   __ LoadImmP(r10, Operand(-1)); | 
| 1238     __ li(kConstantPoolRegister, Operand::Zero()); | 1214 | 
| 1239     __ push(kConstantPoolRegister); |  | 
| 1240   } |  | 
| 1241   int marker = type(); | 1215   int marker = type(); | 
| 1242   __ LoadSmiLiteral(r0, Smi::FromInt(marker)); | 1216   __ LoadSmiLiteral(r9, Smi::FromInt(marker)); | 
| 1243   __ push(r0); | 1217   __ LoadSmiLiteral(r8, Smi::FromInt(marker)); | 
| 1244   __ push(r0); |  | 
| 1245   // Save copies of the top frame descriptor on the stack. | 1218   // Save copies of the top frame descriptor on the stack. | 
| 1246   __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 1219   __ mov(r7, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 
| 1247   __ LoadP(r0, MemOperand(r8)); | 1220   __ LoadP(r7, MemOperand(r7)); | 
| 1248   __ push(r0); | 1221   __ StoreMultipleP(r7, r10, MemOperand(sp, kPointerSize)); | 
| 1249 |  | 
| 1250   // Set up frame pointer for the frame to be pushed. | 1222   // Set up frame pointer for the frame to be pushed. | 
| 1251   __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 1223   // Need to add kPointerSize, because sp already includes one extra | 
|  | 1224   // slot for the frame type that is pushed later. | 
|  | 1225   __ lay(fp, | 
|  | 1226          MemOperand(sp, -EntryFrameConstants::kCallerFPOffset + kPointerSize)); | 
| 1252 | 1227 | 
| 1253   // If this is the outermost JS call, set js_entry_sp value. | 1228   // If this is the outermost JS call, set js_entry_sp value. | 
| 1254   Label non_outermost_js; | 1229   Label non_outermost_js; | 
| 1255   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); | 1230   ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); | 
| 1256   __ mov(r8, Operand(ExternalReference(js_entry_sp))); | 1231   __ mov(r7, Operand(ExternalReference(js_entry_sp))); | 
| 1257   __ LoadP(r9, MemOperand(r8)); | 1232   __ LoadAndTestP(r8, MemOperand(r7)); | 
| 1258   __ cmpi(r9, Operand::Zero()); | 1233   __ bne(&non_outermost_js, Label::kNear); | 
| 1259   __ bne(&non_outermost_js); | 1234   __ StoreP(fp, MemOperand(r7)); | 
| 1260   __ StoreP(fp, MemOperand(r8)); |  | 
| 1261   __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); | 1235   __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); | 
| 1262   Label cont; | 1236   Label cont; | 
| 1263   __ b(&cont); | 1237   __ b(&cont, Label::kNear); | 
| 1264   __ bind(&non_outermost_js); | 1238   __ bind(&non_outermost_js); | 
| 1265   __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)); | 1239   __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)); | 
|  | 1240 | 
| 1266   __ bind(&cont); | 1241   __ bind(&cont); | 
| 1267   __ push(ip);  // frame-type | 1242   __ StoreP(ip, MemOperand(sp));  // frame-type | 
| 1268 | 1243 | 
| 1269   // Jump to a faked try block that does the invoke, with a faked catch | 1244   // Jump to a faked try block that does the invoke, with a faked catch | 
| 1270   // block that sets the pending exception. | 1245   // block that sets the pending exception. | 
| 1271   __ b(&invoke); | 1246   __ b(&invoke, Label::kNear); | 
| 1272 | 1247 | 
| 1273   __ bind(&handler_entry); | 1248   __ bind(&handler_entry); | 
| 1274   handler_offset_ = handler_entry.pos(); | 1249   handler_offset_ = handler_entry.pos(); | 
| 1275   // Caught exception: Store result (exception) in the pending exception | 1250   // Caught exception: Store result (exception) in the pending exception | 
| 1276   // field in the JSEnv and return a failure sentinel.  Coming in here the | 1251   // field in the JSEnv and return a failure sentinel.  Coming in here the | 
| 1277   // fp will be invalid because the PushStackHandler below sets it to 0 to | 1252   // fp will be invalid because the PushStackHandler below sets it to 0 to | 
| 1278   // signal the existence of the JSEntry frame. | 1253   // signal the existence of the JSEntry frame. | 
| 1279   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 1254   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 
| 1280                                        isolate()))); | 1255                                        isolate()))); | 
| 1281 | 1256 | 
| 1282   __ StoreP(r3, MemOperand(ip)); | 1257   __ StoreP(r2, MemOperand(ip)); | 
| 1283   __ LoadRoot(r3, Heap::kExceptionRootIndex); | 1258   __ LoadRoot(r2, Heap::kExceptionRootIndex); | 
| 1284   __ b(&exit); | 1259   __ b(&exit, Label::kNear); | 
| 1285 | 1260 | 
| 1286   // Invoke: Link this frame into the handler chain. | 1261   // Invoke: Link this frame into the handler chain. | 
| 1287   __ bind(&invoke); | 1262   __ bind(&invoke); | 
| 1288   // Must preserve r3-r7. | 1263   // Must preserve r2-r6. | 
| 1289   __ PushStackHandler(); | 1264   __ PushStackHandler(); | 
| 1290   // If an exception not caught by another handler occurs, this handler | 1265   // If an exception not caught by another handler occurs, this handler | 
| 1291   // returns control to the code after the b(&invoke) above, which | 1266   // returns control to the code after the b(&invoke) above, which | 
| 1292   // restores all kCalleeSaved registers (including cp and fp) to their | 1267   // restores all kCalleeSaved registers (including cp and fp) to their | 
| 1293   // saved values before returning a failure to C. | 1268   // saved values before returning a failure to C. | 
| 1294 | 1269 | 
| 1295   // Clear any pending exceptions. | 1270   // Clear any pending exceptions. | 
| 1296   __ mov(r8, Operand(isolate()->factory()->the_hole_value())); |  | 
| 1297   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 1271   __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 
| 1298                                        isolate()))); | 1272                                        isolate()))); | 
| 1299   __ StoreP(r8, MemOperand(ip)); | 1273   __ mov(r7, Operand(isolate()->factory()->the_hole_value())); | 
|  | 1274   __ StoreP(r7, MemOperand(ip)); | 
| 1300 | 1275 | 
| 1301   // Invoke the function by calling through JS entry trampoline builtin. | 1276   // Invoke the function by calling through JS entry trampoline builtin. | 
| 1302   // Notice that we cannot store a reference to the trampoline code directly in | 1277   // Notice that we cannot store a reference to the trampoline code directly in | 
| 1303   // this stub, because runtime stubs are not traversed when doing GC. | 1278   // this stub, because runtime stubs are not traversed when doing GC. | 
| 1304 | 1279 | 
| 1305   // Expected registers by Builtins::JSEntryTrampoline | 1280   // Expected registers by Builtins::JSEntryTrampoline | 
| 1306   // r3: code entry | 1281   // r2: code entry | 
| 1307   // r4: function | 1282   // r3: function | 
| 1308   // r5: receiver | 1283   // r4: receiver | 
| 1309   // r6: argc | 1284   // r5: argc | 
| 1310   // r7: argv | 1285   // r6: argv | 
| 1311   if (type() == StackFrame::ENTRY_CONSTRUCT) { | 1286   if (type() == StackFrame::ENTRY_CONSTRUCT) { | 
| 1312     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, | 1287     ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, | 
| 1313                                       isolate()); | 1288                                       isolate()); | 
| 1314     __ mov(ip, Operand(construct_entry)); | 1289     __ mov(ip, Operand(construct_entry)); | 
| 1315   } else { | 1290   } else { | 
| 1316     ExternalReference entry(Builtins::kJSEntryTrampoline, isolate()); | 1291     ExternalReference entry(Builtins::kJSEntryTrampoline, isolate()); | 
| 1317     __ mov(ip, Operand(entry)); | 1292     __ mov(ip, Operand(entry)); | 
| 1318   } | 1293   } | 
| 1319   __ LoadP(ip, MemOperand(ip));  // deref address | 1294   __ LoadP(ip, MemOperand(ip));  // deref address | 
| 1320 | 1295 | 
| 1321   // Branch and link to JSEntryTrampoline. | 1296   // Branch and link to JSEntryTrampoline. | 
| 1322   // the address points to the start of the code object, skip the header | 1297   // the address points to the start of the code object, skip the header | 
| 1323   __ addi(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1298   __ AddP(ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 1324   __ mtctr(ip); | 1299   Label return_addr; | 
| 1325   __ bctrl();  // make the call | 1300   // __ basr(r14, ip); | 
|  | 1301   __ larl(r14, &return_addr); | 
|  | 1302   __ b(ip); | 
|  | 1303   __ bind(&return_addr); | 
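|  | // The larl/b/bind sequence simulates a call (equivalent to the basr | 
|  | // commented out above): larl puts the PC-relative address of return_addr | 
|  | // into r14, the conventional return-address register, before branching. | 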
| 1326 | 1304 | 
| 1327   // Unlink this frame from the handler chain. | 1305   // Unlink this frame from the handler chain. | 
| 1328   __ PopStackHandler(); | 1306   __ PopStackHandler(); | 
| 1329 | 1307 | 
| 1330   __ bind(&exit);  // r3 holds result | 1308   __ bind(&exit);  // r2 holds result | 
| 1331   // Check if the current stack frame is marked as the outermost JS frame. | 1309   // Check if the current stack frame is marked as the outermost JS frame. | 
| 1332   Label non_outermost_js_2; | 1310   Label non_outermost_js_2; | 
| 1333   __ pop(r8); | 1311   __ pop(r7); | 
| 1334   __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0); | 1312   __ CmpSmiLiteral(r7, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0); | 
| 1335   __ bne(&non_outermost_js_2); | 1313   __ bne(&non_outermost_js_2, Label::kNear); | 
| 1336   __ mov(r9, Operand::Zero()); | 1314   __ mov(r8, Operand::Zero()); | 
| 1337   __ mov(r8, Operand(ExternalReference(js_entry_sp))); | 1315   __ mov(r7, Operand(ExternalReference(js_entry_sp))); | 
| 1338   __ StoreP(r9, MemOperand(r8)); | 1316   __ StoreP(r8, MemOperand(r7)); | 
| 1339   __ bind(&non_outermost_js_2); | 1317   __ bind(&non_outermost_js_2); | 
| 1340 | 1318 | 
| 1341   // Restore the top frame descriptors from the stack. | 1319   // Restore the top frame descriptors from the stack. | 
| 1342   __ pop(r6); | 1320   __ pop(r5); | 
| 1343   __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 1321   __ mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 
| 1344   __ StoreP(r6, MemOperand(ip)); | 1322   __ StoreP(r5, MemOperand(ip)); | 
| 1345 | 1323 | 
| 1346   // Reset the stack to the callee saved registers. | 1324   // Reset the stack to the callee saved registers. | 
| 1347   __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 1325   __ lay(sp, MemOperand(sp, -EntryFrameConstants::kCallerFPOffset)); | 
| 1348 | 1326 | 
| 1349   // Restore callee-saved double registers. | 1327   // Reload the callee-saved registers, the return address reg (r14) and sp | 
| 1350   __ MultiPopDoubles(kCalleeSavedDoubles); | 1328   __ LoadMultipleP(r6, sp, MemOperand(sp, 0)); | 
|  | 1329   __ la(sp, MemOperand(sp, 10 * kPointerSize)); | 
| 1351 | 1330 | 
| 1352   // Restore callee-saved registers. | 1331 // saving floating point registers | 
| 1353   __ MultiPop(kCalleeSaved); | 1332 #if V8_HOST_ARCH_S390X | 
|  | 1333   // 64bit ABI requires f8 to f15 to be saved; restore them here | 
|  | 1334   __ ld(d8, MemOperand(sp)); | 
|  | 1335   __ ld(d9, MemOperand(sp, 1 * kDoubleSize)); | 
|  | 1336   __ ld(d10, MemOperand(sp, 2 * kDoubleSize)); | 
|  | 1337   __ ld(d11, MemOperand(sp, 3 * kDoubleSize)); | 
|  | 1338   __ ld(d12, MemOperand(sp, 4 * kDoubleSize)); | 
|  | 1339   __ ld(d13, MemOperand(sp, 5 * kDoubleSize)); | 
|  | 1340   __ ld(d14, MemOperand(sp, 6 * kDoubleSize)); | 
|  | 1341   __ ld(d15, MemOperand(sp, 7 * kDoubleSize)); | 
|  | 1342   __ la(sp, MemOperand(sp, 8 * kDoubleSize)); | 
|  | 1343 #else | 
|  | 1344   // 31bit ABI requires f4 and f6 to be saved; restore them here: | 
|  | 1345   // http://refspecs.linuxbase.org/ELF/zSeries/lzsabi0_s390.html#AEN417 | 
|  | 1346   __ ld(d4, MemOperand(sp)); | 
|  | 1347   __ ld(d6, MemOperand(sp, kDoubleSize)); | 
|  | 1348   __ la(sp, MemOperand(sp, 2 * kDoubleSize)); | 
|  | 1349 #endif | 
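|  | // These loads undo the stores made in the stub prologue; per the zSeries | 
|  | // ABI, f8-f15 (64-bit) resp. f4/f6 (31-bit) are callee-saved and must be | 
|  | // restored before returning to the C caller. | 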
| 1354 | 1350 | 
| 1355   // Return | 1351   __ b(r14); | 
| 1356   __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize)); |  | 
| 1357   __ mtlr(r0); |  | 
| 1358   __ blr(); |  | 
| 1359 } | 1352 } | 
| 1360 | 1353 | 
| 1361 |  | 
| 1362 void InstanceOfStub::Generate(MacroAssembler* masm) { | 1354 void InstanceOfStub::Generate(MacroAssembler* masm) { | 
| 1363   Register const object = r4;              // Object (lhs). | 1355   Register const object = r3;              // Object (lhs). | 
| 1364   Register const function = r3;            // Function (rhs). | 1356   Register const function = r2;            // Function (rhs). | 
| 1365   Register const object_map = r5;          // Map of {object}. | 1357   Register const object_map = r4;          // Map of {object}. | 
| 1366   Register const function_map = r6;        // Map of {function}. | 1358   Register const function_map = r5;        // Map of {function}. | 
| 1367   Register const function_prototype = r7;  // Prototype of {function}. | 1359   Register const function_prototype = r6;  // Prototype of {function}. | 
| 1368   Register const scratch = r8; | 1360   Register const scratch = r7; | 
| 1369 | 1361 | 
| 1370   DCHECK(object.is(InstanceOfDescriptor::LeftRegister())); | 1362   DCHECK(object.is(InstanceOfDescriptor::LeftRegister())); | 
| 1371   DCHECK(function.is(InstanceOfDescriptor::RightRegister())); | 1363   DCHECK(function.is(InstanceOfDescriptor::RightRegister())); | 
| 1372 | 1364 | 
| 1373   // Check if {object} is a smi. | 1365   // Check if {object} is a smi. | 
| 1374   Label object_is_smi; | 1366   Label object_is_smi; | 
| 1375   __ JumpIfSmi(object, &object_is_smi); | 1367   __ JumpIfSmi(object, &object_is_smi); | 
| 1376 | 1368 | 
| 1377   // Lookup the {function} and the {object} map in the global instanceof cache. | 1369   // Lookup the {function} and the {object} map in the global instanceof cache. | 
| 1378   // Note: This is safe because we clear the global instanceof cache whenever | 1370   // Note: This is safe because we clear the global instanceof cache whenever | 
| 1379   // we change the prototype of any object. | 1371   // we change the prototype of any object. | 
| 1380   Label fast_case, slow_case; | 1372   Label fast_case, slow_case; | 
| 1381   __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | 1373   __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | 
| 1382   __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 1374   __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 
| 1383   __ bne(&fast_case); | 1375   __ bne(&fast_case); | 
| 1384   __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex); | 1376   __ CompareRoot(object_map, Heap::kInstanceofCacheMapRootIndex); | 
| 1385   __ bne(&fast_case); | 1377   __ bne(&fast_case); | 
| 1386   __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex); | 1378   __ LoadRoot(r2, Heap::kInstanceofCacheAnswerRootIndex); | 
| 1387   __ Ret(); | 1379   __ Ret(); | 
| 1388 | 1380 | 
| 1389   // If {object} is a smi we can safely return false if {function} is a JS | 1381   // If {object} is a smi we can safely return false if {function} is a JS | 
| 1390   // function, otherwise we have to miss to the runtime and throw an exception. | 1382   // function, otherwise we have to miss to the runtime and throw an exception. | 
| 1391   __ bind(&object_is_smi); | 1383   __ bind(&object_is_smi); | 
| 1392   __ JumpIfSmi(function, &slow_case); | 1384   __ JumpIfSmi(function, &slow_case); | 
| 1393   __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE); | 1385   __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE); | 
| 1394   __ bne(&slow_case); | 1386   __ bne(&slow_case); | 
| 1395   __ LoadRoot(r3, Heap::kFalseValueRootIndex); | 1387   __ LoadRoot(r2, Heap::kFalseValueRootIndex); | 
| 1396   __ Ret(); | 1388   __ Ret(); | 
| 1397 | 1389 | 
| 1398   // Fast-case: The {function} must be a valid JSFunction. | 1390   // Fast-case: The {function} must be a valid JSFunction. | 
| 1399   __ bind(&fast_case); | 1391   __ bind(&fast_case); | 
| 1400   __ JumpIfSmi(function, &slow_case); | 1392   __ JumpIfSmi(function, &slow_case); | 
| 1401   __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE); | 1393   __ CompareObjectType(function, function_map, scratch, JS_FUNCTION_TYPE); | 
| 1402   __ bne(&slow_case); | 1394   __ bne(&slow_case); | 
| 1403 | 1395 | 
| 1404   // Ensure that {function} has an instance prototype. | 1396   // Ensure that {function} has an instance prototype. | 
| 1405   __ lbz(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset)); | 1397   __ LoadlB(scratch, FieldMemOperand(function_map, Map::kBitFieldOffset)); | 
| 1406   __ TestBit(scratch, Map::kHasNonInstancePrototype, r0); | 1398   __ TestBit(scratch, Map::kHasNonInstancePrototype, r0); | 
| 1407   __ bne(&slow_case, cr0); | 1399   __ bne(&slow_case); | 
| 1408 | 1400 | 
| 1409   // Get the "prototype" (or initial map) of the {function}. | 1401   // Get the "prototype" (or initial map) of the {function}. | 
| 1410   __ LoadP(function_prototype, | 1402   __ LoadP(function_prototype, | 
| 1411            FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 1403            FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 
| 1412   __ AssertNotSmi(function_prototype); | 1404   __ AssertNotSmi(function_prototype); | 
| 1413 | 1405 | 
| 1414   // Resolve the prototype if the {function} has an initial map.  Afterwards the | 1406   // Resolve the prototype if the {function} has an initial map.  Afterwards the | 
| 1415   // {function_prototype} will be either the JSReceiver prototype object or the | 1407   // {function_prototype} will be either the JSReceiver prototype object or the | 
| 1416   // hole value, which means that no instances of the {function} were created so | 1408   // hole value, which means that no instances of the {function} were created so | 
| 1417   // far and hence we should return false. | 1409   // far and hence we should return false. | 
| 1418   Label function_prototype_valid; | 1410   Label function_prototype_valid; | 
| 1419   __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE); | 1411   __ CompareObjectType(function_prototype, scratch, scratch, MAP_TYPE); | 
| 1420   __ bne(&function_prototype_valid); | 1412   __ bne(&function_prototype_valid); | 
| 1421   __ LoadP(function_prototype, | 1413   __ LoadP(function_prototype, | 
| 1422            FieldMemOperand(function_prototype, Map::kPrototypeOffset)); | 1414            FieldMemOperand(function_prototype, Map::kPrototypeOffset)); | 
| 1423   __ bind(&function_prototype_valid); | 1415   __ bind(&function_prototype_valid); | 
| 1424   __ AssertNotSmi(function_prototype); | 1416   __ AssertNotSmi(function_prototype); | 
| 1425 | 1417 | 
| 1426   // Update the global instanceof cache with the current {object} map and | 1418   // Update the global instanceof cache with the current {object} map and | 
| 1427   // {function}.  The cached answer will be set when it is known below. | 1419   // {function}.  The cached answer will be set when it is known below. | 
| 1428   __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 1420   __ StoreRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 
| 1429   __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex); | 1421   __ StoreRoot(object_map, Heap::kInstanceofCacheMapRootIndex); | 
| 1430 | 1422 | 
| 1431   // Loop through the prototype chain looking for the {function} prototype. | 1423   // Loop through the prototype chain looking for the {function} prototype. | 
| 1432   // Assume true, and change to false if not found. | 1424   // Assume true, and change to false if not found. | 
| 1433   Register const object_instance_type = function_map; | 1425   Register const object_instance_type = function_map; | 
| 1434   Register const map_bit_field = function_map; | 1426   Register const map_bit_field = function_map; | 
| 1435   Register const null = scratch; | 1427   Register const null = scratch; | 
| 1436   Register const result = r3; | 1428   Register const result = r2; | 
| 1437 | 1429 | 
| 1438   Label done, loop, fast_runtime_fallback; | 1430   Label done, loop, fast_runtime_fallback; | 
| 1439   __ LoadRoot(result, Heap::kTrueValueRootIndex); | 1431   __ LoadRoot(result, Heap::kTrueValueRootIndex); | 
| 1440   __ LoadRoot(null, Heap::kNullValueRootIndex); | 1432   __ LoadRoot(null, Heap::kNullValueRootIndex); | 
| 1441   __ bind(&loop); | 1433   __ bind(&loop); | 
| 1442 | 1434 | 
| 1443   // Check if the object needs to be access checked. | 1435   // Check if the object needs to be access checked. | 
| 1444   __ lbz(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset)); | 1436   __ LoadlB(map_bit_field, FieldMemOperand(object_map, Map::kBitFieldOffset)); | 
| 1445   __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0); | 1437   __ TestBit(map_bit_field, Map::kIsAccessCheckNeeded, r0); | 
| 1446   __ bne(&fast_runtime_fallback, cr0); | 1438   __ bne(&fast_runtime_fallback); | 
| 1447   // Check if the current object is a Proxy. | 1439   // Check if the current object is a Proxy. | 
| 1448   __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); | 1440   __ CompareInstanceType(object_map, object_instance_type, JS_PROXY_TYPE); | 
| 1449   __ beq(&fast_runtime_fallback); | 1441   __ beq(&fast_runtime_fallback); | 
| 1450 | 1442 | 
| 1451   __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset)); | 1443   __ LoadP(object, FieldMemOperand(object_map, Map::kPrototypeOffset)); | 
| 1452   __ cmp(object, function_prototype); | 1444   __ CmpP(object, function_prototype); | 
| 1453   __ beq(&done); | 1445   __ beq(&done); | 
| 1454   __ cmp(object, null); | 1446   __ CmpP(object, null); | 
| 1455   __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | 1447   __ LoadP(object_map, FieldMemOperand(object, HeapObject::kMapOffset)); | 
| 1456   __ bne(&loop); | 1448   __ bne(&loop); | 
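|  | // Note: LoadP does not alter the condition code, so the bne above still | 
|  | // tests the CmpP against null; the loop falls through (answer: false) only | 
|  | // when the end of the prototype chain is reached. | 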
| 1457   __ LoadRoot(result, Heap::kFalseValueRootIndex); | 1449   __ LoadRoot(result, Heap::kFalseValueRootIndex); | 
| 1458   __ bind(&done); | 1450   __ bind(&done); | 
| 1459   __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex); | 1451   __ StoreRoot(result, Heap::kInstanceofCacheAnswerRootIndex); | 
| 1460   __ Ret(); | 1452   __ Ret(); | 
| 1461 | 1453 | 
| 1462   // Found Proxy or access check needed: Call the runtime | 1454   // Found Proxy or access check needed: Call the runtime | 
| 1463   __ bind(&fast_runtime_fallback); | 1455   __ bind(&fast_runtime_fallback); | 
| 1464   __ Push(object, function_prototype); | 1456   __ Push(object, function_prototype); | 
| 1465   // Invalidate the instanceof cache. | 1457   // Invalidate the instanceof cache. | 
| 1466   __ LoadSmiLiteral(scratch, Smi::FromInt(0)); | 1458   __ LoadSmiLiteral(scratch, Smi::FromInt(0)); | 
| 1467   __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex); | 1459   __ StoreRoot(scratch, Heap::kInstanceofCacheFunctionRootIndex); | 
| 1468   __ TailCallRuntime(Runtime::kHasInPrototypeChain); | 1460   __ TailCallRuntime(Runtime::kHasInPrototypeChain); | 
| 1469 | 1461 | 
| 1470   // Slow-case: Call the %InstanceOf runtime function. | 1462   // Slow-case: Call the %InstanceOf runtime function. | 
| 1471   __ bind(&slow_case); | 1463   __ bind(&slow_case); | 
| 1472   __ Push(object, function); | 1464   __ Push(object, function); | 
| 1473   __ TailCallRuntime(Runtime::kInstanceOf); | 1465   __ TailCallRuntime(Runtime::kInstanceOf); | 
| 1474 } | 1466 } | 
| 1475 | 1467 | 
| 1476 |  | 
| 1477 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { | 1468 void FunctionPrototypeStub::Generate(MacroAssembler* masm) { | 
| 1478   Label miss; | 1469   Label miss; | 
| 1479   Register receiver = LoadDescriptor::ReceiverRegister(); | 1470   Register receiver = LoadDescriptor::ReceiverRegister(); | 
| 1480   // Ensure that the vector and slot registers won't be clobbered before | 1471   // Ensure that the vector and slot registers won't be clobbered before | 
| 1481   // calling the miss handler. | 1472   // calling the miss handler. | 
| 1482   DCHECK(!AreAliased(r7, r8, LoadWithVectorDescriptor::VectorRegister(), | 1473   DCHECK(!AreAliased(r6, r7, LoadWithVectorDescriptor::VectorRegister(), | 
| 1483                      LoadWithVectorDescriptor::SlotRegister())); | 1474                      LoadWithVectorDescriptor::SlotRegister())); | 
| 1484 | 1475 | 
| 1485   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r7, | 1476   NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6, | 
| 1486                                                           r8, &miss); | 1477                                                           r7, &miss); | 
| 1487   __ bind(&miss); | 1478   __ bind(&miss); | 
| 1488   PropertyAccessCompiler::TailCallBuiltin( | 1479   PropertyAccessCompiler::TailCallBuiltin( | 
| 1489       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); | 1480       masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); | 
| 1490 } | 1481 } | 
| 1491 | 1482 | 
| 1492 |  | 
| 1493 void LoadIndexedStringStub::Generate(MacroAssembler* masm) { | 1483 void LoadIndexedStringStub::Generate(MacroAssembler* masm) { | 
| 1494   // Return address is in lr. | 1484   // Return address is in r14. | 
| 1495   Label miss; | 1485   Label miss; | 
| 1496 | 1486 | 
| 1497   Register receiver = LoadDescriptor::ReceiverRegister(); | 1487   Register receiver = LoadDescriptor::ReceiverRegister(); | 
| 1498   Register index = LoadDescriptor::NameRegister(); | 1488   Register index = LoadDescriptor::NameRegister(); | 
| 1499   Register scratch = r8; | 1489   Register scratch = r7; | 
| 1500   Register result = r3; | 1490   Register result = r2; | 
| 1501   DCHECK(!scratch.is(receiver) && !scratch.is(index)); | 1491   DCHECK(!scratch.is(receiver) && !scratch.is(index)); | 
| 1502   DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) && | 1492   DCHECK(!scratch.is(LoadWithVectorDescriptor::VectorRegister()) && | 
| 1503          result.is(LoadWithVectorDescriptor::SlotRegister())); | 1493          result.is(LoadWithVectorDescriptor::SlotRegister())); | 
| 1504 | 1494 | 
| 1505   // StringCharAtGenerator doesn't use the result register until it has passed | 1495   // StringCharAtGenerator doesn't use the result register until it has passed | 
| 1506   // the different miss possibilities. If it did, we would have a conflict | 1496   // the different miss possibilities. If it did, we would have a conflict | 
| 1507   // when FLAG_vector_ics is true. | 1497   // when FLAG_vector_ics is true. | 
| 1508   StringCharAtGenerator char_at_generator(receiver, index, scratch, result, | 1498   StringCharAtGenerator char_at_generator(receiver, index, scratch, result, | 
| 1509                                           &miss,  // When not a string. | 1499                                           &miss,  // When not a string. | 
| 1510                                           &miss,  // When not a number. | 1500                                           &miss,  // When not a number. | 
| 1511                                           &miss,  // When index out of range. | 1501                                           &miss,  // When index out of range. | 
| 1512                                           STRING_INDEX_IS_ARRAY_INDEX, | 1502                                           STRING_INDEX_IS_ARRAY_INDEX, | 
| 1513                                           RECEIVER_IS_STRING); | 1503                                           RECEIVER_IS_STRING); | 
| 1514   char_at_generator.GenerateFast(masm); | 1504   char_at_generator.GenerateFast(masm); | 
| 1515   __ Ret(); | 1505   __ Ret(); | 
| 1516 | 1506 | 
| 1517   StubRuntimeCallHelper call_helper; | 1507   StubRuntimeCallHelper call_helper; | 
| 1518   char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper); | 1508   char_at_generator.GenerateSlow(masm, PART_OF_IC_HANDLER, call_helper); | 
| 1519 | 1509 | 
| 1520   __ bind(&miss); | 1510   __ bind(&miss); | 
| 1521   PropertyAccessCompiler::TailCallBuiltin( | 1511   PropertyAccessCompiler::TailCallBuiltin( | 
| 1522       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC)); | 1512       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC)); | 
| 1523 } | 1513 } | 
| 1524 | 1514 | 
| 1525 |  | 
| 1526 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) { | 1515 void LoadIndexedInterceptorStub::Generate(MacroAssembler* masm) { | 
| 1527   // Return address is in lr. | 1516   // Return address is in r14. | 
| 1528   Label slow; | 1517   Label slow; | 
| 1529 | 1518 | 
| 1530   Register receiver = LoadDescriptor::ReceiverRegister(); | 1519   Register receiver = LoadDescriptor::ReceiverRegister(); | 
| 1531   Register key = LoadDescriptor::NameRegister(); | 1520   Register key = LoadDescriptor::NameRegister(); | 
| 1532 | 1521 | 
| 1533   // Check that the key is an array index, that is Uint32. | 1522   // Check that the key is an array index, that is Uint32. | 
| 1534   __ TestIfPositiveSmi(key, r0); | 1523   __ TestIfPositiveSmi(key, r0); | 
| 1535   __ bne(&slow, cr0); | 1524   __ bne(&slow); | 
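|  | // Only keys that are positive smis (valid Uint32 array indices) continue; | 
|  | // everything else takes the slow path. | 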
| 1536 | 1525 | 
| 1537   // Everything is fine, call runtime. | 1526   // Everything is fine, call runtime. | 
| 1538   __ Push(receiver, key);  // Receiver, key. | 1527   __ Push(receiver, key);  // Receiver, key. | 
| 1539 | 1528 | 
| 1540   // Perform tail call to the entry. | 1529   // Perform tail call to the entry. | 
| 1541   __ TailCallRuntime(Runtime::kLoadElementWithInterceptor); | 1530   __ TailCallRuntime(Runtime::kLoadElementWithInterceptor); | 
| 1542 | 1531 | 
| 1543   __ bind(&slow); | 1532   __ bind(&slow); | 
| 1544   PropertyAccessCompiler::TailCallBuiltin( | 1533   PropertyAccessCompiler::TailCallBuiltin( | 
| 1545       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC)); | 1534       masm, PropertyAccessCompiler::MissBuiltin(Code::KEYED_LOAD_IC)); | 
| 1546 } | 1535 } | 
| 1547 | 1536 | 
| 1548 |  | 
| 1549 void RegExpExecStub::Generate(MacroAssembler* masm) { | 1537 void RegExpExecStub::Generate(MacroAssembler* masm) { | 
| 1550 // Just jump directly to the runtime if native RegExp is not selected at | 1538 // Just jump directly to the runtime if native RegExp is not selected at | 
| 1551 // compile time, or if the regexp entry in generated code is disabled by a | 1539 // compile time, or if the regexp entry in generated code is disabled by a | 
| 1552 // runtime switch or at compilation. | 1540 // runtime switch or at compilation. | 
| 1553 #ifdef V8_INTERPRETED_REGEXP | 1541 #ifdef V8_INTERPRETED_REGEXP | 
| 1554   __ TailCallRuntime(Runtime::kRegExpExec); | 1542   __ TailCallRuntime(Runtime::kRegExpExec); | 
| 1555 #else  // V8_INTERPRETED_REGEXP | 1543 #else   // V8_INTERPRETED_REGEXP | 
| 1556 | 1544 | 
| 1557   // Stack frame on entry. | 1545   // Stack frame on entry. | 
| 1558   //  sp[0]: last_match_info (expected JSArray) | 1546   //  sp[0]: last_match_info (expected JSArray) | 
| 1559   //  sp[4]: previous index | 1547   //  sp[4]: previous index | 
| 1560   //  sp[8]: subject string | 1548   //  sp[8]: subject string | 
| 1561   //  sp[12]: JSRegExp object | 1549   //  sp[12]: JSRegExp object | 
| 1562 | 1550 | 
| 1563   const int kLastMatchInfoOffset = 0 * kPointerSize; | 1551   const int kLastMatchInfoOffset = 0 * kPointerSize; | 
| 1564   const int kPreviousIndexOffset = 1 * kPointerSize; | 1552   const int kPreviousIndexOffset = 1 * kPointerSize; | 
| 1565   const int kSubjectOffset = 2 * kPointerSize; | 1553   const int kSubjectOffset = 2 * kPointerSize; | 
| 1566   const int kJSRegExpOffset = 3 * kPointerSize; | 1554   const int kJSRegExpOffset = 3 * kPointerSize; | 
| 1567 | 1555 | 
| 1568   Label runtime, br_over, encoding_type_UC16; | 1556   Label runtime, br_over, encoding_type_UC16; | 
| 1569 | 1557 | 
| 1570   // Allocation of registers for this function. These are in callee save | 1558   // Allocation of registers for this function. These are in callee save | 
| 1571   // registers and will be preserved by the call to the native RegExp code, as | 1559   // registers and will be preserved by the call to the native RegExp code, as | 
| 1572   // this code is called using the normal C calling convention. When calling | 1560   // this code is called using the normal C calling convention. When calling | 
| 1573   // directly from generated code the native RegExp code will not do a GC and | 1561   // directly from generated code the native RegExp code will not do a GC and | 
| 1574   // therefore the content of these registers are safe to use after the call. | 1562   // therefore the content of these registers are safe to use after the call. | 
| 1575   Register subject = r14; | 1563   Register subject = r6; | 
| 1576   Register regexp_data = r15; | 1564   Register regexp_data = r7; | 
| 1577   Register last_match_info_elements = r16; | 1565   Register last_match_info_elements = r8; | 
| 1578   Register code = r17; | 1566   Register code = r9; | 
|  | 1567 | 
|  | 1568   __ CleanseP(r14); | 
| 1579 | 1569 | 
| 1580   // Ensure register assignments are consistent with callee save masks | 1570   // Ensure register assignments are consistent with callee save masks | 
| 1581   DCHECK(subject.bit() & kCalleeSaved); | 1571   DCHECK(subject.bit() & kCalleeSaved); | 
| 1582   DCHECK(regexp_data.bit() & kCalleeSaved); | 1572   DCHECK(regexp_data.bit() & kCalleeSaved); | 
| 1583   DCHECK(last_match_info_elements.bit() & kCalleeSaved); | 1573   DCHECK(last_match_info_elements.bit() & kCalleeSaved); | 
| 1584   DCHECK(code.bit() & kCalleeSaved); | 1574   DCHECK(code.bit() & kCalleeSaved); | 
| 1585 | 1575 | 
| 1586   // Ensure that a RegExp stack is allocated. | 1576   // Ensure that a RegExp stack is allocated. | 
| 1587   ExternalReference address_of_regexp_stack_memory_address = | 1577   ExternalReference address_of_regexp_stack_memory_address = | 
| 1588       ExternalReference::address_of_regexp_stack_memory_address(isolate()); | 1578       ExternalReference::address_of_regexp_stack_memory_address(isolate()); | 
| 1589   ExternalReference address_of_regexp_stack_memory_size = | 1579   ExternalReference address_of_regexp_stack_memory_size = | 
| 1590       ExternalReference::address_of_regexp_stack_memory_size(isolate()); | 1580       ExternalReference::address_of_regexp_stack_memory_size(isolate()); | 
| 1591   __ mov(r3, Operand(address_of_regexp_stack_memory_size)); | 1581   __ mov(r2, Operand(address_of_regexp_stack_memory_size)); | 
| 1592   __ LoadP(r3, MemOperand(r3, 0)); | 1582   __ LoadAndTestP(r2, MemOperand(r2)); | 
| 1593   __ cmpi(r3, Operand::Zero()); |  | 
| 1594   __ beq(&runtime); | 1583   __ beq(&runtime); | 
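|  | // LoadAndTestP fuses the former load + compare-with-zero into one step; a | 
|  | // zero size means no RegExp stack has been allocated yet, so bail out. | 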
| 1595 | 1584 | 
| 1596   // Check that the first argument is a JSRegExp object. | 1585   // Check that the first argument is a JSRegExp object. | 
| 1597   __ LoadP(r3, MemOperand(sp, kJSRegExpOffset)); | 1586   __ LoadP(r2, MemOperand(sp, kJSRegExpOffset)); | 
| 1598   __ JumpIfSmi(r3, &runtime); | 1587   __ JumpIfSmi(r2, &runtime); | 
| 1599   __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE); | 1588   __ CompareObjectType(r2, r3, r3, JS_REGEXP_TYPE); | 
| 1600   __ bne(&runtime); | 1589   __ bne(&runtime); | 
| 1601 | 1590 | 
| 1602   // Check that the RegExp has been compiled (data contains a fixed array). | 1591   // Check that the RegExp has been compiled (data contains a fixed array). | 
| 1603   __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset)); | 1592   __ LoadP(regexp_data, FieldMemOperand(r2, JSRegExp::kDataOffset)); | 
| 1604   if (FLAG_debug_code) { | 1593   if (FLAG_debug_code) { | 
| 1605     __ TestIfSmi(regexp_data, r0); | 1594     __ TestIfSmi(regexp_data); | 
| 1606     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0); | 1595     __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0); | 
| 1607     __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE); | 1596     __ CompareObjectType(regexp_data, r2, r2, FIXED_ARRAY_TYPE); | 
| 1608     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected); | 1597     __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected); | 
| 1609   } | 1598   } | 
| 1610 | 1599 | 
| 1611   // regexp_data: RegExp data (FixedArray) | 1600   // regexp_data: RegExp data (FixedArray) | 
| 1612   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 1601   // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 
| 1613   __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 1602   __ LoadP(r2, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 
| 1614   // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu); | 1603   // DCHECK(Smi::FromInt(JSRegExp::IRREGEXP) < (char *)0xffffu); | 
| 1615   __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0); | 1604   __ CmpSmiLiteral(r2, Smi::FromInt(JSRegExp::IRREGEXP), r0); | 
| 1616   __ bne(&runtime); | 1605   __ bne(&runtime); | 
| 1617 | 1606 | 
| 1618   // regexp_data: RegExp data (FixedArray) | 1607   // regexp_data: RegExp data (FixedArray) | 
| 1619   // Check that the number of captures fits in the static offsets vector buffer. | 1608   // Check that the number of captures fits in the static offsets vector buffer. | 
| 1620   __ LoadP(r5, | 1609   __ LoadP(r4, | 
| 1621            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 1610            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 
| 1622   // Check (number_of_captures + 1) * 2 <= offsets vector size | 1611   // Check (number_of_captures + 1) * 2 <= offsets vector size | 
| 1623   // Or          number_of_captures * 2 <= offsets vector size - 2 | 1612   // Or          number_of_captures * 2 <= offsets vector size - 2 | 
| 1624   // SmiToShortArrayOffset accomplishes the multiplication by 2 and | 1613   // SmiToShortArrayOffset accomplishes the multiplication by 2 and | 
| 1625   // SmiUntag (which is a nop for 32-bit). | 1614   // SmiUntag (which is a nop for 32-bit). | 
| 1626   __ SmiToShortArrayOffset(r5, r5); | 1615   __ SmiToShortArrayOffset(r4, r4); | 
| 1627   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); | 1616   STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); | 
| 1628   __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); | 1617   __ CmpLogicalP(r4, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); | 
| 1629   __ bgt(&runtime); | 1618   __ bgt(&runtime); | 
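|  | // Example: a regexp with N capture groups needs 2 * (N + 1) offset slots | 
|  | // (start/end of the whole match plus of each group), which is the | 
|  | // equivalent check number_of_captures * 2 <= vector size - 2 used above. | 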
| 1630 | 1619 | 
| 1631   // Reset offset for possibly sliced string. | 1620   // Reset offset for possibly sliced string. | 
| 1632   __ li(r11, Operand::Zero()); | 1621   __ LoadImmP(ip, Operand::Zero()); | 
| 1633   __ LoadP(subject, MemOperand(sp, kSubjectOffset)); | 1622   __ LoadP(subject, MemOperand(sp, kSubjectOffset)); | 
| 1634   __ JumpIfSmi(subject, &runtime); | 1623   __ JumpIfSmi(subject, &runtime); | 
| 1635   __ mr(r6, subject);  // Make a copy of the original subject string. | 1624   __ LoadRR(r5, subject);  // Make a copy of the original subject string. | 
| 1636   __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset)); | 1625   __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| 1637   __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 1626   __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); | 
| 1638   // subject: subject string | 1627   // subject: subject string | 
| 1639   // r6: subject string | 1628   // r5: subject string | 
| 1640   // r3: subject string instance type | 1629   // r2: subject string instance type | 
| 1641   // regexp_data: RegExp data (FixedArray) | 1630   // regexp_data: RegExp data (FixedArray) | 
| 1642   // Handle subject string according to its encoding and representation: | 1631   // Handle subject string according to its encoding and representation: | 
| 1643   // (1) Sequential string?  If yes, go to (5). | 1632   // (1) Sequential string?  If yes, go to (5). | 
| 1644   // (2) Anything but sequential or cons?  If yes, go to (6). | 1633   // (2) Anything but sequential or cons?  If yes, go to (6). | 
| 1645   // (3) Cons string.  If the string is flat, replace subject with first string. | 1634   // (3) Cons string.  If the string is flat, replace subject with first string. | 
| 1646   //     Otherwise bailout. | 1635   //     Otherwise bailout. | 
| 1647   // (4) Is subject external?  If yes, go to (7). | 1636   // (4) Is subject external?  If yes, go to (7). | 
| 1648   // (5) Sequential string.  Load regexp code according to encoding. | 1637   // (5) Sequential string.  Load regexp code according to encoding. | 
| 1649   // (E) Carry on. | 1638   // (E) Carry on. | 
| 1650   /// [...] | 1639   /// [...] | 
| 1651 | 1640 | 
| 1652   // Deferred code at the end of the stub: | 1641   // Deferred code at the end of the stub: | 
| 1653   // (6) Not a long external string?  If yes, go to (8). | 1642   // (6) Not a long external string?  If yes, go to (8). | 
| 1654   // (7) External string.  Make it, offset-wise, look like a sequential string. | 1643   // (7) External string.  Make it, offset-wise, look like a sequential string. | 
| 1655   //     Go to (5). | 1644   //     Go to (5). | 
| 1656   // (8) Short external string or not a string?  If yes, bail out to runtime. | 1645   // (8) Short external string or not a string?  If yes, bail out to runtime. | 
| 1657   // (9) Sliced string.  Replace subject with parent.  Go to (4). | 1646   // (9) Sliced string.  Replace subject with parent.  Go to (4). | 
| 1658 | 1647 | 
| 1659   Label seq_string /* 5 */, external_string /* 7 */, check_underlying /* 4 */, | 1648   Label seq_string /* 5 */, external_string /* 7 */, check_underlying /* 4 */, | 
| 1660       not_seq_nor_cons /* 6 */, not_long_external /* 8 */; | 1649       not_seq_nor_cons /* 6 */, not_long_external /* 8 */; | 
| 1661 | 1650 | 
| 1662   // (1) Sequential string?  If yes, go to (5). | 1651   // (1) Sequential string?  If yes, go to (5). | 
| 1663   STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask | | 1652   STATIC_ASSERT((kIsNotStringMask | kStringRepresentationMask | | 
| 1664                  kShortExternalStringMask) == 0x93); | 1653                  kShortExternalStringMask) == 0x93); | 
| 1665   __ andi(r4, r3, Operand(kIsNotStringMask | kStringRepresentationMask | | 1654   __ mov(r3, Operand(kIsNotStringMask | kStringRepresentationMask | | 
| 1666                           kShortExternalStringMask)); | 1655                      kShortExternalStringMask)); | 
|  | 1656   __ AndP(r3, r2); | 
| 1667   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | 1657   STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | 
| 1668   __ beq(&seq_string, cr0);  // Go to (5). | 1658   __ beq(&seq_string);  // Go to (5). | 
| 1669 | 1659 | 
| 1670   // (2) Anything but sequential or cons?  If yes, go to (6). | 1660   // (2) Anything but sequential or cons?  If yes, go to (6). | 
| 1671   STATIC_ASSERT(kConsStringTag < kExternalStringTag); | 1661   STATIC_ASSERT(kConsStringTag < kExternalStringTag); | 
| 1672   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); | 1662   STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); | 
| 1673   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); | 1663   STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); | 
| 1674   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); | 1664   STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); | 
| 1675   STATIC_ASSERT(kExternalStringTag < 0xffffu); | 1665   STATIC_ASSERT(kExternalStringTag < 0xffffu); | 
| 1676   __ cmpi(r4, Operand(kExternalStringTag)); | 1666   __ CmpP(r3, Operand(kExternalStringTag)); | 
| 1677   __ bge(¬_seq_nor_cons);  // Go to (6). | 1667   __ bge(¬_seq_nor_cons);  // Go to (6). | 
| 1678 | 1668 | 
| 1679   // (3) Cons string.  Check that it's flat. | 1669   // (3) Cons string.  Check that it's flat. | 
| 1680   // Replace subject with first string and reload instance type. | 1670   // Replace subject with first string and reload instance type. | 
| 1681   __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset)); | 1671   __ LoadP(r2, FieldMemOperand(subject, ConsString::kSecondOffset)); | 
| 1682   __ CompareRoot(r3, Heap::kempty_stringRootIndex); | 1672   __ CompareRoot(r2, Heap::kempty_stringRootIndex); | 
| 1683   __ bne(&runtime); | 1673   __ bne(&runtime); | 
| 1684   __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 1674   __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 
| 1685 | 1675 | 
| 1686   // (4) Is subject external?  If yes, go to (7). | 1676   // (4) Is subject external?  If yes, go to (7). | 
| 1687   __ bind(&check_underlying); | 1677   __ bind(&check_underlying); | 
| 1688   __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset)); | 1678   __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| 1689   __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 1679   __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); | 
| 1690   STATIC_ASSERT(kSeqStringTag == 0); | 1680   STATIC_ASSERT(kSeqStringTag == 0); | 
| 1691   STATIC_ASSERT(kStringRepresentationMask == 3); | 1681   STATIC_ASSERT(kStringRepresentationMask == 3); | 
| 1692   __ andi(r0, r3, Operand(kStringRepresentationMask)); | 1682   __ tmll(r2, Operand(kStringRepresentationMask)); | 
| 1693   // The underlying external string is never a short external string. | 1683   // The underlying external string is never a short external string. | 
| 1694   STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); | 1684   STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); | 
| 1695   STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); | 1685   STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); | 
| 1696   __ bne(&external_string, cr0);  // Go to (7). | 1686   __ bne(&external_string);  // Go to (7). | 
| 1697 | 1687 | 
| 1698   // (5) Sequential string.  Load regexp code according to encoding. | 1688   // (5) Sequential string.  Load regexp code according to encoding. | 
| 1699   __ bind(&seq_string); | 1689   __ bind(&seq_string); | 
| 1700   // subject: sequential subject string (or look-alike, external string) | 1690   // subject: sequential subject string (or look-alike, external string) | 
| 1701   // r6: original subject string | 1691   // r5: original subject string | 
| 1702   // Load previous index and check range before r6 is overwritten.  We have to | 1692   // Load previous index and check range before r5 is overwritten.  We have to | 
| 1703   // use r6 instead of subject here because subject might have been only made | 1693   // use r5 instead of subject here because subject might have been only made | 
| 1704   // to look like a sequential string when it actually is an external string. | 1694   // to look like a sequential string when it actually is an external string. | 
| 1705   __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset)); | 1695   __ LoadP(r3, MemOperand(sp, kPreviousIndexOffset)); | 
| 1706   __ JumpIfNotSmi(r4, &runtime); | 1696   __ JumpIfNotSmi(r3, &runtime); | 
| 1707   __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset)); | 1697   __ LoadP(r5, FieldMemOperand(r5, String::kLengthOffset)); | 
| 1708   __ cmpl(r6, r4); | 1698   __ CmpLogicalP(r5, r3); | 
| 1709   __ ble(&runtime); | 1699   __ ble(&runtime); | 
| 1710   __ SmiUntag(r4); | 1700   __ SmiUntag(r3); | 
| 1711 | 1701 | 
| 1712   STATIC_ASSERT(4 == kOneByteStringTag); | 1702   STATIC_ASSERT(4 == kOneByteStringTag); | 
| 1713   STATIC_ASSERT(kTwoByteStringTag == 0); | 1703   STATIC_ASSERT(kTwoByteStringTag == 0); | 
| 1714   STATIC_ASSERT(kStringEncodingMask == 4); | 1704   STATIC_ASSERT(kStringEncodingMask == 4); | 
| 1715   __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC); | 1705   __ ExtractBitMask(r5, r2, kStringEncodingMask, SetRC); | 
| 1716   __ beq(&encoding_type_UC16, cr0); | 1706   __ beq(&encoding_type_UC16, Label::kNear); | 
| 1717   __ LoadP(code, | 1707   __ LoadP(code, | 
| 1718            FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset)); | 1708            FieldMemOperand(regexp_data, JSRegExp::kDataOneByteCodeOffset)); | 
| 1719   __ b(&br_over); | 1709   __ b(&br_over, Label::kNear); | 
| 1720   __ bind(&encoding_type_UC16); | 1710   __ bind(&encoding_type_UC16); | 
| 1721   __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); | 1711   __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); | 
| 1722   __ bind(&br_over); | 1712   __ bind(&br_over); | 
| 1723 | 1713 | 
| 1724   // (E) Carry on.  String handling is done. | 1714   // (E) Carry on.  String handling is done. | 
| 1725   // code: irregexp code | 1715   // code: irregexp code | 
| 1726   // Check that the irregexp code has been generated for the actual string | 1716   // Check that the irregexp code has been generated for the actual string | 
| 1727   // encoding. If it has, the field contains a code object; otherwise it contains | 1717   // encoding. If it has, the field contains a code object; otherwise it contains | 
| 1728   // a smi (code flushing support). | 1718   // a smi (code flushing support). | 
| 1729   __ JumpIfSmi(code, &runtime); | 1719   __ JumpIfSmi(code, &runtime); | 
| 1730 | 1720 | 
| 1731   // r4: previous index | 1721   // r3: previous index | 
| 1732   // r6: encoding of subject string (1 if one_byte, 0 if two_byte); | 1722   // r5: encoding of subject string (1 if one_byte, 0 if two_byte); | 
| 1733   // code: Address of generated regexp code | 1723   // code: Address of generated regexp code | 
| 1734   // subject: Subject string | 1724   // subject: Subject string | 
| 1735   // regexp_data: RegExp data (FixedArray) | 1725   // regexp_data: RegExp data (FixedArray) | 
| 1736   // All checks done. Now push arguments for native regexp code. | 1726   // All checks done. Now push arguments for native regexp code. | 
| 1737   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5); | 1727   __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r2, r4); | 
| 1738 | 1728 | 
| 1739   // Isolates: note we add an additional parameter here (isolate pointer). | 1729   // Isolates: note we add an additional parameter here (isolate pointer). | 
| 1740   const int kRegExpExecuteArguments = 10; | 1730   const int kRegExpExecuteArguments = 10; | 
| 1741   const int kParameterRegisters = 8; | 1731   const int kParameterRegisters = 5; | 
| 1742   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); | 1732   __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); | 
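|  | // Only five integer argument registers (r2-r6) exist on zLinux, so | 
|  | // arguments 6 through 10 are written into the exit frame's outgoing | 
|  | // parameter area by the StoreP sequences below. | 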
| 1743 | 1733 | 
| 1744   // Stack pointer now points to cell where return address is to be written. | 1734   // Stack pointer now points to cell where return address is to be written. | 
| 1745   // Arguments are before that on the stack or in registers. | 1735   // Arguments are before that on the stack or in registers. | 
| 1746 | 1736 | 
| 1747   // Argument 10 (in stack parameter area): Pass current isolate address. | 1737   // Argument 10 (in stack parameter area): Pass current isolate address. | 
| 1748   __ mov(r3, Operand(ExternalReference::isolate_address(isolate()))); | 1738   __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); | 
| 1749   __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); | 1739   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize + | 
|  | 1740                                    4 * kPointerSize)); | 
| 1750 | 1741 | 
| 1751   // Argument 9 is a dummy that reserves the space used for | 1742   // Argument 9 is a dummy that reserves the space used for | 
| 1752   // the return address added by the ExitFrame in native calls. | 1743   // the return address added by the ExitFrame in native calls. | 
|  | 1744   __ mov(r2, Operand::Zero()); | 
|  | 1745   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize + | 
|  | 1746                                    3 * kPointerSize)); | 
| 1753 | 1747 | 
| 1754   // Argument 8 (r10): Indicate that this is a direct call from JavaScript. | 1748   // Argument 8: Indicate that this is a direct call from JavaScript. | 
| 1755   __ li(r10, Operand(1)); | 1749   __ mov(r2, Operand(1)); | 
|  | 1750   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize + | 
|  | 1751                                    2 * kPointerSize)); | 
| 1756 | 1752 | 
| 1757   // Argument 7 (r9): Start (high end) of backtracking stack memory area. | 1753   // Argument 7: Start (high end) of backtracking stack memory area. | 
| 1758   __ mov(r3, Operand(address_of_regexp_stack_memory_address)); | 1754   __ mov(r2, Operand(address_of_regexp_stack_memory_address)); | 
| 1759   __ LoadP(r3, MemOperand(r3, 0)); | 1755   __ LoadP(r2, MemOperand(r2, 0)); | 
| 1760   __ mov(r5, Operand(address_of_regexp_stack_memory_size)); | 1756   __ mov(r1, Operand(address_of_regexp_stack_memory_size)); | 
| 1761   __ LoadP(r5, MemOperand(r5, 0)); | 1757   __ LoadP(r1, MemOperand(r1, 0)); | 
| 1762   __ add(r9, r3, r5); | 1758   __ AddP(r2, r1); | 
|  | 1759   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize + | 
|  | 1760                                    1 * kPointerSize)); | 
| 1763 | 1761 | 
| 1764   // Argument 6 (r8): Set the number of capture registers to zero to force | 1762   // Argument 6: Set the number of capture registers to zero to force | 
| 1765   // global egexps to behave as non-global.  This does not affect non-global | 1763   // global egexps to behave as non-global.  This does not affect non-global | 
| 1766   // regexps. | 1764   // regexps. | 
| 1767   __ li(r8, Operand::Zero()); | 1765   __ mov(r2, Operand::Zero()); | 
|  | 1766   __ StoreP(r2, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize + | 
|  | 1767                                    0 * kPointerSize)); | 
| 1768 | 1768 | 
| 1769   // Argument 5 (r7): static offsets vector buffer. | 1769   // Argument 1 (r2): Subject string. | 
|  | 1770   // Reload the original subject string from the previous stack | 
|  | 1771   // frame. Therefore we have to use fp, which points exactly to 15 pointer | 
|  | 1772   // sizes below the previous sp. (Because creating a new stack frame pushes | 
|  | 1773   // the previous fp onto the stack, moves sp up by 2 * kPointerSize, and | 
|  | 1774   // 13 registers were saved on the stack previously.) | 
|  | 1775   __ LoadP(r2, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); | 
|  | 1776 | 
|  | 1777   // Argument 2 (r3): Previous index. | 
|  | 1778   // Already there | 
|  | 1779   __ AddP(r1, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); | 
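|  | // r1 now holds the untagged address of the subject's first character and | 
|  | // serves as the base for the start/end data pointers computed below. | 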
|  | 1780 | 
|  | 1781   // Argument 5 (r6): static offsets vector buffer. | 
| 1770   __ mov( | 1782   __ mov( | 
| 1771       r7, | 1783       r6, | 
| 1772       Operand(ExternalReference::address_of_static_offsets_vector(isolate()))); | 1784       Operand(ExternalReference::address_of_static_offsets_vector(isolate()))); | 
| 1773 | 1785 | 
| 1774   // For arguments 4 (r6) and 3 (r5) get string length, calculate start of data | 1786   // For arguments 4 (r5) and 3 (r4) get string length, calculate start of data | 
| 1775   // and calculate the shift of the index (0 for one-byte and 1 for two-byte). | 1787   // and calculate the shift of the index (0 for one-byte and 1 for two-byte). | 
| 1776   __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); | 1788   __ XorP(r5, Operand(1)); | 
| 1777   __ xori(r6, r6, Operand(1)); |  | 
| 1778   // Load the length from the original subject string from the previous stack |  | 
| 1779   // frame. Therefore we have to use fp, which points exactly to two pointer |  | 
| 1780   // sizes below the previous sp. (Because creating a new stack frame pushes |  | 
| 1781   // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) |  | 
| 1782   __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); |  | 
| 1783   // If slice offset is not 0, load the length from the original sliced string. | 1789   // If slice offset is not 0, load the length from the original sliced string. | 
| 1784   // Argument 4, r6: End of string data | 1790   // Argument 3, r4: Start of string data | 
| 1785   // Argument 3, r5: Start of string data |  | 
| 1786   // Prepare start and end index of the input. | 1791   // Prepare start and end index of the input. | 
| 1787   __ ShiftLeft_(r11, r11, r6); | 1792   __ ShiftLeftP(ip, ip, r5); | 
| 1788   __ add(r11, r18, r11); | 1793   __ AddP(ip, r1, ip); | 
| 1789   __ ShiftLeft_(r5, r4, r6); | 1794   __ ShiftLeftP(r4, r3, r5); | 
| 1790   __ add(r5, r11, r5); | 1795   __ AddP(r4, ip, r4); | 
| 1791 | 1796 | 
| 1792   __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset)); | 1797   // Argument 4, r5: End of string data | 
| 1793   __ SmiUntag(r18); | 1798   __ LoadP(r1, FieldMemOperand(r2, String::kLengthOffset)); | 
| 1794   __ ShiftLeft_(r6, r18, r6); | 1799   __ SmiUntag(r1); | 
| 1795   __ add(r6, r11, r6); | 1800   __ ShiftLeftP(r0, r1, r5); | 
| 1796 | 1801   __ AddP(r5, ip, r0); | 
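|  | // ip is the data start biased by the slice offset; r4 = ip + (previous | 
|  | // index << shift) and r5 = ip + (length << shift) are the addresses of | 
|  | // the first character to scan and of the end of the string. | 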
| 1797   // Argument 2 (r4): Previous index. |  | 
| 1798   // Already there |  | 
| 1799 |  | 
| 1800   // Argument 1 (r3): Subject string. |  | 
| 1801   __ mr(r3, subject); |  | 
| 1802 | 1802 | 
| 1803   // Locate the code entry and call it. | 1803   // Locate the code entry and call it. | 
| 1804   __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | 1804   __ AddP(code, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 1805 | 1805 | 
| 1806   DirectCEntryStub stub(isolate()); | 1806   DirectCEntryStub stub(isolate()); | 
| 1807   stub.GenerateCall(masm, code); | 1807   stub.GenerateCall(masm, code); | 
| 1808 | 1808 | 
| 1809   __ LeaveExitFrame(false, no_reg, true); | 1809   __ LeaveExitFrame(false, no_reg, true); | 
| 1810 | 1810 | 
| 1811   // r3: result (int32) | 1811   // r2: result (int32) | 
| 1812   // subject: subject string (callee saved) | 1812   // subject: subject string -- reloaded from the stack below | 
|  | 1813   __ LoadP(subject, MemOperand(sp, kSubjectOffset)); | 
|  | 1814 | 
| 1813   // regexp_data: RegExp data (callee saved) | 1815   // regexp_data: RegExp data (callee saved) | 
| 1814   // last_match_info_elements: Last match info elements (callee saved) | 1816   // last_match_info_elements: Last match info elements (callee saved) | 
| 1815   // Check the result. | 1817   // Check the result. | 
| 1816   Label success; | 1818   Label success; | 
| 1817   __ cmpwi(r3, Operand(1)); | 1819   __ Cmp32(r2, Operand(1)); | 
| 1818   // We expect exactly one result since we force the called regexp to behave | 1820   // We expect exactly one result since we force the called regexp to behave | 
| 1819   // as non-global. | 1821   // as non-global. | 
| 1820   __ beq(&success); | 1822   __ beq(&success); | 
| 1821   Label failure; | 1823   Label failure; | 
| 1822   __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::FAILURE)); | 1824   __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::FAILURE)); | 
| 1823   __ beq(&failure); | 1825   __ beq(&failure); | 
| 1824   __ cmpwi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | 1826   __ Cmp32(r2, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | 
| 1825   // If it is not an exception, it can only be a retry. Handle that in the runtime system. | 1827   // If it is not an exception, it can only be a retry. Handle that in the runtime system. | 
| 1826   __ bne(&runtime); | 1828   __ bne(&runtime); | 
| 1827   // Result must now be exception. If there is no pending exception already, a | 1829   // Result must now be exception. If there is no pending exception already, a | 
| 1828   // stack overflow (on the backtrack stack) was detected in RegExp code but | 1830   // stack overflow (on the backtrack stack) was detected in RegExp code but | 
| 1829   // the exception has not been created yet. Handle that in the runtime system. | 1831   // the exception has not been created yet. Handle that in the runtime system. | 
| 1830   // TODO(592): Rerunning the RegExp to get the stack overflow exception. | 1832   // TODO(592): Rerunning the RegExp to get the stack overflow exception. | 
| 1831   __ mov(r4, Operand(isolate()->factory()->the_hole_value())); | 1833   __ mov(r3, Operand(isolate()->factory()->the_hole_value())); | 
| 1832   __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 1834   __ mov(r4, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 
| 1833                                        isolate()))); | 1835                                        isolate()))); | 
| 1834   __ LoadP(r3, MemOperand(r5, 0)); | 1836   __ LoadP(r2, MemOperand(r4, 0)); | 
| 1835   __ cmp(r3, r4); | 1837   __ CmpP(r2, r3); | 
| 1836   __ beq(&runtime); | 1838   __ beq(&runtime); | 
| 1837 | 1839 | 
| 1838   // For exception, throw the exception again. | 1840   // For exception, throw the exception again. | 
| 1839   __ TailCallRuntime(Runtime::kRegExpExecReThrow); | 1841   __ TailCallRuntime(Runtime::kRegExpExecReThrow); | 
| 1840 | 1842 | 
| 1841   __ bind(&failure); | 1843   __ bind(&failure); | 
| 1842   // For failure and exception return null. | 1844   // For failure and exception return null. | 
| 1843   __ mov(r3, Operand(isolate()->factory()->null_value())); | 1845   __ mov(r2, Operand(isolate()->factory()->null_value())); | 
| 1844   __ addi(sp, sp, Operand(4 * kPointerSize)); | 1846   __ la(sp, MemOperand(sp, (4 * kPointerSize))); | 
| 1845   __ Ret(); | 1847   __ Ret(); | 
| 1846 | 1848 | 
| 1847   // Process the result from the native regexp code. | 1849   // Process the result from the native regexp code. | 
| 1848   __ bind(&success); | 1850   __ bind(&success); | 
| 1849   __ LoadP(r4, | 1851   __ LoadP(r3, | 
| 1850            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 1852            FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 
| 1851   // Calculate number of capture registers (number_of_captures + 1) * 2. | 1853   // Calculate number of capture registers (number_of_captures + 1) * 2. | 
| 1852   // SmiToShortArrayOffset accomplishes the multiplication by 2 and | 1854   // SmiToShortArrayOffset accomplishes the multiplication by 2 and | 
| 1853   // SmiUntag (which is a nop for 32-bit). | 1855   // SmiUntag (which is a nop for 32-bit). | 
| 1854   __ SmiToShortArrayOffset(r4, r4); | 1856   __ SmiToShortArrayOffset(r3, r3); | 
| 1855   __ addi(r4, r4, Operand(2)); | 1857   __ AddP(r3, Operand(2)); | 
| 1856 | 1858 | 
| 1857   __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset)); | 1859   __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset)); | 
| 1858   __ JumpIfSmi(r3, &runtime); | 1860   __ JumpIfSmi(r2, &runtime); | 
| 1859   __ CompareObjectType(r3, r5, r5, JS_ARRAY_TYPE); | 1861   __ CompareObjectType(r2, r4, r4, JS_ARRAY_TYPE); | 
| 1860   __ bne(&runtime); | 1862   __ bne(&runtime); | 
| 1861   // Check that the JSArray is in fast case. | 1863   // Check that the JSArray is in fast case. | 
| 1862   __ LoadP(last_match_info_elements, | 1864   __ LoadP(last_match_info_elements, | 
| 1863            FieldMemOperand(r3, JSArray::kElementsOffset)); | 1865            FieldMemOperand(r2, JSArray::kElementsOffset)); | 
| 1864   __ LoadP(r3, | 1866   __ LoadP(r2, | 
| 1865            FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 1867            FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 
| 1866   __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex); | 1868   __ CompareRoot(r2, Heap::kFixedArrayMapRootIndex); | 
| 1867   __ bne(&runtime); | 1869   __ bne(&runtime); | 
| 1868   // Check that the last match info has space for the capture registers and the | 1870   // Check that the last match info has space for the capture registers and the | 
| 1869   // additional information. | 1871   // additional information. | 
| 1870   __ LoadP( | 1872   __ LoadP( | 
| 1871       r3, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | 1873       r2, FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | 
| 1872   __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead)); | 1874   __ AddP(r4, r3, Operand(RegExpImpl::kLastMatchOverhead)); | 
| 1873   __ SmiUntag(r0, r3); | 1875   __ SmiUntag(r0, r2); | 
| 1874   __ cmp(r5, r0); | 1876   __ CmpP(r4, r0); | 
| 1875   __ bgt(&runtime); | 1877   __ bgt(&runtime); | 
| 1876 | 1878 | 
| 1877   // r4: number of capture registers | 1879   // r3: number of capture registers | 
| 1878   // subject: subject string | 1880   // subject: subject string | 
| 1879   // Store the capture count. | 1881   // Store the capture count. | 
| 1880   __ SmiTag(r5, r4); | 1882   __ SmiTag(r4, r3); | 
| 1881   __ StoreP(r5, FieldMemOperand(last_match_info_elements, | 1883   __ StoreP(r4, FieldMemOperand(last_match_info_elements, | 
| 1882                                 RegExpImpl::kLastCaptureCountOffset), | 1884                                 RegExpImpl::kLastCaptureCountOffset)); | 
| 1883             r0); |  | 
| 1884   // Store last subject and last input. | 1885   // Store last subject and last input. | 
| 1885   __ StoreP(subject, FieldMemOperand(last_match_info_elements, | 1886   __ StoreP(subject, FieldMemOperand(last_match_info_elements, | 
| 1886                                      RegExpImpl::kLastSubjectOffset), | 1887                                      RegExpImpl::kLastSubjectOffset)); | 
| 1887             r0); | 1888   __ LoadRR(r4, subject); | 
| 1888   __ mr(r5, subject); |  | 
| 1889   __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset, | 1889   __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastSubjectOffset, | 
| 1890                       subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs); | 1890                       subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); | 
| 1891   __ mr(subject, r5); | 1891   __ LoadRR(subject, r4); | 
| 1892   __ StoreP(subject, FieldMemOperand(last_match_info_elements, | 1892   __ StoreP(subject, FieldMemOperand(last_match_info_elements, | 
| 1893                                      RegExpImpl::kLastInputOffset), | 1893                                      RegExpImpl::kLastInputOffset)); | 
| 1894             r0); |  | 
| 1895   __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset, | 1894   __ RecordWriteField(last_match_info_elements, RegExpImpl::kLastInputOffset, | 
| 1896                       subject, r10, kLRHasNotBeenSaved, kDontSaveFPRegs); | 1895                       subject, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); | 
| 1897 | 1896 | 
| 1898   // Get the static offsets vector filled by the native regexp code. | 1897   // Get the static offsets vector filled by the native regexp code. | 
| 1899   ExternalReference address_of_static_offsets_vector = | 1898   ExternalReference address_of_static_offsets_vector = | 
| 1900       ExternalReference::address_of_static_offsets_vector(isolate()); | 1899       ExternalReference::address_of_static_offsets_vector(isolate()); | 
| 1901   __ mov(r5, Operand(address_of_static_offsets_vector)); | 1900   __ mov(r4, Operand(address_of_static_offsets_vector)); | 
| 1902 | 1901 | 
| 1903   // r4: number of capture registers | 1902   // r3: number of capture registers | 
| 1904   // r5: offsets vector | 1903   // r4: offsets vector | 
| 1905   Label next_capture; | 1904   Label next_capture; | 
| 1906   // Capture register counter starts from the number of capture registers and | 1905   // Capture register counter starts from the number of capture registers and | 
| 1907   // counts down until wrapping after zero. | 1906   // counts down until wrapping after zero. | 
| 1908   __ addi( | 1907   __ AddP( | 
| 1909       r3, last_match_info_elements, | 1908       r2, last_match_info_elements, | 
| 1910       Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize)); | 1909       Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - kPointerSize)); | 
| 1911   __ addi(r5, r5, Operand(-kIntSize));  // bias down for lwzu | 1910   __ AddP(r4, Operand(-kIntSize));  // bias down for the lwzu-style load | 
| 1912   __ mtctr(r4); |  | 
| 1913   __ bind(&next_capture); | 1911   __ bind(&next_capture); | 
| 1914   // Read the value from the static offsets vector buffer. | 1912   // Read the value from the static offsets vector buffer. | 
| 1915   __ lwzu(r6, MemOperand(r5, kIntSize)); | 1913   __ ly(r5, MemOperand(r4, kIntSize)); | 
|  | 1914   __ lay(r4, MemOperand(r4, kIntSize)); | 
| 1916   // Store the smi value in the last match info. | 1915   // Store the smi value in the last match info. | 
| 1917   __ SmiTag(r6); | 1916   __ SmiTag(r5); | 
| 1918   __ StorePU(r6, MemOperand(r3, kPointerSize)); | 1917   __ StoreP(r5, MemOperand(r2, kPointerSize)); | 
| 1919   __ bdnz(&next_capture); | 1918   __ lay(r2, MemOperand(r2, kPointerSize)); | 
|  | 1919   __ BranchOnCount(r3, &next_capture); | 
| 1920 | 1920 | 
| 1921   // Return last match info. | 1921   // Return last match info. | 
| 1922   __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset)); | 1922   __ LoadP(r2, MemOperand(sp, kLastMatchInfoOffset)); | 
| 1923   __ addi(sp, sp, Operand(4 * kPointerSize)); | 1923   __ la(sp, MemOperand(sp, (4 * kPointerSize))); | 
| 1924   __ Ret(); | 1924   __ Ret(); | 
| 1925 | 1925 | 
| 1926   // Do the runtime call to execute the regexp. | 1926   // Do the runtime call to execute the regexp. | 
| 1927   __ bind(&runtime); | 1927   __ bind(&runtime); | 
| 1928   __ TailCallRuntime(Runtime::kRegExpExec); | 1928   __ TailCallRuntime(Runtime::kRegExpExec); | 
| 1929 | 1929 | 
| 1930   // Deferred code for string handling. | 1930   // Deferred code for string handling. | 
| 1931   // (6) Not a long external string?  If yes, go to (8). | 1931   // (6) Not a long external string?  If yes, go to (8). | 
| 1932   __ bind(&not_seq_nor_cons); | 1932   __ bind(&not_seq_nor_cons); | 
| 1933   // Compare flags are still set. | 1933   // Compare flags are still set. | 
| 1934   __ bgt(&not_long_external);  // Go to (8). | 1934   __ bgt(&not_long_external);  // Go to (8). | 
| 1935 | 1935 | 
| 1936   // (7) External string.  Make it, offset-wise, look like a sequential string. | 1936   // (7) External string.  Make it, offset-wise, look like a sequential string. | 
| 1937   __ bind(&external_string); | 1937   __ bind(&external_string); | 
| 1938   __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset)); | 1938   __ LoadP(r2, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| 1939   __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 1939   __ LoadlB(r2, FieldMemOperand(r2, Map::kInstanceTypeOffset)); | 
| 1940   if (FLAG_debug_code) { | 1940   if (FLAG_debug_code) { | 
| 1941     // Assert that we do not have a cons or slice (indirect strings) here. | 1941     // Assert that we do not have a cons or slice (indirect strings) here. | 
| 1942     // Sequential strings have already been ruled out. | 1942     // Sequential strings have already been ruled out. | 
| 1943     STATIC_ASSERT(kIsIndirectStringMask == 1); | 1943     STATIC_ASSERT(kIsIndirectStringMask == 1); | 
| 1944     __ andi(r0, r3, Operand(kIsIndirectStringMask)); | 1944     __ tmll(r2, Operand(kIsIndirectStringMask)); | 
| 1945     __ Assert(eq, kExternalStringExpectedButNotFound, cr0); | 1945     __ Assert(eq, kExternalStringExpectedButNotFound, cr0); | 
| 1946   } | 1946   } | 
| 1947   __ LoadP(subject, | 1947   __ LoadP(subject, | 
| 1948            FieldMemOperand(subject, ExternalString::kResourceDataOffset)); | 1948            FieldMemOperand(subject, ExternalString::kResourceDataOffset)); | 
| 1949   // Move the pointer so that offset-wise, it looks like a sequential string. | 1949   // Move the pointer so that offset-wise, it looks like a sequential string. | 
| 1950   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 1950   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 
| 1951   __ subi(subject, subject, | 1951   __ SubP(subject, subject, | 
| 1952           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 1952           Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
| 1953   __ b(&seq_string);  // Go to (5). | 1953   __ b(&seq_string);  // Go to (5). | 
| 1954 | 1954 | 
| 1955   // (8) Short external string or not a string?  If yes, bail out to runtime. | 1955   // (8) Short external string or not a string?  If yes, bail out to runtime. | 
| 1956   __ bind(&not_long_external); | 1956   __ bind(&not_long_external); | 
| 1957   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0); | 1957   STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0); | 
| 1958   __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask)); | 1958   __ mov(r0, Operand(kIsNotStringMask | kShortExternalStringMask)); | 
| 1959   __ bne(&runtime, cr0); | 1959   __ AndP(r0, r3); | 
|  | 1960   __ bne(&runtime); | 
| 1960 | 1961 | 
| 1961   // (9) Sliced string.  Replace subject with parent.  Go to (4). | 1962   // (9) Sliced string.  Replace subject with parent.  Go to (4). | 
| 1962   // Load offset into r11 and replace subject string with parent. | 1963   // Load offset into ip and replace subject string with parent. | 
| 1963   __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset)); | 1964   __ LoadP(ip, FieldMemOperand(subject, SlicedString::kOffsetOffset)); | 
| 1964   __ SmiUntag(r11); | 1965   __ SmiUntag(ip); | 
| 1965   __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | 1966   __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | 
| 1966   __ b(&check_underlying);  // Go to (4). | 1967   __ b(&check_underlying);  // Go to (4). | 
| 1967 #endif  // V8_INTERPRETED_REGEXP | 1968 #endif  // V8_INTERPRETED_REGEXP | 
| 1968 } | 1969 } | 
| 1969 | 1970 | 
| 1970 |  | 
| 1971 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) { | 1971 static void CallStubInRecordCallTarget(MacroAssembler* masm, CodeStub* stub) { | 
| 1972   // r3 : number of arguments to the construct function | 1972   // r2 : number of arguments to the construct function | 
| 1973   // r4 : the function to call | 1973   // r3 : the function to call | 
| 1974   // r5 : feedback vector | 1974   // r4 : feedback vector | 
| 1975   // r6 : slot in feedback vector (Smi) | 1975   // r5 : slot in feedback vector (Smi) | 
| 1976   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 1976   FrameScope scope(masm, StackFrame::INTERNAL); | 
| 1977 | 1977 | 
| 1978   // Number-of-arguments register must be smi-tagged to call out. | 1978   // Number-of-arguments register must be smi-tagged to call out. | 
| 1979   __ SmiTag(r3); | 1979   __ SmiTag(r2); | 
| 1980   __ Push(r6, r5, r4, r3); | 1980   __ Push(r5, r4, r3, r2); | 
| 1981 | 1981 | 
| 1982   __ CallStub(stub); | 1982   __ CallStub(stub); | 
| 1983 | 1983 | 
| 1984   __ Pop(r6, r5, r4, r3); | 1984   __ Pop(r5, r4, r3, r2); | 
| 1985   __ SmiUntag(r3); | 1985   __ SmiUntag(r2); | 
| 1986 } | 1986 } | 
| 1987 | 1987 | 
| 1988 |  | 
| 1989 static void GenerateRecordCallTarget(MacroAssembler* masm) { | 1988 static void GenerateRecordCallTarget(MacroAssembler* masm) { | 
| 1990   // Cache the called function in a feedback vector slot.  Cache states | 1989   // Cache the called function in a feedback vector slot.  Cache states | 
| 1991   // are uninitialized, monomorphic (indicated by a JSFunction), and | 1990   // are uninitialized, monomorphic (indicated by a JSFunction), and | 
| 1992   // megamorphic. | 1991   // megamorphic. | 
| 1993   // r3 : number of arguments to the construct function | 1992   // r2 : number of arguments to the construct function | 
| 1994   // r4 : the function to call | 1993   // r3 : the function to call | 
| 1995   // r5 : feedback vector | 1994   // r4 : feedback vector | 
| 1996   // r6 : slot in feedback vector (Smi) | 1995   // r5 : slot in feedback vector (Smi) | 
| 1997   Label initialize, done, miss, megamorphic, not_array_function; | 1996   Label initialize, done, miss, megamorphic, not_array_function; | 
| 1998 | 1997 | 
| 1999   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()), | 1998   DCHECK_EQ(*TypeFeedbackVector::MegamorphicSentinel(masm->isolate()), | 
| 2000             masm->isolate()->heap()->megamorphic_symbol()); | 1999             masm->isolate()->heap()->megamorphic_symbol()); | 
| 2001   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()), | 2000   DCHECK_EQ(*TypeFeedbackVector::UninitializedSentinel(masm->isolate()), | 
| 2002             masm->isolate()->heap()->uninitialized_symbol()); | 2001             masm->isolate()->heap()->uninitialized_symbol()); | 
| 2003 | 2002 | 
| 2004   // Load the cache state into r8. | 2003   // Load the cache state into r7. | 
| 2005   __ SmiToPtrArrayOffset(r8, r6); | 2004   __ SmiToPtrArrayOffset(r7, r5); | 
| 2006   __ add(r8, r5, r8); | 2005   __ AddP(r7, r4, r7); | 
| 2007   __ LoadP(r8, FieldMemOperand(r8, FixedArray::kHeaderSize)); | 2006   __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize)); | 
| 2008 | 2007 | 
| 2009   // A monomorphic cache hit or an already megamorphic state: invoke the | 2008   // A monomorphic cache hit or an already megamorphic state: invoke the | 
| 2010   // function without changing the state. | 2009   // function without changing the state. | 
| 2011   // We don't know if r8 is a WeakCell or a Symbol, but it's harmless to read at | 2010   // We don't know if r7 is a WeakCell or a Symbol, but it's harmless to read at | 
| 2012   // this position in a symbol (see static asserts in type-feedback-vector.h). | 2011   // this position in a symbol (see static asserts in type-feedback-vector.h). | 
| 2013   Label check_allocation_site; | 2012   Label check_allocation_site; | 
| 2014   Register feedback_map = r9; | 2013   Register feedback_map = r8; | 
| 2015   Register weak_value = r10; | 2014   Register weak_value = r9; | 
| 2016   __ LoadP(weak_value, FieldMemOperand(r8, WeakCell::kValueOffset)); | 2015   __ LoadP(weak_value, FieldMemOperand(r7, WeakCell::kValueOffset)); | 
| 2017   __ cmp(r4, weak_value); | 2016   __ CmpP(r3, weak_value); | 
| 2018   __ beq(&done); | 2017   __ beq(&done); | 
| 2019   __ CompareRoot(r8, Heap::kmegamorphic_symbolRootIndex); | 2018   __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex); | 
| 2020   __ beq(&done); | 2019   __ beq(&done); | 
| 2021   __ LoadP(feedback_map, FieldMemOperand(r8, HeapObject::kMapOffset)); | 2020   __ LoadP(feedback_map, FieldMemOperand(r7, HeapObject::kMapOffset)); | 
| 2022   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex); | 2021   __ CompareRoot(feedback_map, Heap::kWeakCellMapRootIndex); | 
| 2023   __ bne(&check_allocation_site); | 2022   __ bne(&check_allocation_site); | 
| 2024 | 2023 | 
| 2025   // If the weak cell is cleared, we have a new chance to become monomorphic. | 2024   // If the weak cell is cleared, we have a new chance to become monomorphic. | 
| 2026   __ JumpIfSmi(weak_value, &initialize); | 2025   __ JumpIfSmi(weak_value, &initialize); | 
| 2027   __ b(&megamorphic); | 2026   __ b(&megamorphic); | 
| 2028 | 2027 | 
| 2029   __ bind(&check_allocation_site); | 2028   __ bind(&check_allocation_site); | 
| 2030   // If we came here, we need to see if we are the array function. | 2029   // If we came here, we need to see if we are the array function. | 
| 2031   // If we didn't have a matching function, and we didn't find the megamorphic | 2030   // If we didn't have a matching function, and we didn't find the megamorphic | 
| 2032   // sentinel, then we have in the slot either some other function or an | 2031   // sentinel, then we have in the slot either some other function or an | 
| 2033   // AllocationSite. | 2032   // AllocationSite. | 
| 2034   __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex); | 2033   __ CompareRoot(feedback_map, Heap::kAllocationSiteMapRootIndex); | 
| 2035   __ bne(&miss); | 2034   __ bne(&miss); | 
| 2036 | 2035 | 
| 2037   // Make sure the function is the Array() function | 2036   // Make sure the function is the Array() function | 
| 2038   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8); | 2037   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7); | 
| 2039   __ cmp(r4, r8); | 2038   __ CmpP(r3, r7); | 
| 2040   __ bne(&megamorphic); | 2039   __ bne(&megamorphic); | 
| 2041   __ b(&done); | 2040   __ b(&done); | 
| 2042 | 2041 | 
| 2043   __ bind(&miss); | 2042   __ bind(&miss); | 
| 2044 | 2043 | 
| 2045   // A monomorphic miss (i.e., here the cache is not uninitialized) goes | 2044   // A monomorphic miss (i.e., here the cache is not uninitialized) goes | 
| 2046   // megamorphic. | 2045   // megamorphic. | 
| 2047   __ CompareRoot(r8, Heap::kuninitialized_symbolRootIndex); | 2046   __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex); | 
| 2048   __ beq(&initialize); | 2047   __ beq(&initialize); | 
| 2049   // MegamorphicSentinel is an immortal immovable object (undefined) so no | 2048   // MegamorphicSentinel is an immortal immovable object (undefined) so no | 
| 2050   // write-barrier is needed. | 2049   // write-barrier is needed. | 
| 2051   __ bind(&megamorphic); | 2050   __ bind(&megamorphic); | 
| 2052   __ SmiToPtrArrayOffset(r8, r6); | 2051   __ SmiToPtrArrayOffset(r7, r5); | 
| 2053   __ add(r8, r5, r8); | 2052   __ AddP(r7, r4, r7); | 
| 2054   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex); | 2053   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex); | 
| 2055   __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0); | 2054   __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0); | 
| 2056   __ jmp(&done); | 2055   __ jmp(&done); | 
| 2057 | 2056 | 
| 2058   // An uninitialized cache is patched with the function | 2057   // An uninitialized cache is patched with the function | 
| 2059   __ bind(&initialize); | 2058   __ bind(&initialize); | 
| 2060 | 2059 | 
| 2061   // Make sure the function is the Array() function. | 2060   // Make sure the function is the Array() function. | 
| 2062   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8); | 2061   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7); | 
| 2063   __ cmp(r4, r8); | 2062   __ CmpP(r3, r7); | 
| 2064   __ bne(&not_array_function); | 2063   __ bne(&not_array_function); | 
| 2065 | 2064 | 
| 2066   // The target function is the Array constructor, | 2065   // The target function is the Array constructor, | 
| 2067   // Create an AllocationSite if we don't already have it, store it in the | 2066   // Create an AllocationSite if we don't already have it, store it in the | 
| 2068   // slot. | 2067   // slot. | 
| 2069   CreateAllocationSiteStub create_stub(masm->isolate()); | 2068   CreateAllocationSiteStub create_stub(masm->isolate()); | 
| 2070   CallStubInRecordCallTarget(masm, &create_stub); | 2069   CallStubInRecordCallTarget(masm, &create_stub); | 
| 2071   __ b(&done); | 2070   __ b(&done); | 
| 2072 | 2071 | 
| 2073   __ bind(&not_array_function); | 2072   __ bind(&not_array_function); | 
| 2074 | 2073 | 
| 2075   CreateWeakCellStub weak_cell_stub(masm->isolate()); | 2074   CreateWeakCellStub weak_cell_stub(masm->isolate()); | 
| 2076   CallStubInRecordCallTarget(masm, &weak_cell_stub); | 2075   CallStubInRecordCallTarget(masm, &weak_cell_stub); | 
| 2077   __ bind(&done); | 2076   __ bind(&done); | 
| 2078 } | 2077 } | 
| 2079 | 2078 | 
| 2080 |  | 
| 2081 void CallConstructStub::Generate(MacroAssembler* masm) { | 2079 void CallConstructStub::Generate(MacroAssembler* masm) { | 
| 2082   // r3 : number of arguments | 2080   // r2 : number of arguments | 
| 2083   // r4 : the function to call | 2081   // r3 : the function to call | 
| 2084   // r5 : feedback vector | 2082   // r4 : feedback vector | 
| 2085   // r6 : slot in feedback vector (Smi, for RecordCallTarget) | 2083   // r5 : slot in feedback vector (Smi, for RecordCallTarget) | 
| 2086 | 2084 | 
| 2087   Label non_function; | 2085   Label non_function; | 
| 2088   // Check that the function is not a smi. | 2086   // Check that the function is not a smi. | 
| 2089   __ JumpIfSmi(r4, &non_function); | 2087   __ JumpIfSmi(r3, &non_function); | 
| 2090   // Check that the function is a JSFunction. | 2088   // Check that the function is a JSFunction. | 
| 2091   __ CompareObjectType(r4, r8, r8, JS_FUNCTION_TYPE); | 2089   __ CompareObjectType(r3, r7, r7, JS_FUNCTION_TYPE); | 
| 2092   __ bne(&non_function); | 2090   __ bne(&non_function); | 
| 2093 | 2091 | 
| 2094   GenerateRecordCallTarget(masm); | 2092   GenerateRecordCallTarget(masm); | 
| 2095 | 2093 | 
| 2096   __ SmiToPtrArrayOffset(r8, r6); | 2094   __ SmiToPtrArrayOffset(r7, r5); | 
| 2097   __ add(r8, r5, r8); | 2095   __ AddP(r7, r4, r7); | 
| 2098   // Put the AllocationSite from the feedback vector into r5, or undefined. | 2096   // Put the AllocationSite from the feedback vector into r4, or undefined. | 
| 2099   __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize)); | 2097   __ LoadP(r4, FieldMemOperand(r7, FixedArray::kHeaderSize)); | 
| 2100   __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset)); | 2098   __ LoadP(r7, FieldMemOperand(r4, AllocationSite::kMapOffset)); | 
| 2101   __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex); | 2099   __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex); | 
| 2102   if (CpuFeatures::IsSupported(ISELECT)) { | 2100   Label feedback_register_initialized; | 
| 2103     __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); | 2101   __ beq(&feedback_register_initialized); | 
| 2104     __ isel(eq, r5, r5, r8); | 2102   __ LoadRoot(r4, Heap::kUndefinedValueRootIndex); | 
| 2105   } else { | 2103   __ bind(&feedback_register_initialized); | 
| 2106     Label feedback_register_initialized; |  | 
| 2107     __ beq(&feedback_register_initialized); |  | 
| 2108     __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); |  | 
| 2109     __ bind(&feedback_register_initialized); |  | 
| 2110   } |  | 
| 2111 | 2104 | 
| 2112   __ AssertUndefinedOrAllocationSite(r5, r8); | 2105   __ AssertUndefinedOrAllocationSite(r4, r7); | 
| 2113 | 2106 | 
| 2114   // Pass function as new target. | 2107   // Pass function as new target. | 
| 2115   __ mr(r6, r4); | 2108   __ LoadRR(r5, r3); | 
| 2116 | 2109 | 
| 2117   // Tail call to the function-specific construct stub (still in the caller | 2110   // Tail call to the function-specific construct stub (still in the caller | 
| 2118   // context at this point). | 2111   // context at this point). | 
| 2119   __ LoadP(r7, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); | 2112   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); | 
| 2120   __ LoadP(r7, FieldMemOperand(r7, SharedFunctionInfo::kConstructStubOffset)); | 2113   __ LoadP(r6, FieldMemOperand(r6, SharedFunctionInfo::kConstructStubOffset)); | 
| 2121   __ addi(ip, r7, Operand(Code::kHeaderSize - kHeapObjectTag)); | 2114   __ AddP(ip, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 2122   __ JumpToJSEntry(ip); | 2115   __ JumpToJSEntry(ip); | 
| 2123 | 2116 | 
| 2124   __ bind(&non_function); | 2117   __ bind(&non_function); | 
| 2125   __ mr(r6, r4); | 2118   __ LoadRR(r5, r3); | 
| 2126   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET); | 2119   __ Jump(isolate()->builtins()->Construct(), RelocInfo::CODE_TARGET); | 
| 2127 } | 2120 } | 
| 2128 | 2121 | 
| 2129 |  | 
| 2130 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) { | 2122 void CallICStub::HandleArrayCase(MacroAssembler* masm, Label* miss) { | 
| 2131   // r4 - function | 2123   // r3 - function | 
| 2132   // r6 - slot id | 2124   // r5 - slot id | 
| 2133   // r5 - vector | 2125   // r4 - vector | 
| 2134   // r7 - allocation site (loaded from vector[slot]) | 2126   // r6 - allocation site (loaded from vector[slot]) | 
| 2135   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r8); | 2127   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7); | 
| 2136   __ cmp(r4, r8); | 2128   __ CmpP(r3, r7); | 
| 2137   __ bne(miss); | 2129   __ bne(miss); | 
| 2138 | 2130 | 
| 2139   __ mov(r3, Operand(arg_count())); | 2131   __ mov(r2, Operand(arg_count())); | 
| 2140 | 2132 | 
| 2141   // Increment the call count for monomorphic function calls. | 2133   // Increment the call count for monomorphic function calls. | 
| 2142   const int count_offset = FixedArray::kHeaderSize + kPointerSize; | 2134   const int count_offset = FixedArray::kHeaderSize + kPointerSize; | 
| 2143   __ SmiToPtrArrayOffset(r8, r6); | 2135   __ SmiToPtrArrayOffset(r7, r5); | 
| 2144   __ add(r5, r5, r8); | 2136   __ AddP(r4, r4, r7); | 
| 2145   __ LoadP(r6, FieldMemOperand(r5, count_offset)); | 2137   __ LoadP(r5, FieldMemOperand(r4, count_offset)); | 
| 2146   __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0); | 2138   __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0); | 
| 2147   __ StoreP(r6, FieldMemOperand(r5, count_offset), r0); | 2139   __ StoreP(r5, FieldMemOperand(r4, count_offset), r0); | 
| 2148 | 2140 | 
| 2149   __ mr(r5, r7); | 2141   __ LoadRR(r4, r6); | 
| 2150   __ mr(r6, r4); | 2142   __ LoadRR(r5, r3); | 
| 2151   ArrayConstructorStub stub(masm->isolate(), arg_count()); | 2143   ArrayConstructorStub stub(masm->isolate(), arg_count()); | 
| 2152   __ TailCallStub(&stub); | 2144   __ TailCallStub(&stub); | 
| 2153 } | 2145 } | 
| 2154 | 2146 | 
| 2155 |  | 
| 2156 void CallICStub::Generate(MacroAssembler* masm) { | 2147 void CallICStub::Generate(MacroAssembler* masm) { | 
| 2157   // r4 - function | 2148   // r3 - function | 
| 2158   // r6 - slot id (Smi) | 2149   // r5 - slot id (Smi) | 
| 2159   // r5 - vector | 2150   // r4 - vector | 
| 2160   Label extra_checks_or_miss, call, call_function; | 2151   Label extra_checks_or_miss, call, call_function; | 
| 2161   int argc = arg_count(); | 2152   int argc = arg_count(); | 
| 2162   ParameterCount actual(argc); | 2153   ParameterCount actual(argc); | 
| 2163 | 2154 | 
| 2164   // The checks. First, does r4 match the recorded monomorphic target? | 2155   // The checks. First, does r3 match the recorded monomorphic target? | 
| 2165   __ SmiToPtrArrayOffset(r9, r6); | 2156   __ SmiToPtrArrayOffset(r8, r5); | 
| 2166   __ add(r9, r5, r9); | 2157   __ AddP(r8, r4, r8); | 
| 2167   __ LoadP(r7, FieldMemOperand(r9, FixedArray::kHeaderSize)); | 2158   __ LoadP(r6, FieldMemOperand(r8, FixedArray::kHeaderSize)); | 
| 2168 | 2159 | 
| 2169   // We don't know that we have a weak cell. We might have a private symbol | 2160   // We don't know that we have a weak cell. We might have a private symbol | 
| 2170   // or an AllocationSite, but the memory is safe to examine. | 2161   // or an AllocationSite, but the memory is safe to examine. | 
| 2171   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to | 2162   // AllocationSite::kTransitionInfoOffset - contains a Smi or pointer to | 
| 2172   // FixedArray. | 2163   // FixedArray. | 
| 2173   // WeakCell::kValueOffset - contains a JSFunction or Smi(0) | 2164   // WeakCell::kValueOffset - contains a JSFunction or Smi(0) | 
| 2174   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not | 2165   // Symbol::kHashFieldSlot - if the low bit is 1, then the hash is not | 
| 2175   // computed, meaning that it can't appear to be a pointer. If the low bit is | 2166   // computed, meaning that it can't appear to be a pointer. If the low bit is | 
| 2176   // 0, then hash is computed, but the 0 bit prevents the field from appearing | 2167   // 0, then hash is computed, but the 0 bit prevents the field from appearing | 
| 2177   // to be a pointer. | 2168   // to be a pointer. | 
| 2178   STATIC_ASSERT(WeakCell::kSize >= kPointerSize); | 2169   STATIC_ASSERT(WeakCell::kSize >= kPointerSize); | 
| 2179   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset == | 2170   STATIC_ASSERT(AllocationSite::kTransitionInfoOffset == | 
| 2180                     WeakCell::kValueOffset && | 2171                     WeakCell::kValueOffset && | 
| 2181                 WeakCell::kValueOffset == Symbol::kHashFieldSlot); | 2172                 WeakCell::kValueOffset == Symbol::kHashFieldSlot); | 
| 2182 | 2173 | 
| 2183   __ LoadP(r8, FieldMemOperand(r7, WeakCell::kValueOffset)); | 2174   __ LoadP(r7, FieldMemOperand(r6, WeakCell::kValueOffset)); | 
| 2184   __ cmp(r4, r8); | 2175   __ CmpP(r3, r7); | 
| 2185   __ bne(&extra_checks_or_miss); | 2176   __ bne(&extra_checks_or_miss, Label::kNear); | 
| 2186 | 2177 | 
| 2187   // The compare above could have been a SMI/SMI comparison. Guard against this | 2178   // The compare above could have been a SMI/SMI comparison. Guard against this | 
| 2188   // convincing us that we have a monomorphic JSFunction. | 2179   // convincing us that we have a monomorphic JSFunction. | 
| 2189   __ JumpIfSmi(r4, &extra_checks_or_miss); | 2180   __ JumpIfSmi(r3, &extra_checks_or_miss); | 
| 2190 | 2181 | 
| 2191   // Increment the call count for monomorphic function calls. | 2182   // Increment the call count for monomorphic function calls. | 
| 2192   const int count_offset = FixedArray::kHeaderSize + kPointerSize; | 2183   const int count_offset = FixedArray::kHeaderSize + kPointerSize; | 
| 2193   __ LoadP(r6, FieldMemOperand(r9, count_offset)); | 2184   __ LoadP(r5, FieldMemOperand(r8, count_offset)); | 
| 2194   __ AddSmiLiteral(r6, r6, Smi::FromInt(CallICNexus::kCallCountIncrement), r0); | 2185   __ AddSmiLiteral(r5, r5, Smi::FromInt(CallICNexus::kCallCountIncrement), r0); | 
| 2195   __ StoreP(r6, FieldMemOperand(r9, count_offset), r0); | 2186   __ StoreP(r5, FieldMemOperand(r8, count_offset), r0); | 
| 2196 | 2187 | 
| 2197   __ bind(&call_function); | 2188   __ bind(&call_function); | 
| 2198   __ mov(r3, Operand(argc)); | 2189   __ mov(r2, Operand(argc)); | 
| 2199   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(), | 2190   __ Jump(masm->isolate()->builtins()->CallFunction(convert_mode(), | 
| 2200                                                     tail_call_mode()), | 2191                                                     tail_call_mode()), | 
| 2201           RelocInfo::CODE_TARGET); | 2192           RelocInfo::CODE_TARGET); | 
| 2202 | 2193 | 
| 2203   __ bind(&extra_checks_or_miss); | 2194   __ bind(&extra_checks_or_miss); | 
| 2204   Label uninitialized, miss, not_allocation_site; | 2195   Label uninitialized, miss, not_allocation_site; | 
| 2205 | 2196 | 
| 2206   __ CompareRoot(r7, Heap::kmegamorphic_symbolRootIndex); | 2197   __ CompareRoot(r6, Heap::kmegamorphic_symbolRootIndex); | 
| 2207   __ beq(&call); | 2198   __ beq(&call); | 
| 2208 | 2199 | 
| 2209   // Verify that r7 contains an AllocationSite | 2200   // Verify that r6 contains an AllocationSite | 
| 2210   __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset)); | 2201   __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset)); | 
| 2211   __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex); | 2202   __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex); | 
| 2212   __ bne(&not_allocation_site); | 2203   __ bne(&not_allocation_site); | 
| 2213 | 2204 | 
| 2214   // We have an allocation site. | 2205   // We have an allocation site. | 
| 2215   HandleArrayCase(masm, &miss); | 2206   HandleArrayCase(masm, &miss); | 
| 2216 | 2207 | 
| 2217   __ bind(&not_allocation_site); | 2208   __ bind(&not_allocation_site); | 
| 2218 | 2209 | 
| 2219   // The following cases attempt to handle MISS cases without going to the | 2210   // The following cases attempt to handle MISS cases without going to the | 
| 2220   // runtime. | 2211   // runtime. | 
| 2221   if (FLAG_trace_ic) { | 2212   if (FLAG_trace_ic) { | 
| 2222     __ b(&miss); | 2213     __ b(&miss); | 
| 2223   } | 2214   } | 
| 2224 | 2215 | 
| 2225   __ CompareRoot(r7, Heap::kuninitialized_symbolRootIndex); | 2216   __ CompareRoot(r6, Heap::kuninitialized_symbolRootIndex); | 
| 2226   __ beq(&uninitialized); | 2217   __ beq(&uninitialized); | 
| 2227 | 2218 | 
| 2228   // We are going megamorphic. If the feedback is a JSFunction, it is fine | 2219   // We are going megamorphic. If the feedback is a JSFunction, it is fine | 
| 2229   // to handle it here. More complex cases are dealt with in the runtime. | 2220   // to handle it here. More complex cases are dealt with in the runtime. | 
| 2230   __ AssertNotSmi(r7); | 2221   __ AssertNotSmi(r6); | 
| 2231   __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE); | 2222   __ CompareObjectType(r6, r7, r7, JS_FUNCTION_TYPE); | 
| 2232   __ bne(&miss); | 2223   __ bne(&miss); | 
| 2233   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex); | 2224   __ LoadRoot(ip, Heap::kmegamorphic_symbolRootIndex); | 
| 2234   __ StoreP(ip, FieldMemOperand(r9, FixedArray::kHeaderSize), r0); | 2225   __ StoreP(ip, FieldMemOperand(r8, FixedArray::kHeaderSize), r0); | 
| 2235 | 2226 | 
| 2236   __ bind(&call); | 2227   __ bind(&call); | 
| 2237   __ mov(r3, Operand(argc)); | 2228   __ mov(r2, Operand(argc)); | 
| 2238   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()), | 2229   __ Jump(masm->isolate()->builtins()->Call(convert_mode(), tail_call_mode()), | 
| 2239           RelocInfo::CODE_TARGET); | 2230           RelocInfo::CODE_TARGET); | 
| 2240 | 2231 | 
| 2241   __ bind(&uninitialized); | 2232   __ bind(&uninitialized); | 
| 2242 | 2233 | 
| 2243   // We are going monomorphic, provided we actually have a JSFunction. | 2234   // We are going monomorphic, provided we actually have a JSFunction. | 
| 2244   __ JumpIfSmi(r4, &miss); | 2235   __ JumpIfSmi(r3, &miss); | 
| 2245 | 2236 | 
| 2247   // Go to the miss case if we do not have a function. | 2238   // Go to the miss case if we do not have a function. | 
| 2247   __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE); | 2238   __ CompareObjectType(r3, r6, r6, JS_FUNCTION_TYPE); | 
| 2248   __ bne(&miss); | 2239   __ bne(&miss); | 
| 2249 | 2240 | 
| 2250   // Make sure the function is not the Array() function, which requires special | 2241   // Make sure the function is not the Array() function, which requires special | 
| 2251   // behavior on MISS. | 2242   // behavior on MISS. | 
| 2252   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r7); | 2243   __ LoadNativeContextSlot(Context::ARRAY_FUNCTION_INDEX, r6); | 
| 2253   __ cmp(r4, r7); | 2244   __ CmpP(r3, r6); | 
| 2254   __ beq(&miss); | 2245   __ beq(&miss); | 
| 2255 | 2246 | 
| 2256   // Make sure the function belongs to the same native context. | 2247   // Make sure the function belongs to the same native context. | 
| 2257   __ LoadP(r7, FieldMemOperand(r4, JSFunction::kContextOffset)); | 2248   __ LoadP(r6, FieldMemOperand(r3, JSFunction::kContextOffset)); | 
| 2258   __ LoadP(r7, ContextMemOperand(r7, Context::NATIVE_CONTEXT_INDEX)); | 2249   __ LoadP(r6, ContextMemOperand(r6, Context::NATIVE_CONTEXT_INDEX)); | 
| 2259   __ LoadP(ip, NativeContextMemOperand()); | 2250   __ LoadP(ip, NativeContextMemOperand()); | 
| 2260   __ cmp(r7, ip); | 2251   __ CmpP(r6, ip); | 
| 2261   __ bne(&miss); | 2252   __ bne(&miss); | 
| 2262 | 2253 | 
| 2263   // Initialize the call counter. | 2254   // Initialize the call counter. | 
| 2264   __ LoadSmiLiteral(r8, Smi::FromInt(CallICNexus::kCallCountIncrement)); | 2255   __ LoadSmiLiteral(r7, Smi::FromInt(CallICNexus::kCallCountIncrement)); | 
| 2265   __ StoreP(r8, FieldMemOperand(r9, count_offset), r0); | 2256   __ StoreP(r7, FieldMemOperand(r8, count_offset), r0); | 
| 2266 | 2257 | 
| 2267   // Store the function. Use a stub since we need a frame for allocation. | 2258   // Store the function. Use a stub since we need a frame for allocation. | 
| 2268   // r5 - vector | 2259   // r4 - vector | 
| 2269   // r6 - slot | 2260   // r5 - slot | 
| 2270   // r4 - function | 2261   // r3 - function | 
| 2271   { | 2262   { | 
| 2272     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 2263     FrameScope scope(masm, StackFrame::INTERNAL); | 
| 2273     CreateWeakCellStub create_stub(masm->isolate()); | 2264     CreateWeakCellStub create_stub(masm->isolate()); | 
| 2274     __ Push(r4); | 2265     __ Push(r3); | 
| 2275     __ CallStub(&create_stub); | 2266     __ CallStub(&create_stub); | 
| 2276     __ Pop(r4); | 2267     __ Pop(r3); | 
| 2277   } | 2268   } | 
| 2278 | 2269 | 
| 2279   __ b(&call_function); | 2270   __ b(&call_function); | 
| 2280 | 2271 | 
| 2281   // We are here because tracing is on or we encountered a MISS case we can't | 2272   // We are here because tracing is on or we encountered a MISS case we can't | 
| 2282   // handle here. | 2273   // handle here. | 
| 2283   __ bind(&miss); | 2274   __ bind(&miss); | 
| 2284   GenerateMiss(masm); | 2275   GenerateMiss(masm); | 
| 2285 | 2276 | 
| 2286   __ b(&call); | 2277   __ b(&call); | 
| 2287 } | 2278 } | 
| 2288 | 2279 | 
| 2289 |  | 
| 2290 void CallICStub::GenerateMiss(MacroAssembler* masm) { | 2280 void CallICStub::GenerateMiss(MacroAssembler* masm) { | 
| 2291   FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 2281   FrameScope scope(masm, StackFrame::INTERNAL); | 
| 2292 | 2282 | 
| 2293   // Push the function and feedback info. | 2283   // Push the function and feedback info. | 
| 2294   __ Push(r4, r5, r6); | 2284   __ Push(r3, r4, r5); | 
| 2295 | 2285 | 
| 2296   // Call the entry. | 2286   // Call the entry. | 
| 2297   __ CallRuntime(Runtime::kCallIC_Miss); | 2287   __ CallRuntime(Runtime::kCallIC_Miss); | 
| 2298 | 2288 | 
| 2299   // Move result to r4 and exit the internal frame. | 2289   // Move result to r3 and exit the internal frame. | 
| 2300   __ mr(r4, r3); | 2290   __ LoadRR(r3, r2); | 
| 2301 } | 2291 } | 
| 2302 | 2292 | 
| 2303 |  | 
| 2304 // StringCharCodeAtGenerator | 2293 // StringCharCodeAtGenerator | 
| 2305 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 2294 void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 
| 2306   // If the receiver is a smi, trigger the non-string case. | 2295   // If the receiver is a smi, trigger the non-string case. | 
| 2307   if (check_mode_ == RECEIVER_IS_UNKNOWN) { | 2296   if (check_mode_ == RECEIVER_IS_UNKNOWN) { | 
| 2308     __ JumpIfSmi(object_, receiver_not_string_); | 2297     __ JumpIfSmi(object_, receiver_not_string_); | 
| 2309 | 2298 | 
| 2310     // Fetch the instance type of the receiver into result register. | 2299     // Fetch the instance type of the receiver into result register. | 
| 2311     __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 2300     __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
| 2312     __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 2301     __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
| 2313     // If the receiver is not a string, trigger the non-string case. | 2302     // If the receiver is not a string, trigger the non-string case. | 
| 2314     __ andi(r0, result_, Operand(kIsNotStringMask)); | 2303     __ mov(r0, Operand(kIsNotStringMask)); | 
| 2315     __ bne(receiver_not_string_, cr0); | 2304     __ AndP(r0, result_); | 
|  | 2305     __ bne(receiver_not_string_); | 
| 2316   } | 2306   } | 
| 2317 | 2307 | 
| 2318   // If the index is non-smi, trigger the non-smi case. | 2308   // If the index is non-smi, trigger the non-smi case. | 
| 2319   __ JumpIfNotSmi(index_, &index_not_smi_); | 2309   __ JumpIfNotSmi(index_, &index_not_smi_); | 
| 2320   __ bind(&got_smi_index_); | 2310   __ bind(&got_smi_index_); | 
| 2321 | 2311 | 
| 2322   // Check for index out of range. | 2312   // Check for index out of range. | 
| 2323   __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset)); | 2313   __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset)); | 
| 2324   __ cmpl(ip, index_); | 2314   __ CmpLogicalP(ip, index_); | 
| 2325   __ ble(index_out_of_range_); | 2315   __ ble(index_out_of_range_); | 
| 2326 | 2316 | 
| 2327   __ SmiUntag(index_); | 2317   __ SmiUntag(index_); | 
| 2328 | 2318 | 
| 2329   StringCharLoadGenerator::Generate(masm, object_, index_, result_, | 2319   StringCharLoadGenerator::Generate(masm, object_, index_, result_, | 
| 2330                                     &call_runtime_); | 2320                                     &call_runtime_); | 
| 2331 | 2321 | 
| 2332   __ SmiTag(result_); | 2322   __ SmiTag(result_); | 
| 2333   __ bind(&exit_); | 2323   __ bind(&exit_); | 
| 2334 } | 2324 } | 
| 2335 | 2325 | 
| 2336 |  | 
| 2337 void StringCharCodeAtGenerator::GenerateSlow( | 2326 void StringCharCodeAtGenerator::GenerateSlow( | 
| 2338     MacroAssembler* masm, EmbedMode embed_mode, | 2327     MacroAssembler* masm, EmbedMode embed_mode, | 
| 2339     const RuntimeCallHelper& call_helper) { | 2328     const RuntimeCallHelper& call_helper) { | 
| 2340   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); | 2329   __ Abort(kUnexpectedFallthroughToCharCodeAtSlowCase); | 
| 2341 | 2330 | 
| 2342   // Index is not a smi. | 2331   // Index is not a smi. | 
| 2343   __ bind(&index_not_smi_); | 2332   __ bind(&index_not_smi_); | 
| 2344   // If index is a heap number, try converting it to an integer. | 2333   // If index is a heap number, try converting it to an integer. | 
| 2345   __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_, | 2334   __ CheckMap(index_, result_, Heap::kHeapNumberMapRootIndex, index_not_number_, | 
| 2346               DONT_DO_SMI_CHECK); | 2335               DONT_DO_SMI_CHECK); | 
| 2347   call_helper.BeforeCall(masm); | 2336   call_helper.BeforeCall(masm); | 
| 2348   if (embed_mode == PART_OF_IC_HANDLER) { | 2337   if (embed_mode == PART_OF_IC_HANDLER) { | 
| 2349     __ Push(LoadWithVectorDescriptor::VectorRegister(), | 2338     __ Push(LoadWithVectorDescriptor::VectorRegister(), | 
| 2350             LoadWithVectorDescriptor::SlotRegister(), object_, index_); | 2339             LoadWithVectorDescriptor::SlotRegister(), object_, index_); | 
| 2351   } else { | 2340   } else { | 
| 2352     // index_ is consumed by runtime conversion function. | 2341     // index_ is consumed by runtime conversion function. | 
| 2353     __ Push(object_, index_); | 2342     __ Push(object_, index_); | 
| 2354   } | 2343   } | 
| 2355   if (index_flags_ == STRING_INDEX_IS_NUMBER) { | 2344   if (index_flags_ == STRING_INDEX_IS_NUMBER) { | 
| 2356     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero); | 2345     __ CallRuntime(Runtime::kNumberToIntegerMapMinusZero); | 
| 2357   } else { | 2346   } else { | 
| 2358     DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | 2347     DCHECK(index_flags_ == STRING_INDEX_IS_ARRAY_INDEX); | 
| 2359     // NumberToSmi discards numbers that are not exact integers. | 2348     // NumberToSmi discards numbers that are not exact integers. | 
| 2360     __ CallRuntime(Runtime::kNumberToSmi); | 2349     __ CallRuntime(Runtime::kNumberToSmi); | 
| 2361   } | 2350   } | 
| 2362   // Save the conversion result before the pop instructions below | 2351   // Save the conversion result before the pop instructions below | 
| 2363   // have a chance to overwrite it. | 2352   // have a chance to overwrite it. | 
| 2364   __ Move(index_, r3); | 2353   __ Move(index_, r2); | 
| 2365   if (embed_mode == PART_OF_IC_HANDLER) { | 2354   if (embed_mode == PART_OF_IC_HANDLER) { | 
| 2366     __ Pop(LoadWithVectorDescriptor::VectorRegister(), | 2355     __ Pop(LoadWithVectorDescriptor::VectorRegister(), | 
| 2367            LoadWithVectorDescriptor::SlotRegister(), object_); | 2356            LoadWithVectorDescriptor::SlotRegister(), object_); | 
| 2368   } else { | 2357   } else { | 
| 2369     __ pop(object_); | 2358     __ pop(object_); | 
| 2370   } | 2359   } | 
| 2371   // Reload the instance type. | 2360   // Reload the instance type. | 
| 2372   __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 2361   __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
| 2373   __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 2362   __ LoadlB(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
| 2374   call_helper.AfterCall(masm); | 2363   call_helper.AfterCall(masm); | 
| 2375   // If index is still not a smi, it must be out of range. | 2364   // If index is still not a smi, it must be out of range. | 
| 2376   __ JumpIfNotSmi(index_, index_out_of_range_); | 2365   __ JumpIfNotSmi(index_, index_out_of_range_); | 
| 2377   // Otherwise, return to the fast path. | 2366   // Otherwise, return to the fast path. | 
| 2378   __ b(&got_smi_index_); | 2367   __ b(&got_smi_index_); | 
| 2379 | 2368 | 
| 2380   // Call runtime. We get here when the receiver is a string and the | 2369   // Call runtime. We get here when the receiver is a string and the | 
| 2381   // index is a number, but the code for getting the actual character | 2370   // index is a number, but the code for getting the actual character | 
| 2382   // is too complex (e.g., when the string needs to be flattened). | 2371   // is too complex (e.g., when the string needs to be flattened). | 
| 2383   __ bind(&call_runtime_); | 2372   __ bind(&call_runtime_); | 
| 2384   call_helper.BeforeCall(masm); | 2373   call_helper.BeforeCall(masm); | 
| 2385   __ SmiTag(index_); | 2374   __ SmiTag(index_); | 
| 2386   __ Push(object_, index_); | 2375   __ Push(object_, index_); | 
| 2387   __ CallRuntime(Runtime::kStringCharCodeAtRT); | 2376   __ CallRuntime(Runtime::kStringCharCodeAtRT); | 
| 2388   __ Move(result_, r3); | 2377   __ Move(result_, r2); | 
| 2389   call_helper.AfterCall(masm); | 2378   call_helper.AfterCall(masm); | 
| 2390   __ b(&exit_); | 2379   __ b(&exit_); | 
| 2391 | 2380 | 
| 2392   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | 2381   __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | 
| 2393 } | 2382 } | 
| 2394 | 2383 | 
| 2395 |  | 
| 2396 // ------------------------------------------------------------------------- | 2384 // ------------------------------------------------------------------------- | 
| 2397 // StringCharFromCodeGenerator | 2385 // StringCharFromCodeGenerator | 
| 2398 | 2386 | 
| 2399 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 2387 void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 
| 2400   // Fast case of Heap::LookupSingleCharacterStringFromCode. | 2388   // Fast case of Heap::LookupSingleCharacterStringFromCode. | 
| 2401   DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1)); | 2389   DCHECK(base::bits::IsPowerOfTwo32(String::kMaxOneByteCharCodeU + 1)); | 
| 2402   __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU)); | 2390   __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCodeU)); | 
| 2403   __ ori(r0, r0, Operand(kSmiTagMask)); | 2391   __ OrP(r0, r0, Operand(kSmiTagMask)); | 
| 2404   __ and_(r0, code_, r0, SetRC); | 2392   __ AndP(r0, code_, r0); | 
| 2405   __ bne(&slow_case_, cr0); | 2393   __ bne(&slow_case_); | 
| 2406 | 2394 | 
| 2407   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 2395   __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 
| 2408   // At this point code register contains smi tagged one-byte char code. | 2396   // At this point code register contains smi tagged one-byte char code. | 
| 2409   __ mr(r0, code_); | 2397   __ LoadRR(r0, code_); | 
| 2410   __ SmiToPtrArrayOffset(code_, code_); | 2398   __ SmiToPtrArrayOffset(code_, code_); | 
| 2411   __ add(result_, result_, code_); | 2399   __ AddP(result_, code_); | 
| 2412   __ mr(code_, r0); | 2400   __ LoadRR(code_, r0); | 
| 2413   __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 2401   __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 
| 2414   __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); | 2402   __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); | 
| 2415   __ beq(&slow_case_); | 2403   __ beq(&slow_case_); | 
| 2416   __ bind(&exit_); | 2404   __ bind(&exit_); | 
| 2417 } | 2405 } | 
| 2418 | 2406 | 
| 2419 |  | 
| 2420 void StringCharFromCodeGenerator::GenerateSlow( | 2407 void StringCharFromCodeGenerator::GenerateSlow( | 
| 2421     MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 2408     MacroAssembler* masm, const RuntimeCallHelper& call_helper) { | 
| 2422   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); | 2409   __ Abort(kUnexpectedFallthroughToCharFromCodeSlowCase); | 
| 2423 | 2410 | 
| 2424   __ bind(&slow_case_); | 2411   __ bind(&slow_case_); | 
| 2425   call_helper.BeforeCall(masm); | 2412   call_helper.BeforeCall(masm); | 
| 2426   __ push(code_); | 2413   __ push(code_); | 
| 2427   __ CallRuntime(Runtime::kStringCharFromCode); | 2414   __ CallRuntime(Runtime::kStringCharFromCode); | 
| 2428   __ Move(result_, r3); | 2415   __ Move(result_, r2); | 
| 2429   call_helper.AfterCall(masm); | 2416   call_helper.AfterCall(masm); | 
| 2430   __ b(&exit_); | 2417   __ b(&exit_); | 
| 2431 | 2418 | 
| 2432   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); | 2419   __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); | 
| 2433 } | 2420 } | 
| 2434 | 2421 | 
| 2435 | 2422 enum CopyCharactersFlags { COPY_ASCII = 1, DEST_ALWAYS_ALIGNED = 2 }; | 
| 2436 enum CopyCharactersFlags { COPY_ONE_BYTE = 1, DEST_ALWAYS_ALIGNED = 2 }; |  | 
| 2437 |  | 
| 2438 | 2423 | 
| 2439 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest, | 2424 void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, Register dest, | 
| 2440                                           Register src, Register count, | 2425                                           Register src, Register count, | 
| 2441                                           Register scratch, | 2426                                           Register scratch, | 
| 2442                                           String::Encoding encoding) { | 2427                                           String::Encoding encoding) { | 
| 2443   if (FLAG_debug_code) { | 2428   if (FLAG_debug_code) { | 
| 2444     // Check that destination is word aligned. | 2429     // Check that destination is word aligned. | 
| 2445     __ andi(r0, dest, Operand(kPointerAlignmentMask)); | 2430     __ mov(r0, Operand(kPointerAlignmentMask)); | 
|  | 2431     __ AndP(r0, dest); | 
| 2446     __ Check(eq, kDestinationOfCopyNotAligned, cr0); | 2432     __ Check(eq, kDestinationOfCopyNotAligned, cr0); | 
| 2447   } | 2433   } | 
| 2448 | 2434 | 
| 2449   // Nothing to do for zero characters. | 2435   // Nothing to do for zero characters. | 
| 2450   Label done; | 2436   Label done; | 
| 2451   if (encoding == String::TWO_BYTE_ENCODING) { | 2437   if (encoding == String::TWO_BYTE_ENCODING) { | 
| 2452     // double the length | 2438     // double the length | 
| 2453     __ add(count, count, count, LeaveOE, SetRC); | 2439     __ AddP(count, count, count); | 
| 2454     __ beq(&done, cr0); | 2440     __ beq(&done, Label::kNear); | 
| 2455   } else { | 2441   } else { | 
| 2456     __ cmpi(count, Operand::Zero()); | 2442     __ CmpP(count, Operand::Zero()); | 
| 2457     __ beq(&done); | 2443     __ beq(&done, Label::kNear); | 
| 2458   } | 2444   } | 
| 2459 | 2445 | 
| 2460   // Copy count bytes from src to dst. | 2446   // Copy count bytes from src to dst. | 
| 2461   Label byte_loop; | 2447   Label byte_loop; | 
| 2462   __ mtctr(count); | 2448   // TODO(joransiu): Convert into MVC loop | 
| 2463   __ bind(&byte_loop); | 2449   __ bind(&byte_loop); | 
| 2464   __ lbz(scratch, MemOperand(src)); | 2450   __ LoadlB(scratch, MemOperand(src)); | 
| 2465   __ addi(src, src, Operand(1)); | 2451   __ la(src, MemOperand(src, 1)); | 
| 2466   __ stb(scratch, MemOperand(dest)); | 2452   __ stc(scratch, MemOperand(dest)); | 
| 2467   __ addi(dest, dest, Operand(1)); | 2453   __ la(dest, MemOperand(dest, 1)); | 
| 2468   __ bdnz(&byte_loop); | 2454   __ BranchOnCount(count, &byte_loop); | 
| 2469 | 2455 | 
| 2470   __ bind(&done); | 2456   __ bind(&done); | 
| 2471 } | 2457 } | 
| 2472 | 2458 | 
| 2473 |  | 
| 2474 void SubStringStub::Generate(MacroAssembler* masm) { | 2459 void SubStringStub::Generate(MacroAssembler* masm) { | 
| 2475   Label runtime; | 2460   Label runtime; | 
| 2476 | 2461 | 
| 2477   // Stack frame on entry. | 2462   // Stack frame on entry. | 
| 2478   //  lr: return address | 2463   //  lr: return address | 
| 2479   //  sp[0]: to | 2464   //  sp[0]: to | 
| 2480   //  sp[4]: from | 2465   //  sp[4]: from | 
| 2481   //  sp[8]: string | 2466   //  sp[8]: string | 
| 2482 | 2467 | 
| 2483   // This stub is called from the native-call %_SubString(...), so | 2468   // This stub is called from the native-call %_SubString(...), so | 
| 2484   // nothing can be assumed about the arguments. It is tested that: | 2469   // nothing can be assumed about the arguments. It is tested that: | 
| 2485   //  "string" is a sequential string, | 2470   //  "string" is a sequential string, | 
| 2486   //  both "from" and "to" are smis, and | 2471   //  both "from" and "to" are smis, and | 
| 2487   //  0 <= from <= to <= string.length. | 2472   //  0 <= from <= to <= string.length. | 
| 2488   // If any of these assumptions fail, we call the runtime system. | 2473   // If any of these assumptions fail, we call the runtime system. | 
| 2489 | 2474 | 
| 2490   const int kToOffset = 0 * kPointerSize; | 2475   const int kToOffset = 0 * kPointerSize; | 
| 2491   const int kFromOffset = 1 * kPointerSize; | 2476   const int kFromOffset = 1 * kPointerSize; | 
| 2492   const int kStringOffset = 2 * kPointerSize; | 2477   const int kStringOffset = 2 * kPointerSize; | 
| 2493 | 2478 | 
| 2494   __ LoadP(r5, MemOperand(sp, kToOffset)); | 2479   __ LoadP(r4, MemOperand(sp, kToOffset)); | 
| 2495   __ LoadP(r6, MemOperand(sp, kFromOffset)); | 2480   __ LoadP(r5, MemOperand(sp, kFromOffset)); | 
| 2496 | 2481 | 
| 2497   // If either to or from had the smi tag bit set, bail out to the generic runtime | 2482   // If either to or from had the smi tag bit set, bail out to the generic runtime | 
|  | 2483   __ JumpIfNotSmi(r4, &runtime); | 
| 2498   __ JumpIfNotSmi(r5, &runtime); | 2484   __ JumpIfNotSmi(r5, &runtime); | 
| 2499   __ JumpIfNotSmi(r6, &runtime); | 2485   __ SmiUntag(r4); | 
| 2500   __ SmiUntag(r5); | 2486   __ SmiUntag(r5); | 
| 2501   __ SmiUntag(r6, SetRC); | 2487   // Both r4 and r5 are untagged integers. | 
| 2502   // Both r5 and r6 are untagged integers. |  | 
| 2503 | 2488 | 
| 2504   // We want to bail out to the runtime here if From is negative. | 2489   // We want to bail out to the runtime here if From is negative. | 
| 2505   __ blt(&runtime, cr0);  // From < 0. | 2490   __ blt(&runtime);  // From < 0. | 
| 2506 | 2491 | 
| 2507   __ cmpl(r6, r5); | 2492   __ CmpLogicalP(r5, r4); | 
| 2508   __ bgt(&runtime);  // Fail if from > to. | 2493   __ bgt(&runtime);  // Fail if from > to. | 
| 2509   __ sub(r5, r5, r6); | 2494   __ SubP(r4, r4, r5); | 
| 2510 | 2495 | 
| 2511   // Make sure first argument is a string. | 2496   // Make sure first argument is a string. | 
| 2512   __ LoadP(r3, MemOperand(sp, kStringOffset)); | 2497   __ LoadP(r2, MemOperand(sp, kStringOffset)); | 
| 2513   __ JumpIfSmi(r3, &runtime); | 2498   __ JumpIfSmi(r2, &runtime); | 
| 2514   Condition is_string = masm->IsObjectStringType(r3, r4); | 2499   Condition is_string = masm->IsObjectStringType(r2, r3); | 
| 2515   __ b(NegateCondition(is_string), &runtime, cr0); | 2500   __ b(NegateCondition(is_string), &runtime); | 
| 2516 | 2501 | 
| 2517   Label single_char; | 2502   Label single_char; | 
| 2518   __ cmpi(r5, Operand(1)); | 2503   __ CmpP(r4, Operand(1)); | 
| 2519   __ b(eq, &single_char); | 2504   __ b(eq, &single_char); | 
| 2520 | 2505 | 
| 2521   // Short-cut for the case of trivial substring. | 2506   // Short-cut for the case of trivial substring. | 
| 2522   Label return_r3; | 2507   Label return_r2; | 
| 2523   // r3: original string | 2508   // r2: original string | 
| 2524   // r5: result string length | 2509   // r4: result string length | 
| 2525   __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset)); | 2510   __ LoadP(r6, FieldMemOperand(r2, String::kLengthOffset)); | 
| 2526   __ SmiUntag(r0, r7); | 2511   __ SmiUntag(r0, r6); | 
| 2527   __ cmpl(r5, r0); | 2512   __ CmpLogicalP(r4, r0); | 
| 2528   // Return original string. | 2513   // Return original string. | 
| 2529   __ beq(&return_r3); | 2514   __ beq(&return_r2); | 
| 2530   // Longer than original string's length or negative: unsafe arguments. | 2515   // Longer than original string's length or negative: unsafe arguments. | 
| 2531   __ bgt(&runtime); | 2516   __ bgt(&runtime); | 
| 2532   // Shorter than original string's length: an actual substring. | 2517   // Shorter than original string's length: an actual substring. | 
| 2533 | 2518 | 
| 2534   // Deal with different string types: update the index if necessary | 2519   // Deal with different string types: update the index if necessary | 
| 2535   // and put the underlying string into r8. | 2520   // and put the underlying string into r7. | 
| 2536   // r3: original string | 2521   // r2: original string | 
| 2537   // r4: instance type | 2522   // r3: instance type | 
| 2538   // r5: length | 2523   // r4: length | 
| 2539   // r6: from index (untagged) | 2524   // r5: from index (untagged) | 
| 2540   Label underlying_unpacked, sliced_string, seq_or_external_string; | 2525   Label underlying_unpacked, sliced_string, seq_or_external_string; | 
| 2541   // If the string is not indirect, it can only be sequential or external. | 2526   // If the string is not indirect, it can only be sequential or external. | 
| 2542   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); | 2527   STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); | 
| 2543   STATIC_ASSERT(kIsIndirectStringMask != 0); | 2528   STATIC_ASSERT(kIsIndirectStringMask != 0); | 
| 2544   __ andi(r0, r4, Operand(kIsIndirectStringMask)); | 2529   __ mov(r0, Operand(kIsIndirectStringMask)); | 
| 2545   __ beq(&seq_or_external_string, cr0); | 2530   __ AndP(r0, r3); | 
|  | 2531   __ beq(&seq_or_external_string); | 
| 2546 | 2532 | 
| 2547   __ andi(r0, r4, Operand(kSlicedNotConsMask)); | 2533   __ mov(r0, Operand(kSlicedNotConsMask)); | 
| 2548   __ bne(&sliced_string, cr0); | 2534   __ AndP(r0, r3); | 
|  | 2535   __ bne(&sliced_string); | 
| 2549   // Cons string.  Check whether it is flat, then fetch first part. | 2536   // Cons string.  Check whether it is flat, then fetch first part. | 
| 2550   __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset)); | 2537   __ LoadP(r7, FieldMemOperand(r2, ConsString::kSecondOffset)); | 
| 2551   __ CompareRoot(r8, Heap::kempty_stringRootIndex); | 2538   __ CompareRoot(r7, Heap::kempty_stringRootIndex); | 
| 2552   __ bne(&runtime); | 2539   __ bne(&runtime); | 
| 2553   __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset)); | 2540   __ LoadP(r7, FieldMemOperand(r2, ConsString::kFirstOffset)); | 
| 2554   // Update instance type. | 2541   // Update instance type. | 
| 2555   __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset)); | 2542   __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset)); | 
| 2556   __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 2543   __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 
| 2557   __ b(&underlying_unpacked); | 2544   __ b(&underlying_unpacked); | 
| 2558 | 2545 | 
| 2559   __ bind(&sliced_string); | 2546   __ bind(&sliced_string); | 
| 2560   // Sliced string.  Fetch parent and correct start index by offset. | 2547   // Sliced string.  Fetch parent and correct start index by offset. | 
| 2561   __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset)); | 2548   __ LoadP(r7, FieldMemOperand(r2, SlicedString::kParentOffset)); | 
| 2562   __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset)); | 2549   __ LoadP(r6, FieldMemOperand(r2, SlicedString::kOffsetOffset)); | 
| 2563   __ SmiUntag(r4, r7); | 2550   __ SmiUntag(r3, r6); | 
| 2564   __ add(r6, r6, r4);  // Add offset to index. | 2551   __ AddP(r5, r3);  // Add offset to index. | 
| 2565   // Update instance type. | 2552   // Update instance type. | 
| 2566   __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset)); | 2553   __ LoadP(r3, FieldMemOperand(r7, HeapObject::kMapOffset)); | 
| 2567   __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 2554   __ LoadlB(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 
| 2568   __ b(&underlying_unpacked); | 2555   __ b(&underlying_unpacked); | 
| 2569 | 2556 | 
| 2570   __ bind(&seq_or_external_string); | 2557   __ bind(&seq_or_external_string); | 
| 2571   // Sequential or external string.  Just move string to the expected register. | 2558   // Sequential or external string.  Just move string to the expected register. | 
| 2572   __ mr(r8, r3); | 2559   __ LoadRR(r7, r2); | 
| 2573 | 2560 | 
| 2574   __ bind(&underlying_unpacked); | 2561   __ bind(&underlying_unpacked); | 
| 2575 | 2562 | 
| 2576   if (FLAG_string_slices) { | 2563   if (FLAG_string_slices) { | 
| 2577     Label copy_routine; | 2564     Label copy_routine; | 
| 2578     // r8: underlying subject string | 2565     // r7: underlying subject string | 
| 2579     // r4: instance type of underlying subject string | 2566     // r3: instance type of underlying subject string | 
| 2580     // r5: length | 2567     // r4: length | 
| 2581     // r6: adjusted start index (untagged) | 2568     // r5: adjusted start index (untagged) | 
| 2582     __ cmpi(r5, Operand(SlicedString::kMinLength)); | 2569     __ CmpP(r4, Operand(SlicedString::kMinLength)); | 
| 2583     // Short slice.  Copy instead of slicing. | 2570     // Short slice.  Copy instead of slicing. | 
| 2584     __ blt(&copy_routine); | 2571     __ blt(&copy_routine); | 
| 2585     // Allocate new sliced string.  At this point we do not reload the instance | 2572     // Allocate new sliced string.  At this point we do not reload the instance | 
| 2586     // type including the string encoding because we simply rely on the info | 2573     // type including the string encoding because we simply rely on the info | 
| 2587     // provided by the original string.  It does not matter if the original | 2574     // provided by the original string.  It does not matter if the original | 
| 2588     // string's encoding is wrong because we always have to recheck encoding of | 2575     // string's encoding is wrong because we always have to recheck encoding of | 
| 2589     // the newly created string's parent anyway due to externalized strings. | 2576     // the newly created string's parent anyway due to externalized strings. | 
| 2590     Label two_byte_slice, set_slice_header; | 2577     Label two_byte_slice, set_slice_header; | 
| 2591     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); | 2578     STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); | 
| 2592     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | 2579     STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | 
| 2593     __ andi(r0, r4, Operand(kStringEncodingMask)); | 2580     __ mov(r0, Operand(kStringEncodingMask)); | 
| 2594     __ beq(&two_byte_slice, cr0); | 2581     __ AndP(r0, r3); | 
| 2595     __ AllocateOneByteSlicedString(r3, r5, r9, r10, &runtime); | 2582     __ beq(&two_byte_slice); | 
|  | 2583     __ AllocateOneByteSlicedString(r2, r4, r8, r9, &runtime); | 
| 2596     __ b(&set_slice_header); | 2584     __ b(&set_slice_header); | 
| 2597     __ bind(&two_byte_slice); | 2585     __ bind(&two_byte_slice); | 
| 2598     __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime); | 2586     __ AllocateTwoByteSlicedString(r2, r4, r8, r9, &runtime); | 
| 2599     __ bind(&set_slice_header); | 2587     __ bind(&set_slice_header); | 
| 2600     __ SmiTag(r6); | 2588     __ SmiTag(r5); | 
| 2601     __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0); | 2589     __ StoreP(r7, FieldMemOperand(r2, SlicedString::kParentOffset)); | 
| 2602     __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0); | 2590     __ StoreP(r5, FieldMemOperand(r2, SlicedString::kOffsetOffset)); | 
| 2603     __ b(&return_r3); | 2591     __ b(&return_r2); | 
| 2604 | 2592 | 
| 2605   __ bind(&copy_routine); | 2593   __ bind(&copy_routine); | 
| 2606   } | 2594   } | 
| 2607 | 2595 | 
| 2608   // r8: underlying subject string | 2596   // r7: underlying subject string | 
| 2609   // r4: instance type of underlying subject string | 2597   // r3: instance type of underlying subject string | 
| 2610   // r5: length | 2598   // r4: length | 
| 2611   // r6: adjusted start index (untagged) | 2599   // r5: adjusted start index (untagged) | 
| 2612   Label two_byte_sequential, sequential_string, allocate_result; | 2600   Label two_byte_sequential, sequential_string, allocate_result; | 
| 2613   STATIC_ASSERT(kExternalStringTag != 0); | 2601   STATIC_ASSERT(kExternalStringTag != 0); | 
| 2614   STATIC_ASSERT(kSeqStringTag == 0); | 2602   STATIC_ASSERT(kSeqStringTag == 0); | 
| 2615   __ andi(r0, r4, Operand(kExternalStringTag)); | 2603   __ mov(r0, Operand(kExternalStringTag)); | 
| 2616   __ beq(&sequential_string, cr0); | 2604   __ AndP(r0, r3); | 
|  | 2605   __ beq(&sequential_string); | 
| 2617 | 2606 | 
| 2618   // Handle external string. | 2607   // Handle external string. | 
| 2619   // Rule out short external strings. | 2608   // Rule out short external strings. | 
| 2620   STATIC_ASSERT(kShortExternalStringTag != 0); | 2609   STATIC_ASSERT(kShortExternalStringTag != 0); | 
| 2621   __ andi(r0, r4, Operand(kShortExternalStringTag)); | 2610   __ mov(r0, Operand(kShortExternalStringTag)); | 
| 2622   __ bne(&runtime, cr0); | 2611   __ AndP(r0, r3); | 
| 2623   __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset)); | 2612   __ bne(&runtime); | 
| 2624   // r8 already points to the first character of underlying string. | 2613   __ LoadP(r7, FieldMemOperand(r7, ExternalString::kResourceDataOffset)); | 
|  | 2614   // r7 already points to the first character of underlying string. | 
| 2625   __ b(&allocate_result); | 2615   __ b(&allocate_result); | 
| 2626 | 2616 | 
| 2627   __ bind(&sequential_string); | 2617   __ bind(&sequential_string); | 
| 2628   // Locate first character of underlying subject string. | 2618   // Locate first character of underlying subject string. | 
| 2629   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 2619   STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 
| 2630   __ addi(r8, r8, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 2620   __ AddP(r7, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
| 2631 | 2621 | 
| 2632   __ bind(&allocate_result); | 2622   __ bind(&allocate_result); | 
| 2633   // Sequential string.  Allocate the result. | 2623   // Sequential string.  Allocate the result. | 
| 2634   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); | 2624   STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); | 
| 2635   __ andi(r0, r4, Operand(kStringEncodingMask)); | 2625   __ mov(r0, Operand(kStringEncodingMask)); | 
| 2636   __ beq(&two_byte_sequential, cr0); | 2626   __ AndP(r0, r3); | 
|  | 2627   __ beq(&two_byte_sequential); | 
| 2637 | 2628 | 
| 2638   // Allocate and copy the resulting one-byte string. | 2629   // Allocate and copy the resulting one-byte string. | 
| 2639   __ AllocateOneByteString(r3, r5, r7, r9, r10, &runtime); | 2630   __ AllocateOneByteString(r2, r4, r6, r8, r9, &runtime); | 
| 2640 | 2631 | 
| 2641   // Locate first character of substring to copy. | 2632   // Locate first character of substring to copy. | 
| 2642   __ add(r8, r8, r6); | 2633   __ AddP(r7, r5); | 
| 2643   // Locate first character of result. | 2634   // Locate first character of result. | 
| 2644   __ addi(r4, r3, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 2635   __ AddP(r3, r2, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
| 2645 | 2636 | 
| 2646   // r3: result string | 2637   // r2: result string | 
| 2647   // r4: first character of result string | 2638   // r3: first character of result string | 
| 2648   // r5: result string length | 2639   // r4: result string length | 
| 2649   // r8: first character of substring to copy | 2640   // r7: first character of substring to copy | 
| 2650   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 2641   STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 
| 2651   StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6, | 2642   StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5, | 
| 2652                                        String::ONE_BYTE_ENCODING); | 2643                                        String::ONE_BYTE_ENCODING); | 
| 2653   __ b(&return_r3); | 2644   __ b(&return_r2); | 
| 2654 | 2645 | 
| 2655   // Allocate and copy the resulting two-byte string. | 2646   // Allocate and copy the resulting two-byte string. | 
| 2656   __ bind(&two_byte_sequential); | 2647   __ bind(&two_byte_sequential); | 
| 2657   __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime); | 2648   __ AllocateTwoByteString(r2, r4, r6, r8, r9, &runtime); | 
| 2658 | 2649 | 
| 2659   // Locate first character of substring to copy. | 2650   // Locate first character of substring to copy. | 
| 2660   __ ShiftLeftImm(r4, r6, Operand(1)); | 2651   __ ShiftLeftP(r3, r5, Operand(1)); | 
| 2661   __ add(r8, r8, r4); | 2652   __ AddP(r7, r3); | 
| 2662   // Locate first character of result. | 2653   // Locate first character of result. | 
| 2663   __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 2654   __ AddP(r3, r2, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
| 2664 | 2655 | 
| 2665   // r3: result string. | 2656   // r2: result string. | 
| 2666   // r4: first character of result. | 2657   // r3: first character of result. | 
| 2667   // r5: result length. | 2658   // r4: result length. | 
| 2668   // r8: first character of substring to copy. | 2659   // r7: first character of substring to copy. | 
| 2669   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 2660   STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 
| 2670   StringHelper::GenerateCopyCharacters(masm, r4, r8, r5, r6, | 2661   StringHelper::GenerateCopyCharacters(masm, r3, r7, r4, r5, | 
| 2671                                        String::TWO_BYTE_ENCODING); | 2662                                        String::TWO_BYTE_ENCODING); | 
| 2672 | 2663 | 
| 2673   __ bind(&return_r3); | 2664   __ bind(&return_r2); | 
| 2674   Counters* counters = isolate()->counters(); | 2665   Counters* counters = isolate()->counters(); | 
| 2675   __ IncrementCounter(counters->sub_string_native(), 1, r6, r7); | 2666   __ IncrementCounter(counters->sub_string_native(), 1, r5, r6); | 
| 2676   __ Drop(3); | 2667   __ Drop(3); | 
| 2677   __ Ret(); | 2668   __ Ret(); | 
| 2678 | 2669 | 
| 2679   // Just jump to runtime to create the sub string. | 2670   // Just jump to runtime to create the sub string. | 
| 2680   __ bind(&runtime); | 2671   __ bind(&runtime); | 
| 2681   __ TailCallRuntime(Runtime::kSubString); | 2672   __ TailCallRuntime(Runtime::kSubString); | 
| 2682 | 2673 | 
| 2683   __ bind(&single_char); | 2674   __ bind(&single_char); | 
| 2684   // r3: original string | 2675   // r2: original string | 
| 2685   // r4: instance type | 2676   // r3: instance type | 
| 2686   // r5: length | 2677   // r4: length | 
| 2687   // r6: from index (untagged) | 2678   // r5: from index (untagged) | 
| 2688   __ SmiTag(r6, r6); | 2679   __ SmiTag(r5, r5); | 
| 2689   StringCharAtGenerator generator(r3, r6, r5, r3, &runtime, &runtime, &runtime, | 2680   StringCharAtGenerator generator(r2, r5, r4, r2, &runtime, &runtime, &runtime, | 
| 2690                                   STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING); | 2681                                   STRING_INDEX_IS_NUMBER, RECEIVER_IS_STRING); | 
| 2691   generator.GenerateFast(masm); | 2682   generator.GenerateFast(masm); | 
| 2692   __ Drop(3); | 2683   __ Drop(3); | 
| 2693   __ Ret(); | 2684   __ Ret(); | 
| 2694   generator.SkipSlow(masm, &runtime); | 2685   generator.SkipSlow(masm, &runtime); | 
| 2695 } | 2686 } | 
| 2696 | 2687 | 
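
Before touching any characters, the stub above selects one of five paths: runtime bailout, single character, trivial substring, a SlicedString header, or a fresh copy. A minimal standalone C++ sketch of that selection order (the enum, the function name, and the value 13 standing in for SlicedString::kMinLength are illustrative assumptions, not V8 API):

```cpp
#include <cstddef>

// Hypothetical names; only the decision order mirrors SubStringStub::Generate.
enum class SubStringPath { kRuntime, kSingleChar, kReturnOriginal, kSlice, kCopy };

SubStringPath ChooseSubStringPath(std::ptrdiff_t from, std::ptrdiff_t to,
                                  std::ptrdiff_t string_length) {
  if (from < 0 || to < from) return SubStringPath::kRuntime;  // unsafe arguments
  const std::ptrdiff_t length = to - from;
  if (length == 1) return SubStringPath::kSingleChar;  // StringCharAtGenerator
  if (length == string_length) return SubStringPath::kReturnOriginal;
  if (length > string_length) return SubStringPath::kRuntime;  // out of range
  const std::ptrdiff_t kMinSlicedLength = 13;  // assumed stand-in value
  return length < kMinSlicedLength ? SubStringPath::kCopy    // short: copy chars
                                   : SubStringPath::kSlice;  // long: share parent
}
```
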
| 2697 |  | 
| 2698 void ToNumberStub::Generate(MacroAssembler* masm) { | 2688 void ToNumberStub::Generate(MacroAssembler* masm) { | 
| 2699   // The ToNumber stub takes one argument in r3. | 2689   // The ToNumber stub takes one argument in r2. | 
| 2700   Label not_smi; | 2690   Label not_smi; | 
| 2701   __ JumpIfNotSmi(r3, &not_smi); | 2691   __ JumpIfNotSmi(r2, &not_smi); | 
| 2702   __ blr(); | 2692   __ b(r14); | 
| 2703   __ bind(&not_smi); | 2693   __ bind(&not_smi); | 
| 2704 | 2694 | 
| 2705   __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE); | 2695   __ CompareObjectType(r2, r3, r3, HEAP_NUMBER_TYPE); | 
| 2706   // r3: receiver | 2696   // r2: receiver | 
| 2707   // r4: receiver instance type | 2697   // r3: receiver instance type | 
| 2708   __ Ret(eq); | 2698   Label not_heap_number; | 
|  | 2699   __ bne(&not_heap_number); | 
|  | 2700   __ Ret(); | 
|  | 2701   __ bind(&not_heap_number); | 
| 2709 | 2702 | 
| 2710   Label not_string, slow_string; | 2703   Label not_string, slow_string; | 
| 2711   __ cmpli(r4, Operand(FIRST_NONSTRING_TYPE)); | 2704   __ CmpLogicalP(r3, Operand(FIRST_NONSTRING_TYPE)); | 
| 2712   __ bge(&not_string); | 2705   __ bge(&not_string, Label::kNear); | 
| 2713   // Check if string has a cached array index. | 2706   // Check if string has a cached array index. | 
| 2714   __ lwz(r5, FieldMemOperand(r3, String::kHashFieldOffset)); | 2707   __ LoadlW(r4, FieldMemOperand(r2, String::kHashFieldOffset)); | 
| 2715   __ And(r0, r5, Operand(String::kContainsCachedArrayIndexMask), SetRC); | 2708   __ AndP(r0, r4, Operand(String::kContainsCachedArrayIndexMask)); | 
| 2716   __ bne(&slow_string, cr0); | 2709   __ bne(&slow_string, Label::kNear); | 
| 2717   __ IndexFromHash(r5, r3); | 2710   __ IndexFromHash(r4, r2); | 
| 2718   __ blr(); | 2711   __ b(r14); | 
| 2719   __ bind(&slow_string); | 2712   __ bind(&slow_string); | 
| 2720   __ push(r3);  // Push argument. | 2713   __ push(r2);  // Push argument. | 
| 2721   __ TailCallRuntime(Runtime::kStringToNumber); | 2714   __ TailCallRuntime(Runtime::kStringToNumber); | 
| 2722   __ bind(&not_string); | 2715   __ bind(&not_string); | 
| 2723 | 2716 | 
| 2724   Label not_oddball; | 2717   Label not_oddball; | 
| 2725   __ cmpi(r4, Operand(ODDBALL_TYPE)); | 2718   __ CmpP(r3, Operand(ODDBALL_TYPE)); | 
| 2726   __ bne(&not_oddball); | 2719   __ bne(&not_oddball, Label::kNear); | 
| 2727   __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset)); | 2720   __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset)); | 
| 2728   __ blr(); | 2721   __ b(r14); | 
| 2729   __ bind(&not_oddball); | 2722   __ bind(&not_oddball); | 
| 2730 | 2723 | 
| 2731   __ push(r3);  // Push argument. | 2724   __ push(r2);  // Push argument. | 
| 2732   __ TailCallRuntime(Runtime::kToNumber); | 2725   __ TailCallRuntime(Runtime::kToNumber); | 
| 2733 } | 2726 } | 
| 2734 | 2727 | 
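
The string fast path above relies on V8 caching a successfully parsed array index inside the string's hash field, so ToNumber can mask it out instead of reparsing the characters. A sketch of that test under an assumed bit layout (the mask and shift constants here are illustrative, not the real field definitions):

```cpp
#include <cstdint>
#include <optional>

// Assumed layout: a flag bit marks "no cached index"; the index sits above it.
constexpr uint32_t kContainsCachedArrayIndexMask = 0x1;  // illustrative
constexpr uint32_t kArrayIndexShift = 2;                 // illustrative
constexpr uint32_t kArrayIndexValueMask = 0xFFFFFFu << kArrayIndexShift;

std::optional<uint32_t> IndexFromHashField(uint32_t hash_field) {
  if (hash_field & kContainsCachedArrayIndexMask) {
    return std::nullopt;  // slow_string: fall back to Runtime::kStringToNumber
  }
  return (hash_field & kArrayIndexValueMask) >> kArrayIndexShift;
}
```
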
| 2735 |  | 
| 2736 void ToLengthStub::Generate(MacroAssembler* masm) { | 2728 void ToLengthStub::Generate(MacroAssembler* masm) { | 
| 2737   // The ToLength stub takes one argument in r3. | 2729   // The ToLength stub takes one argument in r2. | 
| 2738   Label not_smi; | 2730   Label not_smi; | 
| 2739   __ JumpIfNotSmi(r3, &not_smi); | 2731   __ JumpIfNotSmi(r2, &not_smi); | 
| 2740   STATIC_ASSERT(kSmiTag == 0); | 2732   STATIC_ASSERT(kSmiTag == 0); | 
| 2741   __ cmpi(r3, Operand::Zero()); | 2733   __ CmpP(r2, Operand::Zero()); | 
| 2742   if (CpuFeatures::IsSupported(ISELECT)) { | 2734   Label positive; | 
| 2743     __ isel(lt, r3, r0, r3); | 2735   __ bgt(&positive); | 
| 2744   } else { | 2736   __ LoadImmP(r2, Operand::Zero()); | 
| 2745     Label positive; | 2737   __ bind(&positive); | 
| 2746     __ bgt(&positive); |  | 
| 2747     __ li(r3, Operand::Zero()); |  | 
| 2748     __ bind(&positive); |  | 
| 2749   } |  | 
| 2750   __ Ret(); | 2738   __ Ret(); | 
| 2751   __ bind(&not_smi); | 2739   __ bind(&not_smi); | 
| 2752 | 2740 | 
| 2753   __ push(r3);  // Push argument. | 2741   __ push(r2);  // Push argument. | 
| 2754   __ TailCallRuntime(Runtime::kToLength); | 2742   __ TailCallRuntime(Runtime::kToLength); | 
| 2755 } | 2743 } | 
| 2756 | 2744 | 
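
For smi inputs, ToLength above only has to clamp negative values to zero; the PPC version can do this branch-free with isel, while the s390 version branches. The generated arithmetic amounts to:

```cpp
#include <algorithm>
#include <cstdint>

// Smi fast path of ToLength: negative lengths saturate to zero.
int64_t ToLengthSmi(int64_t value) { return std::max<int64_t>(value, 0); }
```
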
|  | 2745 void ToStringStub::Generate(MacroAssembler* masm) { | 
|  | 2746   // The ToString stub takes one argument in r2. | 
|  | 2747   Label done; | 
|  | 2748   Label is_number; | 
|  | 2749   __ JumpIfSmi(r2, &is_number); | 
| 2757 | 2750 | 
| 2758 void ToStringStub::Generate(MacroAssembler* masm) { | 2751   __ CompareObjectType(r2, r3, r3, FIRST_NONSTRING_TYPE); | 
| 2759   // The ToString stub takes one argument in r3. | 2752   // r2: receiver | 
| 2760   Label is_number; | 2753   // r3: receiver instance type | 
| 2761   __ JumpIfSmi(r3, &is_number); | 2754   __ blt(&done); | 
| 2762 |  | 
| 2763   __ CompareObjectType(r3, r4, r4, FIRST_NONSTRING_TYPE); |  | 
| 2764   // r3: receiver |  | 
| 2765   // r4: receiver instance type |  | 
| 2766   __ Ret(lt); |  | 
| 2767 | 2755 | 
| 2768   Label not_heap_number; | 2756   Label not_heap_number; | 
| 2769   __ cmpi(r4, Operand(HEAP_NUMBER_TYPE)); | 2757   __ CmpP(r3, Operand(HEAP_NUMBER_TYPE)); | 
| 2770   __ bne(&not_heap_number); | 2758   __ bne(&not_heap_number); | 
| 2771   __ bind(&is_number); | 2759   __ bind(&is_number); | 
| 2772   NumberToStringStub stub(isolate()); | 2760   NumberToStringStub stub(isolate()); | 
| 2773   __ TailCallStub(&stub); | 2761   __ TailCallStub(&stub); | 
| 2774   __ bind(&not_heap_number); | 2762   __ bind(&not_heap_number); | 
| 2775 | 2763 | 
| 2776   Label not_oddball; | 2764   Label not_oddball; | 
| 2777   __ cmpi(r4, Operand(ODDBALL_TYPE)); | 2765   __ CmpP(r3, Operand(ODDBALL_TYPE)); | 
| 2778   __ bne(&not_oddball); | 2766   __ bne(&not_oddball); | 
| 2779   __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset)); | 2767   __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset)); | 
| 2780   __ Ret(); | 2768   __ Ret(); | 
| 2781   __ bind(&not_oddball); | 2769   __ bind(&not_oddball); | 
| 2782 | 2770 | 
| 2783   __ push(r3);  // Push argument. | 2771   __ push(r2);  // Push argument. | 
| 2784   __ TailCallRuntime(Runtime::kToString); | 2772   __ TailCallRuntime(Runtime::kToString); | 
|  | 2773 | 
|  | 2774   __ bind(&done); | 
|  | 2775   __ Ret(); | 
| 2785 } | 2776 } | 
| 2786 | 2777 | 
| 2787 |  | 
| 2788 void ToNameStub::Generate(MacroAssembler* masm) { | 2778 void ToNameStub::Generate(MacroAssembler* masm) { | 
| 2789   // The ToName stub takes one argument in r3. | 2779   // The ToName stub takes one argument in r2. | 
| 2790   Label is_number; | 2780   Label is_number; | 
| 2791   __ JumpIfSmi(r3, &is_number); | 2781   __ JumpIfSmi(r2, &is_number); | 
| 2792 | 2782 | 
| 2793   STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); | 2783   STATIC_ASSERT(FIRST_NAME_TYPE == FIRST_TYPE); | 
| 2794   __ CompareObjectType(r3, r4, r4, LAST_NAME_TYPE); | 2784   __ CompareObjectType(r2, r3, r3, LAST_NAME_TYPE); | 
| 2795   // r3: receiver | 2785   // r2: receiver | 
| 2796   // r4: receiver instance type | 2786   // r3: receiver instance type | 
| 2797   __ Ret(le); | 2787   __ Ret(le); | 
| 2798 | 2788 | 
| 2799   Label not_heap_number; | 2789   Label not_heap_number; | 
| 2800   __ cmpi(r4, Operand(HEAP_NUMBER_TYPE)); | 2790   __ CmpP(r3, Operand(HEAP_NUMBER_TYPE)); | 
| 2801   __ bne(&not_heap_number); | 2791   __ bne(&not_heap_number); | 
| 2802   __ bind(&is_number); | 2792   __ bind(&is_number); | 
| 2803   NumberToStringStub stub(isolate()); | 2793   NumberToStringStub stub(isolate()); | 
| 2804   __ TailCallStub(&stub); | 2794   __ TailCallStub(&stub); | 
| 2805   __ bind(&not_heap_number); | 2795   __ bind(&not_heap_number); | 
| 2806 | 2796 | 
| 2807   Label not_oddball; | 2797   Label not_oddball; | 
| 2808   __ cmpi(r4, Operand(ODDBALL_TYPE)); | 2798   __ CmpP(r3, Operand(ODDBALL_TYPE)); | 
| 2809   __ bne(&not_oddball); | 2799   __ bne(&not_oddball); | 
| 2810   __ LoadP(r3, FieldMemOperand(r3, Oddball::kToStringOffset)); | 2800   __ LoadP(r2, FieldMemOperand(r2, Oddball::kToStringOffset)); | 
| 2811   __ Ret(); | 2801   __ Ret(); | 
| 2812   __ bind(&not_oddball); | 2802   __ bind(&not_oddball); | 
| 2813 | 2803 | 
| 2814   __ push(r3);  // Push argument. | 2804   __ push(r2);  // Push argument. | 
| 2815   __ TailCallRuntime(Runtime::kToName); | 2805   __ TailCallRuntime(Runtime::kToName); | 
| 2816 } | 2806 } | 
| 2817 | 2807 | 
| 2818 |  | 
| 2819 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm, | 2808 void StringHelper::GenerateFlatOneByteStringEquals(MacroAssembler* masm, | 
| 2820                                                    Register left, | 2809                                                    Register left, | 
| 2821                                                    Register right, | 2810                                                    Register right, | 
| 2822                                                    Register scratch1, | 2811                                                    Register scratch1, | 
| 2823                                                    Register scratch2) { | 2812                                                    Register scratch2) { | 
| 2824   Register length = scratch1; | 2813   Register length = scratch1; | 
| 2825 | 2814 | 
| 2826   // Compare lengths. | 2815   // Compare lengths. | 
| 2827   Label strings_not_equal, check_zero_length; | 2816   Label strings_not_equal, check_zero_length; | 
| 2828   __ LoadP(length, FieldMemOperand(left, String::kLengthOffset)); | 2817   __ LoadP(length, FieldMemOperand(left, String::kLengthOffset)); | 
| 2829   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 2818   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 
| 2830   __ cmp(length, scratch2); | 2819   __ CmpP(length, scratch2); | 
| 2831   __ beq(&check_zero_length); | 2820   __ beq(&check_zero_length); | 
| 2832   __ bind(&strings_not_equal); | 2821   __ bind(&strings_not_equal); | 
| 2833   __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL)); | 2822   __ LoadSmiLiteral(r2, Smi::FromInt(NOT_EQUAL)); | 
| 2834   __ Ret(); | 2823   __ Ret(); | 
| 2835 | 2824 | 
| 2836   // Check if the length is zero. | 2825   // Check if the length is zero. | 
| 2837   Label compare_chars; | 2826   Label compare_chars; | 
| 2838   __ bind(&check_zero_length); | 2827   __ bind(&check_zero_length); | 
| 2839   STATIC_ASSERT(kSmiTag == 0); | 2828   STATIC_ASSERT(kSmiTag == 0); | 
| 2840   __ cmpi(length, Operand::Zero()); | 2829   __ CmpP(length, Operand::Zero()); | 
| 2841   __ bne(&compare_chars); | 2830   __ bne(&compare_chars); | 
| 2842   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 2831   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL)); | 
| 2843   __ Ret(); | 2832   __ Ret(); | 
| 2844 | 2833 | 
| 2845   // Compare characters. | 2834   // Compare characters. | 
| 2846   __ bind(&compare_chars); | 2835   __ bind(&compare_chars); | 
| 2847   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, | 2836   GenerateOneByteCharsCompareLoop(masm, left, right, length, scratch2, | 
| 2848                                   &strings_not_equal); | 2837                                   &strings_not_equal); | 
| 2849 | 2838 | 
| 2850   // Characters are equal. | 2839   // Characters are equal. | 
| 2851   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 2840   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL)); | 
| 2852   __ Ret(); | 2841   __ Ret(); | 
| 2853 } | 2842 } | 
| 2854 | 2843 | 
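
GenerateFlatOneByteStringEquals reduces to a length check, a zero-length short-circuit, and a byte-by-byte loop. A plain C++ equivalent of the same contract (function name assumed):

```cpp
#include <cstddef>
#include <cstring>

bool FlatOneByteEquals(const unsigned char* left, std::size_t left_length,
                       const unsigned char* right, std::size_t right_length) {
  if (left_length != right_length) return false;  // strings_not_equal
  if (left_length == 0) return true;              // equal without touching chars
  return std::memcmp(left, right, left_length) == 0;
}
```
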
| 2855 |  | 
| 2856 void StringHelper::GenerateCompareFlatOneByteStrings( | 2844 void StringHelper::GenerateCompareFlatOneByteStrings( | 
| 2857     MacroAssembler* masm, Register left, Register right, Register scratch1, | 2845     MacroAssembler* masm, Register left, Register right, Register scratch1, | 
| 2858     Register scratch2, Register scratch3) { | 2846     Register scratch2, Register scratch3) { | 
| 2859   Label result_not_equal, compare_lengths; | 2847   Label skip, result_not_equal, compare_lengths; | 
| 2860   // Find minimum length and length difference. | 2848   // Find minimum length and length difference. | 
| 2861   __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 2849   __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 
| 2862   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 2850   __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 
| 2863   __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC); | 2851   __ SubP(scratch3, scratch1, scratch2 /*, LeaveOE, SetRC*/); | 
|  | 2852   // Removing RC looks okay here. | 
| 2864   Register length_delta = scratch3; | 2853   Register length_delta = scratch3; | 
| 2865   if (CpuFeatures::IsSupported(ISELECT)) { | 2854   __ ble(&skip, Label::kNear); | 
| 2866     __ isel(gt, scratch1, scratch2, scratch1, cr0); | 2855   __ LoadRR(scratch1, scratch2); | 
| 2867   } else { | 2856   __ bind(&skip); | 
| 2868     Label skip; |  | 
| 2869     __ ble(&skip, cr0); |  | 
| 2870     __ mr(scratch1, scratch2); |  | 
| 2871     __ bind(&skip); |  | 
| 2872   } |  | 
| 2873   Register min_length = scratch1; | 2857   Register min_length = scratch1; | 
| 2874   STATIC_ASSERT(kSmiTag == 0); | 2858   STATIC_ASSERT(kSmiTag == 0); | 
| 2875   __ cmpi(min_length, Operand::Zero()); | 2859   __ CmpP(min_length, Operand::Zero()); | 
| 2876   __ beq(&compare_lengths); | 2860   __ beq(&compare_lengths); | 
| 2877 | 2861 | 
| 2878   // Compare loop. | 2862   // Compare loop. | 
| 2879   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2, | 2863   GenerateOneByteCharsCompareLoop(masm, left, right, min_length, scratch2, | 
| 2880                                   &result_not_equal); | 2864                                   &result_not_equal); | 
| 2881 | 2865 | 
| 2882   // Compare lengths - strings up to min-length are equal. | 2866   // Compare lengths - strings up to min-length are equal. | 
| 2883   __ bind(&compare_lengths); | 2867   __ bind(&compare_lengths); | 
| 2884   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 2868   DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 
| 2885   // Use length_delta as result if it's zero. | 2869   // Use length_delta as result if it's zero. | 
| 2886   __ mr(r3, length_delta); | 2870   __ LoadRR(r2, length_delta); | 
| 2887   __ cmpi(r3, Operand::Zero()); | 2871   __ CmpP(length_delta, Operand::Zero()); | 
| 2888   __ bind(&result_not_equal); | 2872   __ bind(&result_not_equal); | 
| 2889   // Conditionally update the result based on either length_delta or | 2873   // Conditionally update the result based on either length_delta or | 
| 2890   // the last comparison performed in the loop above. | 2874   // the last comparison performed in the loop above. | 
| 2891   if (CpuFeatures::IsSupported(ISELECT)) { | 2875   Label less_equal, equal; | 
| 2892     __ LoadSmiLiteral(r4, Smi::FromInt(GREATER)); | 2876   __ ble(&less_equal); | 
| 2893     __ LoadSmiLiteral(r5, Smi::FromInt(LESS)); | 2877   __ LoadSmiLiteral(r2, Smi::FromInt(GREATER)); | 
| 2894     __ isel(eq, r3, r0, r4); | 2878   __ Ret(); | 
| 2895     __ isel(lt, r3, r5, r3); | 2879   __ bind(&less_equal); | 
| 2896     __ Ret(); | 2880   __ beq(&equal); | 
| 2897   } else { | 2881   __ LoadSmiLiteral(r2, Smi::FromInt(LESS)); | 
| 2898     Label less_equal, equal; | 2882   __ bind(&equal); | 
| 2899     __ ble(&less_equal); | 2883   __ Ret(); | 
| 2900     __ LoadSmiLiteral(r3, Smi::FromInt(GREATER)); |  | 
| 2901     __ Ret(); |  | 
| 2902     __ bind(&less_equal); |  | 
| 2903     __ beq(&equal); |  | 
| 2904     __ LoadSmiLiteral(r3, Smi::FromInt(LESS)); |  | 
| 2905     __ bind(&equal); |  | 
| 2906     __ Ret(); |  | 
| 2907   } |  | 
| 2908 } | 2884 } | 
| 2909 | 2885 | 
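
The three-way compare above scans min(left length, right length) characters and, only when all of those match, falls back to the sign of the length difference. A compact sketch of the same result computation:

```cpp
#include <algorithm>
#include <cstring>

// Returns -1 (LESS), 0 (EQUAL) or 1 (GREATER), like the stub's smi result.
int CompareFlatOneByte(const unsigned char* left, long left_length,
                       const unsigned char* right, long right_length) {
  const long min_length = std::min(left_length, right_length);
  if (int r = std::memcmp(left, right, static_cast<std::size_t>(min_length))) {
    return r < 0 ? -1 : 1;  // decided by a character within min_length
  }
  const long delta = left_length - right_length;  // length_delta as tiebreak
  return (delta > 0) - (delta < 0);
}
```
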
| 2910 |  | 
| 2911 void StringHelper::GenerateOneByteCharsCompareLoop( | 2886 void StringHelper::GenerateOneByteCharsCompareLoop( | 
| 2912     MacroAssembler* masm, Register left, Register right, Register length, | 2887     MacroAssembler* masm, Register left, Register right, Register length, | 
| 2913     Register scratch1, Label* chars_not_equal) { | 2888     Register scratch1, Label* chars_not_equal) { | 
| 2914   // Change index to run from -length to -1 by adding length to string | 2889   // Change index to run from -length to -1 by adding length to string | 
| 2915   // start. This means the loop ends when the index reaches zero, which | 2890   // start. This means the loop ends when the index reaches zero, which | 
| 2916   // removes the need for an additional compare. | 2891   // removes the need for an additional compare. | 
| 2917   __ SmiUntag(length); | 2892   __ SmiUntag(length); | 
| 2918   __ addi(scratch1, length, | 2893   __ AddP(scratch1, length, | 
| 2919           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 2894           Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
| 2920   __ add(left, left, scratch1); | 2895   __ AddP(left, scratch1); | 
| 2921   __ add(right, right, scratch1); | 2896   __ AddP(right, scratch1); | 
| 2922   __ subfic(length, length, Operand::Zero()); | 2897   __ LoadComplementRR(length, length); | 
| 2923   Register index = length;  // index = -length; | 2898   Register index = length;  // index = -length; | 
| 2924 | 2899 | 
| 2925   // Compare loop. | 2900   // Compare loop. | 
| 2926   Label loop; | 2901   Label loop; | 
| 2927   __ bind(&loop); | 2902   __ bind(&loop); | 
| 2928   __ lbzx(scratch1, MemOperand(left, index)); | 2903   __ LoadlB(scratch1, MemOperand(left, index)); | 
| 2929   __ lbzx(r0, MemOperand(right, index)); | 2904   __ LoadlB(r0, MemOperand(right, index)); | 
| 2930   __ cmp(scratch1, r0); | 2905   __ CmpP(scratch1, r0); | 
| 2931   __ bne(chars_not_equal); | 2906   __ bne(chars_not_equal); | 
| 2932   __ addi(index, index, Operand(1)); | 2907   __ AddP(index, Operand(1)); | 
| 2933   __ cmpi(index, Operand::Zero()); | 2908   __ CmpP(index, Operand::Zero()); | 
| 2934   __ bne(&loop); | 2909   __ bne(&loop); | 
| 2935 } | 2910 } | 
| 2936 | 2911 | 
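
The comment at the top of GenerateOneByteCharsCompareLoop describes a classic codegen trick: bias both base pointers past the end of the data and run the index from -length up to zero, so the increment reaching zero is itself the loop's termination test. Rendered directly in C++:

```cpp
#include <cstddef>

bool OneByteCharsEqual(const unsigned char* left, const unsigned char* right,
                       std::ptrdiff_t length) {
  left += length;  // both pointers now point one past the last character
  right += length;
  for (std::ptrdiff_t index = -length; index != 0; ++index) {  // -length .. -1
    if (left[index] != right[index]) return false;  // chars_not_equal
  }
  return true;  // index reached zero: every character matched
}
```
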
| 2937 |  | 
| 2938 void StringCompareStub::Generate(MacroAssembler* masm) { | 2912 void StringCompareStub::Generate(MacroAssembler* masm) { | 
| 2939   // ----------- S t a t e ------------- | 2913   // ----------- S t a t e ------------- | 
| 2940   //  -- r4    : left | 2914   //  -- r3    : left | 
| 2941   //  -- r3    : right | 2915   //  -- r2    : right | 
| 2942   //  -- lr    : return address | 2916   //  -- r14   : return address | 
| 2943   // ----------------------------------- | 2917   // ----------------------------------- | 
| 2944   __ AssertString(r4); |  | 
| 2945   __ AssertString(r3); | 2918   __ AssertString(r3); | 
|  | 2919   __ AssertString(r2); | 
| 2946 | 2920 | 
| 2947   Label not_same; | 2921   Label not_same; | 
| 2948   __ cmp(r3, r4); | 2922   __ CmpP(r2, r3); | 
| 2949   __ bne(&not_same); | 2923   __ bne(&not_same); | 
| 2950   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 2924   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL)); | 
| 2951   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4, | 2925   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r3, | 
| 2952                       r5); | 2926                       r4); | 
| 2953   __ Ret(); | 2927   __ Ret(); | 
| 2954 | 2928 | 
| 2955   __ bind(&not_same); | 2929   __ bind(&not_same); | 
| 2956 | 2930 | 
| 2957   // Check that both objects are sequential one-byte strings. | 2931   // Check that both objects are sequential one-byte strings. | 
| 2958   Label runtime; | 2932   Label runtime; | 
| 2959   __ JumpIfNotBothSequentialOneByteStrings(r4, r3, r5, r6, &runtime); | 2933   __ JumpIfNotBothSequentialOneByteStrings(r3, r2, r4, r5, &runtime); | 
| 2960 | 2934 | 
| 2961   // Compare flat one-byte strings natively. | 2935   // Compare flat one-byte strings natively. | 
| 2962   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5, | 2936   __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r4, | 
| 2963                       r6); | 2937                       r5); | 
| 2964   StringHelper::GenerateCompareFlatOneByteStrings(masm, r4, r3, r5, r6, r7); | 2938   StringHelper::GenerateCompareFlatOneByteStrings(masm, r3, r2, r4, r5, r6); | 
| 2965 | 2939 | 
| 2966   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | 2940   // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | 
| 2967   // tagged as a small integer. | 2941   // tagged as a small integer. | 
| 2968   __ bind(&runtime); | 2942   __ bind(&runtime); | 
| 2969   __ Push(r4, r3); | 2943   __ Push(r3, r2); | 
| 2970   __ TailCallRuntime(Runtime::kStringCompare); | 2944   __ TailCallRuntime(Runtime::kStringCompare); | 
| 2971 } | 2945 } | 
| 2972 | 2946 | 
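
StringCompareStub layers its checks from cheapest to most expensive: pointer identity first (a string trivially equals itself), then the sequential one-byte fast path, then the runtime. The same shape over std::string, as a non-authoritative sketch:

```cpp
#include <string>

// -1/0/1 result, mirroring the LESS/EQUAL/GREATER smi the stub returns.
int StringCompareSketch(const std::string* left, const std::string* right) {
  if (left == right) return 0;          // identity: EQUAL without a char loop
  const int r = left->compare(*right);  // stands in for flat compare / runtime
  return (r > 0) - (r < 0);
}
```
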
| 2973 |  | 
| 2974 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 2947 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 
| 2975   // ----------- S t a t e ------------- | 2948   // ----------- S t a t e ------------- | 
| 2976   //  -- r4    : left | 2949   //  -- r3    : left | 
| 2977   //  -- r3    : right | 2950   //  -- r2    : right | 
| 2978   //  -- lr    : return address | 2951   //  -- r14   : return address | 
| 2979   // ----------------------------------- | 2952   // ----------------------------------- | 
| 2980 | 2953 | 
| 2981   // Load r5 with the allocation site.  We stick an undefined dummy value here | 2954   // Load r4 with the allocation site.  We stick an undefined dummy value here | 
| 2982   // and replace it with the real allocation site later when we instantiate this | 2955   // and replace it with the real allocation site later when we instantiate this | 
| 2983   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). | 2956   // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). | 
| 2984   __ Move(r5, handle(isolate()->heap()->undefined_value())); | 2957   __ Move(r4, handle(isolate()->heap()->undefined_value())); | 
| 2985 | 2958 | 
| 2986   // Make sure that we actually patched the allocation site. | 2959   // Make sure that we actually patched the allocation site. | 
| 2987   if (FLAG_debug_code) { | 2960   if (FLAG_debug_code) { | 
| 2988     __ TestIfSmi(r5, r0); | 2961     __ TestIfSmi(r4); | 
| 2989     __ Assert(ne, kExpectedAllocationSite, cr0); | 2962     __ Assert(ne, kExpectedAllocationSite, cr0); | 
| 2990     __ push(r5); | 2963     __ push(r4); | 
| 2991     __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset)); | 2964     __ LoadP(r4, FieldMemOperand(r4, HeapObject::kMapOffset)); | 
| 2992     __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex); | 2965     __ CompareRoot(r4, Heap::kAllocationSiteMapRootIndex); | 
| 2993     __ cmp(r5, ip); | 2966     __ pop(r4); | 
| 2994     __ pop(r5); |  | 
| 2995     __ Assert(eq, kExpectedAllocationSite); | 2967     __ Assert(eq, kExpectedAllocationSite); | 
| 2996   } | 2968   } | 
| 2997 | 2969 | 
| 2998   // Tail call into the stub that handles binary operations with allocation | 2970   // Tail call into the stub that handles binary operations with allocation | 
| 2999   // sites. | 2971   // sites. | 
| 3000   BinaryOpWithAllocationSiteStub stub(isolate(), state()); | 2972   BinaryOpWithAllocationSiteStub stub(isolate(), state()); | 
| 3001   __ TailCallStub(&stub); | 2973   __ TailCallStub(&stub); | 
| 3002 } | 2974 } | 
| 3003 | 2975 | 
| 3004 |  | 
| 3005 void CompareICStub::GenerateBooleans(MacroAssembler* masm) { | 2976 void CompareICStub::GenerateBooleans(MacroAssembler* masm) { | 
| 3006   DCHECK_EQ(CompareICState::BOOLEAN, state()); | 2977   DCHECK_EQ(CompareICState::BOOLEAN, state()); | 
| 3007   Label miss; | 2978   Label miss; | 
| 3008 | 2979 | 
| 3009   __ CheckMap(r4, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); | 2980   __ CheckMap(r3, r4, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); | 
| 3010   __ CheckMap(r3, r6, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); | 2981   __ CheckMap(r2, r5, Heap::kBooleanMapRootIndex, &miss, DO_SMI_CHECK); | 
| 3011   if (!Token::IsEqualityOp(op())) { | 2982   if (!Token::IsEqualityOp(op())) { | 
| 3012     __ LoadP(r4, FieldMemOperand(r4, Oddball::kToNumberOffset)); |  | 
| 3013     __ AssertSmi(r4); |  | 
| 3014     __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset)); | 2983     __ LoadP(r3, FieldMemOperand(r3, Oddball::kToNumberOffset)); | 
| 3015     __ AssertSmi(r3); | 2984     __ AssertSmi(r3); | 
|  | 2985     __ LoadP(r2, FieldMemOperand(r2, Oddball::kToNumberOffset)); | 
|  | 2986     __ AssertSmi(r2); | 
| 3016   } | 2987   } | 
| 3017   __ sub(r3, r4, r3); | 2988   __ SubP(r2, r3, r2); | 
| 3018   __ Ret(); | 2989   __ Ret(); | 
| 3019 | 2990 | 
| 3020   __ bind(&miss); | 2991   __ bind(&miss); | 
| 3021   GenerateMiss(masm); | 2992   GenerateMiss(masm); | 
| 3022 } | 2993 } | 
| 3023 | 2994 | 
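
For ordered comparisons the boolean IC above loads each oddball's cached to-number smi (false is 0, true is 1) and subtracts, so the sign of the difference is the comparison result. In miniature:

```cpp
// Boolean CompareIC fast path: subtract the to-number values.
int CompareBooleans(bool left, bool right) {
  return static_cast<int>(left) - static_cast<int>(right);  // -1, 0 or 1
}
```
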
| 3024 |  | 
| 3025 void CompareICStub::GenerateSmis(MacroAssembler* masm) { | 2995 void CompareICStub::GenerateSmis(MacroAssembler* masm) { | 
| 3026   DCHECK(state() == CompareICState::SMI); | 2996   DCHECK(state() == CompareICState::SMI); | 
| 3027   Label miss; | 2997   Label miss; | 
| 3028   __ orx(r5, r4, r3); | 2998   __ OrP(r4, r3, r2); | 
| 3029   __ JumpIfNotSmi(r5, &miss); | 2999   __ JumpIfNotSmi(r4, &miss); | 
| 3030 | 3000 | 
| 3031   if (GetCondition() == eq) { | 3001   if (GetCondition() == eq) { | 
| 3032     // For equality we do not care about the sign of the result. | 3002     // For equality we do not care about the sign of the result. | 
| 3033     // __ sub(r3, r3, r4, SetCC); | 3003     // __ sub(r2, r2, r3, SetCC); | 
| 3034     __ sub(r3, r3, r4); | 3004     __ SubP(r2, r2, r3); | 
| 3035   } else { | 3005   } else { | 
| 3036     // Untag before subtracting to avoid handling overflow. | 3006     // Untag before subtracting to avoid handling overflow. | 
| 3037     __ SmiUntag(r4); |  | 
| 3038     __ SmiUntag(r3); | 3007     __ SmiUntag(r3); | 
| 3039     __ sub(r3, r4, r3); | 3008     __ SmiUntag(r2); | 
|  | 3009     __ SubP(r2, r3, r2); | 
| 3040   } | 3010   } | 
| 3041   __ Ret(); | 3011   __ Ret(); | 
| 3042 | 3012 | 
| 3043   __ bind(&miss); | 3013   __ bind(&miss); | 
| 3044   GenerateMiss(masm); | 3014   GenerateMiss(masm); | 
| 3045 } | 3015 } | 
| 3046 | 3016 | 
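
The branch structure above encodes an overflow argument: for equality, subtracting the tagged values directly is fine because only zero versus non-zero matters, but for ordered compares the stub untags first, since the difference of two tagged smis can overflow while the difference of the narrower untagged payloads cannot. A sketch assuming a one-bit smi tag and an arithmetic right shift:

```cpp
#include <cstdint>

constexpr int kSmiTagSize = 1;  // assumed: payload in the upper 31 bits

int32_t SmiUntag(int32_t tagged) { return tagged >> kSmiTagSize; }

// Ordered smi compare: untag, then subtract. Each payload fits in 31 bits,
// so the 32-bit difference cannot overflow and its sign is trustworthy.
int32_t CompareSmisOrdered(int32_t left_tagged, int32_t right_tagged) {
  return SmiUntag(left_tagged) - SmiUntag(right_tagged);
}
```
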
| 3047 |  | 
| 3048 void CompareICStub::GenerateNumbers(MacroAssembler* masm) { | 3017 void CompareICStub::GenerateNumbers(MacroAssembler* masm) { | 
| 3049   DCHECK(state() == CompareICState::NUMBER); | 3018   DCHECK(state() == CompareICState::NUMBER); | 
| 3050 | 3019 | 
| 3051   Label generic_stub; | 3020   Label generic_stub; | 
| 3052   Label unordered, maybe_undefined1, maybe_undefined2; | 3021   Label unordered, maybe_undefined1, maybe_undefined2; | 
| 3053   Label miss; | 3022   Label miss; | 
| 3054   Label equal, less_than; | 3023   Label equal, less_than; | 
| 3055 | 3024 | 
| 3056   if (left() == CompareICState::SMI) { | 3025   if (left() == CompareICState::SMI) { | 
| 3057     __ JumpIfNotSmi(r4, &miss); | 3026     __ JumpIfNotSmi(r3, &miss); | 
| 3058   } | 3027   } | 
| 3059   if (right() == CompareICState::SMI) { | 3028   if (right() == CompareICState::SMI) { | 
| 3060     __ JumpIfNotSmi(r3, &miss); | 3029     __ JumpIfNotSmi(r2, &miss); | 
| 3061   } | 3030   } | 
| 3062 | 3031 | 
| 3063   // Inlining the double comparison and falling back to the general compare | 3032   // Inlining the double comparison and falling back to the general compare | 
| 3064   // stub if NaN is involved. | 3033   // stub if NaN is involved. | 
| 3065   // Load left and right operand. | 3034   // Load left and right operand. | 
| 3066   Label done, left, left_smi, right_smi; | 3035   Label done, left, left_smi, right_smi; | 
| 3067   __ JumpIfSmi(r3, &right_smi); | 3036   __ JumpIfSmi(r2, &right_smi); | 
| 3068   __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 3037   __ CheckMap(r2, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 
| 3069               DONT_DO_SMI_CHECK); | 3038               DONT_DO_SMI_CHECK); | 
| 3070   __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset)); | 3039   __ LoadDouble(d1, FieldMemOperand(r2, HeapNumber::kValueOffset)); | 
| 3071   __ b(&left); | 3040   __ b(&left); | 
| 3072   __ bind(&right_smi); | 3041   __ bind(&right_smi); | 
| 3073   __ SmiToDouble(d1, r3); | 3042   __ SmiToDouble(d1, r2); | 
| 3074 | 3043 | 
| 3075   __ bind(&left); | 3044   __ bind(&left); | 
| 3076   __ JumpIfSmi(r4, &left_smi); | 3045   __ JumpIfSmi(r3, &left_smi); | 
| 3077   __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 3046   __ CheckMap(r3, r4, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 
| 3078               DONT_DO_SMI_CHECK); | 3047               DONT_DO_SMI_CHECK); | 
| 3079   __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 3048   __ LoadDouble(d0, FieldMemOperand(r3, HeapNumber::kValueOffset)); | 
| 3080   __ b(&done); | 3049   __ b(&done); | 
| 3081   __ bind(&left_smi); | 3050   __ bind(&left_smi); | 
| 3082   __ SmiToDouble(d0, r4); | 3051   __ SmiToDouble(d0, r3); | 
| 3083 | 3052 | 
| 3084   __ bind(&done); | 3053   __ bind(&done); | 
| 3085 | 3054 | 
| 3086   // Compare operands | 3055   // Compare operands | 
| 3087   __ fcmpu(d0, d1); | 3056   __ cdbr(d0, d1); | 
| 3088 | 3057 | 
| 3089   // Don't base result on status bits when a NaN is involved. | 3058   // Don't base result on status bits when a NaN is involved. | 
| 3090   __ bunordered(&unordered); | 3059   __ bunordered(&unordered); | 
| 3091 | 3060 | 
| 3092   // Return a result of -1, 0, or 1, based on status bits. | 3061   // Return a result of -1, 0, or 1, based on status bits. | 
| 3093   if (CpuFeatures::IsSupported(ISELECT)) { | 3062   __ beq(&equal); | 
| 3094     DCHECK(EQUAL == 0); | 3063   __ blt(&less_than); | 
| 3095     __ li(r4, Operand(GREATER)); | 3064   //  assume greater than | 
| 3096     __ li(r5, Operand(LESS)); | 3065   __ LoadImmP(r2, Operand(GREATER)); | 
| 3097     __ isel(eq, r3, r0, r4); | 3066   __ Ret(); | 
| 3098     __ isel(lt, r3, r5, r3); | 3067   __ bind(&equal); | 
| 3099     __ Ret(); | 3068   __ LoadImmP(r2, Operand(EQUAL)); | 
| 3100   } else { | 3069   __ Ret(); | 
| 3101     __ beq(&equal); | 3070   __ bind(&less_than); | 
| 3102     __ blt(&less_than); | 3071   __ LoadImmP(r2, Operand(LESS)); | 
| 3103     //  assume greater than | 3072   __ Ret(); | 
| 3104     __ li(r3, Operand(GREATER)); |  | 
| 3105     __ Ret(); |  | 
| 3106     __ bind(&equal); |  | 
| 3107     __ li(r3, Operand(EQUAL)); |  | 
| 3108     __ Ret(); |  | 
| 3109     __ bind(&less_than); |  | 
| 3110     __ li(r3, Operand(LESS)); |  | 
| 3111     __ Ret(); |  | 
| 3112   } |  | 
| 3113 | 3073 | 
| 3114   __ bind(&unordered); | 3074   __ bind(&unordered); | 
| 3115   __ bind(&generic_stub); | 3075   __ bind(&generic_stub); | 
| 3116   CompareICStub stub(isolate(), op(), CompareICState::GENERIC, | 3076   CompareICStub stub(isolate(), op(), CompareICState::GENERIC, | 
| 3117                      CompareICState::GENERIC, CompareICState::GENERIC); | 3077                      CompareICState::GENERIC, CompareICState::GENERIC); | 
| 3118   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 3078   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 
| 3119 | 3079 | 
| 3120   __ bind(&maybe_undefined1); | 3080   __ bind(&maybe_undefined1); | 
| 3121   if (Token::IsOrderedRelationalCompareOp(op())) { | 3081   if (Token::IsOrderedRelationalCompareOp(op())) { | 
| 3122     __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); | 3082     __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); | 
| 3123     __ bne(&miss); | 3083     __ bne(&miss); | 
| 3124     __ JumpIfSmi(r4, &unordered); | 3084     __ JumpIfSmi(r3, &unordered); | 
| 3125     __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE); | 3085     __ CompareObjectType(r3, r4, r4, HEAP_NUMBER_TYPE); | 
| 3126     __ bne(&maybe_undefined2); | 3086     __ bne(&maybe_undefined2); | 
| 3127     __ b(&unordered); | 3087     __ b(&unordered); | 
| 3128   } | 3088   } | 
| 3129 | 3089 | 
| 3130   __ bind(&maybe_undefined2); | 3090   __ bind(&maybe_undefined2); | 
| 3131   if (Token::IsOrderedRelationalCompareOp(op())) { | 3091   if (Token::IsOrderedRelationalCompareOp(op())) { | 
| 3132     __ CompareRoot(r4, Heap::kUndefinedValueRootIndex); | 3092     __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); | 
| 3133     __ beq(&unordered); | 3093     __ beq(&unordered); | 
| 3134   } | 3094   } | 
| 3135 | 3095 | 
| 3136   __ bind(&miss); | 3096   __ bind(&miss); | 
| 3137   GenerateMiss(masm); | 3097   GenerateMiss(masm); | 
| 3138 } | 3098 } | 
| 3139 | 3099 | 
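
The number IC compares doubles but refuses to map an unordered outcome (a NaN operand) onto -1/0/1, escaping to the generic stub instead. The equivalent decision in C++:

```cpp
#include <cmath>
#include <optional>

// nullopt models the "unordered" branch that escapes to the generic stub.
std::optional<int> ThreeWayCompareDoubles(double left, double right) {
  if (std::isnan(left) || std::isnan(right)) return std::nullopt;
  if (left == right) return 0;   // EQUAL
  return left < right ? -1 : 1;  // LESS / GREATER
}
```
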
| 3140 |  | 
| 3141 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) { | 3100 void CompareICStub::GenerateInternalizedStrings(MacroAssembler* masm) { | 
| 3142   DCHECK(state() == CompareICState::INTERNALIZED_STRING); | 3101   DCHECK(state() == CompareICState::INTERNALIZED_STRING); | 
| 3143   Label miss, not_equal; | 3102   Label miss, not_equal; | 
| 3144 | 3103 | 
| 3145   // Registers containing left and right operands respectively. | 3104   // Registers containing left and right operands respectively. | 
| 3146   Register left = r4; | 3105   Register left = r3; | 
| 3147   Register right = r3; | 3106   Register right = r2; | 
| 3148   Register tmp1 = r5; | 3107   Register tmp1 = r4; | 
| 3149   Register tmp2 = r6; | 3108   Register tmp2 = r5; | 
| 3150 | 3109 | 
| 3151   // Check that both operands are heap objects. | 3110   // Check that both operands are heap objects. | 
| 3152   __ JumpIfEitherSmi(left, right, &miss); | 3111   __ JumpIfEitherSmi(left, right, &miss); | 
| 3153 | 3112 | 
| 3154   // Check that both operands are symbols. | 3113   // Check that both operands are symbols. | 
| 3155   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 3114   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| 3156   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 3115   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| 3157   __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 3116   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| 3158   __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 3117   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
| 3159   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 3118   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 
| 3160   __ orx(tmp1, tmp1, tmp2); | 3119   __ OrP(tmp1, tmp1, tmp2); | 
| 3161   __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 3120   __ AndP(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 
| 3162   __ bne(&miss, cr0); | 3121   __ bne(&miss); | 
| 3163 | 3122 | 
| 3164   // Internalized strings are compared by identity. | 3123   // Internalized strings are compared by identity. | 
| 3165   __ cmp(left, right); | 3124   __ CmpP(left, right); | 
| 3166   __ bne(&not_equal); | 3125   __ bne(&not_equal); | 
| 3167   // Make sure r3 is non-zero. At this point input operands are | 3126   // Make sure r2 is non-zero. At this point input operands are | 
| 3168   // guaranteed to be non-zero. | 3127   // guaranteed to be non-zero. | 
| 3169   DCHECK(right.is(r3)); | 3128   DCHECK(right.is(r2)); | 
| 3170   STATIC_ASSERT(EQUAL == 0); | 3129   STATIC_ASSERT(EQUAL == 0); | 
| 3171   STATIC_ASSERT(kSmiTag == 0); | 3130   STATIC_ASSERT(kSmiTag == 0); | 
| 3172   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 3131   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL)); | 
| 3173   __ bind(&not_equal); | 3132   __ bind(&not_equal); | 
| 3174   __ Ret(); | 3133   __ Ret(); | 
| 3175 | 3134 | 
| 3176   __ bind(&miss); | 3135   __ bind(&miss); | 
| 3177   GenerateMiss(masm); | 3136   GenerateMiss(masm); | 
| 3178 } | 3137 } | 
| 3179 | 3138 | 
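
Internalized strings are deduplicated per content, so the IC above can compare them by pointer identity alone. The "make sure r2 is non-zero" comment hides a small trick: on inequality the stub leaves the right operand, a heap pointer that is never zero, in the result register, because callers only test the result against Smi::FromInt(EQUAL), which is zero. In sketch form:

```cpp
#include <cstdint>

// Zero means EQUAL; any non-zero value means "not equal". A heap pointer is
// never zero, so returning it is a valid inequality result.
intptr_t InternalizedStringEquals(const void* left, const void* right) {
  if (left == right) return 0;               // identity implies equality
  return reinterpret_cast<intptr_t>(right);  // guaranteed non-zero
}
```
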
| 3180 |  | 
| 3181 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) { | 3139 void CompareICStub::GenerateUniqueNames(MacroAssembler* masm) { | 
| 3182   DCHECK(state() == CompareICState::UNIQUE_NAME); | 3140   DCHECK(state() == CompareICState::UNIQUE_NAME); | 
| 3183   DCHECK(GetCondition() == eq); | 3141   DCHECK(GetCondition() == eq); | 
| 3184   Label miss; | 3142   Label miss; | 
| 3185 | 3143 | 
| 3186   // Registers containing left and right operands respectively. | 3144   // Registers containing left and right operands respectively. | 
| 3187   Register left = r4; | 3145   Register left = r3; | 
| 3188   Register right = r3; | 3146   Register right = r2; | 
| 3189   Register tmp1 = r5; | 3147   Register tmp1 = r4; | 
| 3190   Register tmp2 = r6; | 3148   Register tmp2 = r5; | 
| 3191 | 3149 | 
| 3192   // Check that both operands are heap objects. | 3150   // Check that both operands are heap objects. | 
| 3193   __ JumpIfEitherSmi(left, right, &miss); | 3151   __ JumpIfEitherSmi(left, right, &miss); | 
| 3194 | 3152 | 
| 3195   // Check that both operands are unique names. This leaves the instance | 3153   // Check that both operands are unique names. This leaves the instance | 
| 3196   // types loaded in tmp1 and tmp2. | 3154   // types loaded in tmp1 and tmp2. | 
| 3197   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 3155   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| 3198   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 3156   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| 3199   __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 3157   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| 3200   __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 3158   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
| 3201 | 3159 | 
| 3202   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss); | 3160   __ JumpIfNotUniqueNameInstanceType(tmp1, &miss); | 
| 3203   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss); | 3161   __ JumpIfNotUniqueNameInstanceType(tmp2, &miss); | 
| 3204 | 3162 | 
| 3205   // Unique names are compared by identity. | 3163   // Unique names are compared by identity. | 
| 3206   __ cmp(left, right); | 3164   __ CmpP(left, right); | 
| 3207   __ bne(&miss); | 3165   __ bne(&miss); | 
| 3208   // Make sure r3 is non-zero. At this point input operands are | 3166   // Make sure r2 is non-zero. At this point input operands are | 
| 3209   // guaranteed to be non-zero. | 3167   // guaranteed to be non-zero. | 
| 3210   DCHECK(right.is(r3)); | 3168   DCHECK(right.is(r2)); | 
| 3211   STATIC_ASSERT(EQUAL == 0); | 3169   STATIC_ASSERT(EQUAL == 0); | 
| 3212   STATIC_ASSERT(kSmiTag == 0); | 3170   STATIC_ASSERT(kSmiTag == 0); | 
| 3213   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 3171   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL)); | 
| 3214   __ Ret(); | 3172   __ Ret(); | 
| 3215 | 3173 | 
| 3216   __ bind(&miss); | 3174   __ bind(&miss); | 
| 3217   GenerateMiss(masm); | 3175   GenerateMiss(masm); | 
| 3218 } | 3176 } | 
| 3219 | 3177 | 
| 3220 |  | 
| 3221 void CompareICStub::GenerateStrings(MacroAssembler* masm) { | 3178 void CompareICStub::GenerateStrings(MacroAssembler* masm) { | 
| 3222   DCHECK(state() == CompareICState::STRING); | 3179   DCHECK(state() == CompareICState::STRING); | 
| 3223   Label miss, not_identical, is_symbol; | 3180   Label miss, not_identical, is_symbol; | 
| 3224 | 3181 | 
| 3225   bool equality = Token::IsEqualityOp(op()); | 3182   bool equality = Token::IsEqualityOp(op()); | 
| 3226 | 3183 | 
| 3227   // Registers containing left and right operands respectively. | 3184   // Registers containing left and right operands respectively. | 
| 3228   Register left = r4; | 3185   Register left = r3; | 
| 3229   Register right = r3; | 3186   Register right = r2; | 
| 3230   Register tmp1 = r5; | 3187   Register tmp1 = r4; | 
| 3231   Register tmp2 = r6; | 3188   Register tmp2 = r5; | 
| 3232   Register tmp3 = r7; | 3189   Register tmp3 = r6; | 
| 3233   Register tmp4 = r8; | 3190   Register tmp4 = r7; | 
| 3234 | 3191 | 
| 3235   // Check that both operands are heap objects. | 3192   // Check that both operands are heap objects. | 
| 3236   __ JumpIfEitherSmi(left, right, &miss); | 3193   __ JumpIfEitherSmi(left, right, &miss); | 
| 3237 | 3194 | 
| 3238   // Check that both operands are strings. This leaves the instance | 3195   // Check that both operands are strings. This leaves the instance | 
| 3239   // types loaded in tmp1 and tmp2. | 3196   // types loaded in tmp1 and tmp2. | 
| 3240   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 3197   __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| 3241   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 3198   __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| 3242   __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 3199   __ LoadlB(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| 3243   __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 3200   __ LoadlB(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
| 3244   STATIC_ASSERT(kNotStringTag != 0); | 3201   STATIC_ASSERT(kNotStringTag != 0); | 
| 3245   __ orx(tmp3, tmp1, tmp2); | 3202   __ OrP(tmp3, tmp1, tmp2); | 
| 3246   __ andi(r0, tmp3, Operand(kIsNotStringMask)); | 3203   __ AndP(r0, tmp3, Operand(kIsNotStringMask)); | 
| 3247   __ bne(&miss, cr0); | 3204   __ bne(&miss); | 
| 3248 | 3205 | 
| 3249   // Fast check for identical strings. | 3206   // Fast check for identical strings. | 
| 3250   __ cmp(left, right); | 3207   __ CmpP(left, right); | 
| 3251   STATIC_ASSERT(EQUAL == 0); | 3208   STATIC_ASSERT(EQUAL == 0); | 
| 3252   STATIC_ASSERT(kSmiTag == 0); | 3209   STATIC_ASSERT(kSmiTag == 0); | 
| 3253   __ bne(&not_identical); | 3210   __ bne(&not_identical); | 
| 3254   __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 3211   __ LoadSmiLiteral(r2, Smi::FromInt(EQUAL)); | 
| 3255   __ Ret(); | 3212   __ Ret(); | 
| 3256   __ bind(&not_identical); | 3213   __ bind(&not_identical); | 
| 3257 | 3214 | 
| 3258   // Handle not identical strings. | 3215   // Handle not identical strings. | 
| 3259 | 3216 | 
| 3260   // Check that both strings are internalized strings. If they are, we're done | 3217   // Check that both strings are internalized strings. If they are, we're done | 
| 3261   // because we already know they are not identical. We know they are both | 3218   // because we already know they are not identical. We know they are both | 
| 3262   // strings. | 3219   // strings. | 
| 3263   if (equality) { | 3220   if (equality) { | 
| 3264     DCHECK(GetCondition() == eq); | 3221     DCHECK(GetCondition() == eq); | 
| 3265     STATIC_ASSERT(kInternalizedTag == 0); | 3222     STATIC_ASSERT(kInternalizedTag == 0); | 
| 3266     __ orx(tmp3, tmp1, tmp2); | 3223     __ OrP(tmp3, tmp1, tmp2); | 
| 3267     __ andi(r0, tmp3, Operand(kIsNotInternalizedMask)); | 3224     __ AndP(r0, tmp3, Operand(kIsNotInternalizedMask)); | 
| 3268     // Make sure r3 is non-zero. At this point input operands are | 3225     __ bne(&is_symbol); | 
|  | 3226     // Make sure r2 is non-zero. At this point input operands are | 
| 3269     // guaranteed to be non-zero. | 3227     // guaranteed to be non-zero. | 
| 3270     DCHECK(right.is(r3)); | 3228     DCHECK(right.is(r2)); | 
| 3271     __ Ret(eq, cr0); | 3229     __ Ret(); | 
|  | 3230     __ bind(&is_symbol); | 
| 3272   } | 3231   } | 
| 3273 | 3232 | 
| 3274   // Check that both strings are sequential one-byte. | 3233   // Check that both strings are sequential one-byte. | 
| 3275   Label runtime; | 3234   Label runtime; | 
| 3276   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4, | 3235   __ JumpIfBothInstanceTypesAreNotSequentialOneByte(tmp1, tmp2, tmp3, tmp4, | 
| 3277                                                     &runtime); | 3236                                                     &runtime); | 
| 3278 | 3237 | 
| 3279   // Compare flat one-byte strings. Returns when done. | 3238   // Compare flat one-byte strings. Returns when done. | 
| 3280   if (equality) { | 3239   if (equality) { | 
| 3281     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, | 3240     StringHelper::GenerateFlatOneByteStringEquals(masm, left, right, tmp1, | 
| 3282                                                   tmp2); | 3241                                                   tmp2); | 
| 3283   } else { | 3242   } else { | 
| 3284     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1, | 3243     StringHelper::GenerateCompareFlatOneByteStrings(masm, left, right, tmp1, | 
| 3285                                                     tmp2, tmp3); | 3244                                                     tmp2, tmp3); | 
| 3286   } | 3245   } | 
| 3287 | 3246 | 
| 3288   // Handle more complex cases in runtime. | 3247   // Handle more complex cases in runtime. | 
| 3289   __ bind(&runtime); | 3248   __ bind(&runtime); | 
| 3290   __ Push(left, right); | 3249   __ Push(left, right); | 
| 3291   if (equality) { | 3250   if (equality) { | 
| 3292     __ TailCallRuntime(Runtime::kStringEquals); | 3251     __ TailCallRuntime(Runtime::kStringEquals); | 
| 3293   } else { | 3252   } else { | 
| 3294     __ TailCallRuntime(Runtime::kStringCompare); | 3253     __ TailCallRuntime(Runtime::kStringCompare); | 
| 3295   } | 3254   } | 
| 3296 | 3255 | 
| 3297   __ bind(&miss); | 3256   __ bind(&miss); | 
| 3298   GenerateMiss(masm); | 3257   GenerateMiss(masm); | 
| 3299 } | 3258 } | 
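Once both operands are known to be internalized strings and not pointer-identical, equality is fully decided: internalization keeps exactly one canonical object per distinct string content, so distinct pointers imply distinct contents. A minimal interning sketch in plain C++ (illustrative only, not V8's string table):

```cpp
#include <cassert>
#include <string>
#include <unordered_set>

// One canonical object per distinct content, like an internalized-string table.
const std::string* Intern(const std::string& s) {
  static std::unordered_set<std::string> table;
  return &*table.insert(s).first;
}

int main() {
  const std::string* a = Intern("foo");
  const std::string* b = Intern("foo");
  const std::string* c = Intern("bar");
  assert(a == b);  // same content -> same canonical pointer
  assert(a != c);  // distinct pointers -> contents must differ
  return 0;
}
```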
| 3300 | 3259 | 
| 3301 |  | 
| 3302 void CompareICStub::GenerateReceivers(MacroAssembler* masm) { | 3260 void CompareICStub::GenerateReceivers(MacroAssembler* masm) { | 
| 3303   DCHECK_EQ(CompareICState::RECEIVER, state()); | 3261   DCHECK_EQ(CompareICState::RECEIVER, state()); | 
| 3304   Label miss; | 3262   Label miss; | 
| 3305   __ and_(r5, r4, r3); | 3263   __ AndP(r4, r3, r2); | 
| 3306   __ JumpIfSmi(r5, &miss); | 3264   __ JumpIfSmi(r4, &miss); | 
| 3307 | 3265 | 
| 3308   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); | 3266   STATIC_ASSERT(LAST_TYPE == LAST_JS_RECEIVER_TYPE); | 
| 3309   __ CompareObjectType(r3, r5, r5, FIRST_JS_RECEIVER_TYPE); | 3267   __ CompareObjectType(r2, r4, r4, FIRST_JS_RECEIVER_TYPE); | 
| 3310   __ blt(&miss); | 3268   __ blt(&miss); | 
| 3311   __ CompareObjectType(r4, r5, r5, FIRST_JS_RECEIVER_TYPE); | 3269   __ CompareObjectType(r3, r4, r4, FIRST_JS_RECEIVER_TYPE); | 
| 3312   __ blt(&miss); | 3270   __ blt(&miss); | 
| 3313 | 3271 | 
| 3314   DCHECK(GetCondition() == eq); | 3272   DCHECK(GetCondition() == eq); | 
| 3315   __ sub(r3, r3, r4); | 3273   __ SubP(r2, r2, r3); | 
| 3316   __ Ret(); | 3274   __ Ret(); | 
| 3317 | 3275 | 
| 3318   __ bind(&miss); | 3276   __ bind(&miss); | 
| 3319   GenerateMiss(masm); | 3277   GenerateMiss(masm); | 
| 3320 } | 3278 } | 
| 3321 | 3279 | 
| 3322 |  | 
| 3323 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) { | 3280 void CompareICStub::GenerateKnownReceivers(MacroAssembler* masm) { | 
| 3324   Label miss; | 3281   Label miss; | 
| 3325   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_); | 3282   Handle<WeakCell> cell = Map::WeakCellForMap(known_map_); | 
| 3326   __ and_(r5, r4, r3); | 3283   __ AndP(r4, r3, r2); | 
| 3327   __ JumpIfSmi(r5, &miss); | 3284   __ JumpIfSmi(r4, &miss); | 
| 3328   __ GetWeakValue(r7, cell); | 3285   __ GetWeakValue(r6, cell); | 
|  | 3286   __ LoadP(r4, FieldMemOperand(r2, HeapObject::kMapOffset)); | 
| 3329   __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset)); | 3287   __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset)); | 
| 3330   __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset)); | 3288   __ CmpP(r4, r6); | 
| 3331   __ cmp(r5, r7); |  | 
| 3332   __ bne(&miss); | 3289   __ bne(&miss); | 
| 3333   __ cmp(r6, r7); | 3290   __ CmpP(r5, r6); | 
| 3334   __ bne(&miss); | 3291   __ bne(&miss); | 
| 3335 | 3292 | 
| 3336   if (Token::IsEqualityOp(op())) { | 3293   if (Token::IsEqualityOp(op())) { | 
| 3337     __ sub(r3, r3, r4); | 3294     __ SubP(r2, r2, r3); | 
| 3338     __ Ret(); | 3295     __ Ret(); | 
| 3339   } else { | 3296   } else { | 
| 3340     if (op() == Token::LT || op() == Token::LTE) { | 3297     if (op() == Token::LT || op() == Token::LTE) { | 
| 3341       __ LoadSmiLiteral(r5, Smi::FromInt(GREATER)); | 3298       __ LoadSmiLiteral(r4, Smi::FromInt(GREATER)); | 
| 3342     } else { | 3299     } else { | 
| 3343       __ LoadSmiLiteral(r5, Smi::FromInt(LESS)); | 3300       __ LoadSmiLiteral(r4, Smi::FromInt(LESS)); | 
| 3344     } | 3301     } | 
| 3345     __ Push(r4, r3, r5); | 3302     __ Push(r3, r2, r4); | 
| 3346     __ TailCallRuntime(Runtime::kCompare); | 3303     __ TailCallRuntime(Runtime::kCompare); | 
| 3347   } | 3304   } | 
| 3348 | 3305 | 
| 3349   __ bind(&miss); | 3306   __ bind(&miss); | 
| 3350   GenerateMiss(masm); | 3307   GenerateMiss(masm); | 
| 3351 } | 3308 } | 
| 3352 | 3309 | 
| 3353 |  | 
| 3354 void CompareICStub::GenerateMiss(MacroAssembler* masm) { | 3310 void CompareICStub::GenerateMiss(MacroAssembler* masm) { | 
| 3355   { | 3311   { | 
| 3356     // Call the runtime system in a fresh internal frame. | 3312     // Call the runtime system in a fresh internal frame. | 
| 3357     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 3313     FrameScope scope(masm, StackFrame::INTERNAL); | 
| 3358     __ Push(r4, r3); | 3314     __ Push(r3, r2); | 
| 3359     __ Push(r4, r3); | 3315     __ Push(r3, r2); | 
| 3360     __ LoadSmiLiteral(r0, Smi::FromInt(op())); | 3316     __ LoadSmiLiteral(r0, Smi::FromInt(op())); | 
| 3361     __ push(r0); | 3317     __ push(r0); | 
| 3362     __ CallRuntime(Runtime::kCompareIC_Miss); | 3318     __ CallRuntime(Runtime::kCompareIC_Miss); | 
| 3363     // Compute the entry point of the rewritten stub. | 3319     // Compute the entry point of the rewritten stub. | 
| 3364     __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | 3320     __ AddP(r4, r2, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 3365     // Restore registers. | 3321     // Restore registers. | 
| 3366     __ Pop(r4, r3); | 3322     __ Pop(r3, r2); | 
| 3367   } | 3323   } | 
| 3368 | 3324 | 
| 3369   __ JumpToJSEntry(r5); | 3325   __ JumpToJSEntry(r4); | 
| 3370 } | 3326 } | 
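GenerateMiss jumps into the Code object returned by the runtime call by skipping its header and removing the heap-object tag in a single add. A stand-alone sketch of that pointer arithmetic; the tag value and header size below are illustrative assumptions, not V8's actual layout:

```cpp
#include <cassert>
#include <cstdint>

constexpr intptr_t kHeapObjectTag = 1;    // assumed low tag bit on heap pointers
constexpr intptr_t kCodeHeaderSize = 64;  // assumed header before the instructions

// One addition untags the pointer and skips the header, as the stub's AddP does.
intptr_t EntryPoint(intptr_t tagged_code_object) {
  return tagged_code_object + (kCodeHeaderSize - kHeapObjectTag);
}

int main() {
  intptr_t raw = 0x10000;                 // untagged object start
  intptr_t tagged = raw + kHeapObjectTag;
  assert(EntryPoint(tagged) == raw + kCodeHeaderSize);
  return 0;
}
```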
| 3371 | 3327 | 
| 3372 |  | 
| 3373 // This stub is paired with DirectCEntryStub::GenerateCall | 3328 // This stub is paired with DirectCEntryStub::GenerateCall | 
| 3374 void DirectCEntryStub::Generate(MacroAssembler* masm) { | 3329 void DirectCEntryStub::Generate(MacroAssembler* masm) { | 
| 3375   // Place the return address on the stack, making the call | 3330   __ CleanseP(r14); | 
| 3376   // GC safe. The RegExp backend also relies on this. | 3331 | 
| 3377   __ mflr(r0); | 3332   // Statement positions are expected to be recorded when the target | 
| 3378   __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); | 3333   // address is loaded. | 
| 3379   __ Call(ip);  // Call the C++ function. | 3334   __ positions_recorder()->WriteRecordedPositions(); | 
| 3380   __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); | 3335 | 
| 3381   __ mtlr(r0); | 3336   __ b(ip);  // Callee will return to R14 directly | 
| 3382   __ blr(); |  | 
| 3383 } | 3337 } | 
| 3384 | 3338 | 
|  | 3339 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { | 
|  | 3340 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) | 
|  | 3341   // Native S390X Linux uses a function descriptor. | 
|  | 3342   __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize)); | 
|  | 3343   __ LoadP(target, MemOperand(target, 0));  // Instruction address | 
|  | 3344 #else | 
|  | 3345   // ip needs to be set for DirectCEntryStub::Generate, and also | 
|  | 3346   // for ABI_CALL_VIA_IP. | 
|  | 3347   __ Move(ip, target); | 
|  | 3348 #endif | 
| 3385 | 3349 | 
| 3386 void DirectCEntryStub::GenerateCall(MacroAssembler* masm, Register target) { | 3350   __ call(GetCode(), RelocInfo::CODE_TARGET);  // Call the stub. | 
| 3387   if (ABI_USES_FUNCTION_DESCRIPTORS) { |  | 
| 3388     // AIX/PPC64BE Linux use a function descriptor. |  | 
| 3389     __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize)); |  | 
| 3390     __ LoadP(ip, MemOperand(target, 0));  // Instruction address |  | 
| 3391   } else { |  | 
| 3392     // ip needs to be set for DirectCEntryStub::Generate, and also |  | 
| 3393     // for ABI_CALL_VIA_IP. |  | 
| 3394     __ Move(ip, target); |  | 
| 3395   } |  | 
| 3396 |  | 
| 3397   intptr_t code = reinterpret_cast<intptr_t>(GetCode().location()); |  | 
| 3398   __ mov(r0, Operand(code, RelocInfo::CODE_TARGET)); |  | 
| 3399   __ Call(r0);  // Call the stub. |  | 
| 3400 } | 3351 } | 
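The descriptor branch above reflects ABIs where a "function pointer" really points to a two-word descriptor holding the code entry address and a TOC/global pointer, which is why the stub issues two LoadP's before branching. A conceptual sketch; the struct layout is illustrative, not the exact AIX/ELFv1 definition:

```cpp
#include <cassert>
#include <cstdint>

struct FunctionDescriptor {
  uintptr_t entry;  // address of the first instruction
  uintptr_t toc;    // table-of-contents / global pointer for the callee
};

uintptr_t CallViaDescriptor(const FunctionDescriptor* target) {
  // The stub's two loads: MemOperand(target, kPointerSize) -> TOC,
  // MemOperand(target, 0) -> instruction address.
  uintptr_t toc = target->toc;
  uintptr_t entry = target->entry;
  (void)toc;  // a real call sequence would install this in the TOC register
  return entry;
}

int main() {
  FunctionDescriptor fd = {0x4000, 0x8000};
  assert(CallViaDescriptor(&fd) == 0x4000);
  return 0;
}
```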
| 3401 | 3352 | 
| 3402 |  | 
| 3403 void NameDictionaryLookupStub::GenerateNegativeLookup( | 3353 void NameDictionaryLookupStub::GenerateNegativeLookup( | 
| 3404     MacroAssembler* masm, Label* miss, Label* done, Register receiver, | 3354     MacroAssembler* masm, Label* miss, Label* done, Register receiver, | 
| 3405     Register properties, Handle<Name> name, Register scratch0) { | 3355     Register properties, Handle<Name> name, Register scratch0) { | 
| 3406   DCHECK(name->IsUniqueName()); | 3356   DCHECK(name->IsUniqueName()); | 
| 3407   // If names of slots in range from 1 to kProbes - 1 for the hash value are | 3357   // If names of slots in range from 1 to kProbes - 1 for the hash value are | 
| 3408   // not equal to the name and kProbes-th slot is not used (its name is the | 3358   // not equal to the name and kProbes-th slot is not used (its name is the | 
| 3409   // undefined value), it guarantees the hash table doesn't contain the | 3359   // undefined value), it guarantees the hash table doesn't contain the | 
| 3410   // property. It's true even if some slots represent deleted properties | 3360   // property. It's true even if some slots represent deleted properties | 
| 3411   // (their names are the hole value). | 3361   // (their names are the hole value). | 
| 3412   for (int i = 0; i < kInlinedProbes; i++) { | 3362   for (int i = 0; i < kInlinedProbes; i++) { | 
| 3413     // scratch0 points to properties hash. | 3363     // scratch0 points to properties hash. | 
| 3414     // Compute the masked index: (hash + i + i * i) & mask. | 3364     // Compute the masked index: (hash + i + i * i) & mask. | 
| 3415     Register index = scratch0; | 3365     Register index = scratch0; | 
| 3416     // Capacity is smi 2^n. | 3366     // Capacity is smi 2^n. | 
| 3417     __ LoadP(index, FieldMemOperand(properties, kCapacityOffset)); | 3367     __ LoadP(index, FieldMemOperand(properties, kCapacityOffset)); | 
| 3418     __ subi(index, index, Operand(1)); | 3368     __ SubP(index, Operand(1)); | 
| 3419     __ LoadSmiLiteral( | 3369     __ LoadSmiLiteral( | 
| 3420         ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))); | 3370         ip, Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i))); | 
| 3421     __ and_(index, index, ip); | 3371     __ AndP(index, ip); | 
| 3422 | 3372 | 
| 3423     // Scale the index by multiplying by the entry size. | 3373     // Scale the index by multiplying by the entry size. | 
| 3424     STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 3374     STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 
| 3425     __ ShiftLeftImm(ip, index, Operand(1)); | 3375     __ ShiftLeftP(ip, index, Operand(1)); | 
| 3426     __ add(index, index, ip);  // index *= 3. | 3376     __ AddP(index, ip);  // index *= 3. | 
| 3427 | 3377 | 
| 3428     Register entity_name = scratch0; | 3378     Register entity_name = scratch0; | 
| 3429     // Having undefined at this place means the name is not contained. | 3379     // Having undefined at this place means the name is not contained. | 
| 3430     Register tmp = properties; | 3380     Register tmp = properties; | 
| 3431     __ SmiToPtrArrayOffset(ip, index); | 3381     __ SmiToPtrArrayOffset(ip, index); | 
| 3432     __ add(tmp, properties, ip); | 3382     __ AddP(tmp, properties, ip); | 
| 3433     __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 3383     __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 
| 3434 | 3384 | 
| 3435     DCHECK(!tmp.is(entity_name)); | 3385     DCHECK(!tmp.is(entity_name)); | 
| 3436     __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 3386     __ CompareRoot(entity_name, Heap::kUndefinedValueRootIndex); | 
| 3437     __ cmp(entity_name, tmp); |  | 
| 3438     __ beq(done); | 3387     __ beq(done); | 
| 3439 | 3388 | 
| 3440     // Load the hole ready for use below: |  | 
| 3441     __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); |  | 
| 3442 |  | 
| 3443     // Stop if found the property. | 3389     // Stop if found the property. | 
| 3444     __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0); | 3390     __ CmpP(entity_name, Operand(Handle<Name>(name))); | 
| 3445     __ beq(miss); | 3391     __ beq(miss); | 
| 3446 | 3392 | 
| 3447     Label good; | 3393     Label good; | 
| 3448     __ cmp(entity_name, tmp); | 3394     __ CompareRoot(entity_name, Heap::kTheHoleValueRootIndex); | 
| 3449     __ beq(&good); | 3395     __ beq(&good); | 
| 3450 | 3396 | 
| 3451     // Check if the entry name is not a unique name. | 3397     // Check if the entry name is not a unique name. | 
| 3452     __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); | 3398     __ LoadP(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); | 
| 3453     __ lbz(entity_name, FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); | 3399     __ LoadlB(entity_name, | 
|  | 3400               FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); | 
| 3454     __ JumpIfNotUniqueNameInstanceType(entity_name, miss); | 3401     __ JumpIfNotUniqueNameInstanceType(entity_name, miss); | 
| 3455     __ bind(&good); | 3402     __ bind(&good); | 
| 3456 | 3403 | 
| 3457     // Restore the properties. | 3404     // Restore the properties. | 
| 3458     __ LoadP(properties, | 3405     __ LoadP(properties, | 
| 3459              FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 3406              FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 
| 3460   } | 3407   } | 
| 3461 | 3408 | 
| 3462   const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() | | 3409   const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() | | 
| 3463                           r5.bit() | r4.bit() | r3.bit()); | 3410                           r4.bit() | r3.bit() | r2.bit()); | 
| 3464 | 3411 | 
| 3465   __ mflr(r0); | 3412   __ LoadRR(r0, r14); | 
| 3466   __ MultiPush(spill_mask); | 3413   __ MultiPush(spill_mask); | 
| 3467 | 3414 | 
| 3468   __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 3415   __ LoadP(r2, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 
| 3469   __ mov(r4, Operand(Handle<Name>(name))); | 3416   __ mov(r3, Operand(Handle<Name>(name))); | 
| 3470   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); | 3417   NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); | 
| 3471   __ CallStub(&stub); | 3418   __ CallStub(&stub); | 
| 3472   __ cmpi(r3, Operand::Zero()); | 3419   __ CmpP(r2, Operand::Zero()); | 
| 3473 | 3420 | 
| 3474   __ MultiPop(spill_mask);  // MultiPop does not touch condition flags | 3421   __ MultiPop(spill_mask);  // MultiPop does not touch condition flags | 
| 3475   __ mtlr(r0); | 3422   __ LoadRR(r14, r0); | 
| 3476 | 3423 | 
| 3477   __ beq(done); | 3424   __ beq(done); | 
| 3478   __ bne(miss); | 3425   __ bne(miss); | 
| 3479 } | 3426 } | 
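The probe sequence these lookups share is (hash + GetProbeOffset(i)) & mask, where GetProbeOffset(i) is the triangular number i*(i+1)/2; with a power-of-two capacity that sequence visits every slot, so probing cannot cycle early. A small stand-alone demonstration:

```cpp
#include <cstdio>
#include <set>

int main() {
  const unsigned capacity = 8, mask = capacity - 1, hash = 5;
  std::set<unsigned> visited;
  for (unsigned i = 0; i < capacity; i++) {
    unsigned slot = (hash + (i + i * i) / 2) & mask;  // triangular probe offset
    visited.insert(slot);
    printf("probe %u -> slot %u\n", i, slot);
  }
  // With power-of-two capacity the quadratic sequence covers all slots.
  printf("distinct slots visited: %zu of %u\n", visited.size(), capacity);
  return 0;
}
```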
| 3480 | 3427 | 
| 3481 |  | 
| 3482 // Probe the name dictionary in the |elements| register. Jump to the | 3428 // Probe the name dictionary in the |elements| register. Jump to the | 
| 3483 // |done| label if a property with the given name is found. Jump to | 3429 // |done| label if a property with the given name is found. Jump to | 
| 3484 // the |miss| label otherwise. | 3430 // the |miss| label otherwise. | 
| 3485 // If lookup was successful |scratch2| will be equal to elements + 4 * index. | 3431 // If lookup was successful |scratch2| will be equal to elements + 4 * index. | 
| 3486 void NameDictionaryLookupStub::GeneratePositiveLookup( | 3432 void NameDictionaryLookupStub::GeneratePositiveLookup( | 
| 3487     MacroAssembler* masm, Label* miss, Label* done, Register elements, | 3433     MacroAssembler* masm, Label* miss, Label* done, Register elements, | 
| 3488     Register name, Register scratch1, Register scratch2) { | 3434     Register name, Register scratch1, Register scratch2) { | 
| 3489   DCHECK(!elements.is(scratch1)); | 3435   DCHECK(!elements.is(scratch1)); | 
| 3490   DCHECK(!elements.is(scratch2)); | 3436   DCHECK(!elements.is(scratch2)); | 
| 3491   DCHECK(!name.is(scratch1)); | 3437   DCHECK(!name.is(scratch1)); | 
| 3492   DCHECK(!name.is(scratch2)); | 3438   DCHECK(!name.is(scratch2)); | 
| 3493 | 3439 | 
| 3494   __ AssertName(name); | 3440   __ AssertName(name); | 
| 3495 | 3441 | 
| 3496   // Compute the capacity mask. | 3442   // Compute the capacity mask. | 
| 3497   __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 3443   __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 
| 3498   __ SmiUntag(scratch1);  // convert smi to int | 3444   __ SmiUntag(scratch1);  // convert smi to int | 
| 3499   __ subi(scratch1, scratch1, Operand(1)); | 3445   __ SubP(scratch1, Operand(1)); | 
| 3500 | 3446 | 
| 3501   // Generate an unrolled loop that performs a few probes before | 3447   // Generate an unrolled loop that performs a few probes before | 
| 3502   // giving up. Measurements done on Gmail indicate that 2 probes | 3448   // giving up. Measurements done on Gmail indicate that 2 probes | 
| 3503   // cover ~93% of loads from dictionaries. | 3449   // cover ~93% of loads from dictionaries. | 
| 3504   for (int i = 0; i < kInlinedProbes; i++) { | 3450   for (int i = 0; i < kInlinedProbes; i++) { | 
| 3505     // Compute the masked index: (hash + i + i * i) & mask. | 3451     // Compute the masked index: (hash + i + i * i) & mask. | 
| 3506     __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 3452     __ LoadlW(scratch2, FieldMemOperand(name, String::kHashFieldOffset)); | 
| 3507     if (i > 0) { | 3453     if (i > 0) { | 
| 3508       // Add the probe offset (i + i * i) left shifted to avoid right shifting | 3454       // Add the probe offset (i + i * i) left shifted to avoid right shifting | 
| 3509       // the hash in a separate instruction. The value hash + i + i * i is right | 3455       // the hash in a separate instruction. The value hash + i + i * i is right | 
| 3510       // shifted in the following and instruction. | 3456       // shifted in the following and instruction. | 
| 3511       DCHECK(NameDictionary::GetProbeOffset(i) < | 3457       DCHECK(NameDictionary::GetProbeOffset(i) < | 
| 3512              1 << (32 - Name::kHashFieldOffset)); | 3458              1 << (32 - Name::kHashFieldOffset)); | 
| 3513       __ addi(scratch2, scratch2, | 3459       __ AddP(scratch2, | 
| 3514               Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 3460               Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 
| 3515     } | 3461     } | 
| 3516     __ srwi(scratch2, scratch2, Operand(Name::kHashShift)); | 3462     __ srl(scratch2, Operand(String::kHashShift)); | 
| 3517     __ and_(scratch2, scratch1, scratch2); | 3463     __ AndP(scratch2, scratch1); | 
| 3518 | 3464 | 
| 3519     // Scale the index by multiplying by the entry size. | 3465     // Scale the index by multiplying by the entry size. | 
| 3520     STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 3466     STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 
| 3521     // scratch2 = scratch2 * 3. | 3467     // scratch2 = scratch2 * 3. | 
| 3522     __ ShiftLeftImm(ip, scratch2, Operand(1)); | 3468     __ ShiftLeftP(ip, scratch2, Operand(1)); | 
| 3523     __ add(scratch2, scratch2, ip); | 3469     __ AddP(scratch2, ip); | 
| 3524 | 3470 | 
| 3525     // Check if the key is identical to the name. | 3471     // Check if the key is identical to the name. | 
| 3526     __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2)); | 3472     __ ShiftLeftP(ip, scratch2, Operand(kPointerSizeLog2)); | 
| 3527     __ add(scratch2, elements, ip); | 3473     __ AddP(scratch2, elements, ip); | 
| 3528     __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset)); | 3474     __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset)); | 
| 3529     __ cmp(name, ip); | 3475     __ CmpP(name, ip); | 
| 3530     __ beq(done); | 3476     __ beq(done); | 
| 3531   } | 3477   } | 
| 3532 | 3478 | 
| 3533   const int spill_mask = (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() | | 3479   const int spill_mask = (r0.bit() | r8.bit() | r7.bit() | r6.bit() | r5.bit() | | 
| 3534                           r5.bit() | r4.bit() | r3.bit()) & | 3480                           r4.bit() | r3.bit() | r2.bit()) & | 
| 3535                          ~(scratch1.bit() | scratch2.bit()); | 3481                          ~(scratch1.bit() | scratch2.bit()); | 
| 3536 | 3482 | 
| 3537   __ mflr(r0); | 3483   __ LoadRR(r0, r14); | 
| 3538   __ MultiPush(spill_mask); | 3484   __ MultiPush(spill_mask); | 
| 3539   if (name.is(r3)) { | 3485   if (name.is(r2)) { | 
| 3540     DCHECK(!elements.is(r4)); | 3486     DCHECK(!elements.is(r3)); | 
| 3541     __ mr(r4, name); | 3487     __ LoadRR(r3, name); | 
| 3542     __ mr(r3, elements); | 3488     __ LoadRR(r2, elements); | 
| 3543   } else { | 3489   } else { | 
| 3544     __ mr(r3, elements); | 3490     __ LoadRR(r2, elements); | 
| 3545     __ mr(r4, name); | 3491     __ LoadRR(r3, name); | 
| 3546   } | 3492   } | 
| 3547   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); | 3493   NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); | 
| 3548   __ CallStub(&stub); | 3494   __ CallStub(&stub); | 
| 3549   __ cmpi(r3, Operand::Zero()); | 3495   __ LoadRR(r1, r2); | 
| 3550   __ mr(scratch2, r5); | 3496   __ LoadRR(scratch2, r4); | 
| 3551   __ MultiPop(spill_mask); | 3497   __ MultiPop(spill_mask); | 
| 3552   __ mtlr(r0); | 3498   __ LoadRR(r14, r0); | 
| 3553 | 3499 | 
|  | 3500   __ CmpP(r1, Operand::Zero()); | 
| 3554   __ bne(done); | 3501   __ bne(done); | 
| 3555   __ beq(miss); | 3502   __ beq(miss); | 
| 3556 } | 3503 } | 
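The shift juggling in the loop above (adding the probe offset pre-shifted by kHashShift, then doing a single right shift) is equivalent to shifting the hash down first and then adding, as long as the pre-shift add cannot overflow, which is what the DCHECK on GetProbeOffset guards. A quick check of the identity; the kHashShift value here is illustrative:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uint32_t kHashShift = 2;                       // assumed, not V8's value
  uint32_t hash_field = (12345u << kHashShift) | 0x3;  // hash bits | flag bits
  uint32_t probe_offset = 3;                           // GetProbeOffset(i)

  uint32_t naive = (hash_field >> kHashShift) + probe_offset;
  uint32_t stubs = (hash_field + (probe_offset << kHashShift)) >> kHashShift;
  // Identical whenever the pre-shift addition does not overflow.
  assert(naive == stubs);
  return 0;
}
```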
| 3557 | 3504 | 
| 3558 |  | 
| 3559 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { | 3505 void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { | 
| 3560   // This stub overrides SometimesSetsUpAFrame() to return false.  That means | 3506   // This stub overrides SometimesSetsUpAFrame() to return false.  That means | 
| 3561   // we cannot call anything that could cause a GC from this stub. | 3507   // we cannot call anything that could cause a GC from this stub. | 
| 3562   // Registers: | 3508   // Registers: | 
| 3563   //  result: NameDictionary to probe | 3509   //  result: NameDictionary to probe | 
| 3564   //  r4: key | 3510   //  r3: key | 
| 3565   //  dictionary: NameDictionary to probe. | 3511   //  dictionary: NameDictionary to probe. | 
| 3566   //  index: will hold an index of entry if lookup is successful. | 3512   //  index: will hold an index of entry if lookup is successful. | 
| 3567   //         might alias with result_. | 3513   //         might alias with result_. | 
| 3568   // Returns: | 3514   // Returns: | 
| 3569   //  result_ is zero if lookup failed, non zero otherwise. | 3515   //  result_ is zero if lookup failed, non zero otherwise. | 
| 3570 | 3516 | 
| 3571   Register result = r3; | 3517   Register result = r2; | 
| 3572   Register dictionary = r3; | 3518   Register dictionary = r2; | 
| 3573   Register key = r4; | 3519   Register key = r3; | 
| 3574   Register index = r5; | 3520   Register index = r4; | 
| 3575   Register mask = r6; | 3521   Register mask = r5; | 
| 3576   Register hash = r7; | 3522   Register hash = r6; | 
| 3577   Register undefined = r8; | 3523   Register undefined = r7; | 
| 3578   Register entry_key = r9; | 3524   Register entry_key = r8; | 
| 3579   Register scratch = r9; | 3525   Register scratch = r8; | 
| 3580 | 3526 | 
| 3581   Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | 3527   Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | 
| 3582 | 3528 | 
| 3583   __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset)); | 3529   __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset)); | 
| 3584   __ SmiUntag(mask); | 3530   __ SmiUntag(mask); | 
| 3585   __ subi(mask, mask, Operand(1)); | 3531   __ SubP(mask, Operand(1)); | 
| 3586 | 3532 | 
| 3587   __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 3533   __ LoadlW(hash, FieldMemOperand(key, String::kHashFieldOffset)); | 
| 3588 | 3534 | 
| 3589   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 3535   __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 
| 3590 | 3536 | 
| 3591   for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 3537   for (int i = kInlinedProbes; i < kTotalProbes; i++) { | 
| 3592     // Compute the masked index: (hash + i + i * i) & mask. | 3538     // Compute the masked index: (hash + i + i * i) & mask. | 
| 3593     // Capacity is smi 2^n. | 3539     // Capacity is smi 2^n. | 
| 3594     if (i > 0) { | 3540     if (i > 0) { | 
| 3595       // Add the probe offset (i + i * i) left shifted to avoid right shifting | 3541       // Add the probe offset (i + i * i) left shifted to avoid right shifting | 
| 3596       // the hash in a separate instruction. The value hash + i + i * i is right | 3542       // the hash in a separate instruction. The value hash + i + i * i is right | 
| 3597       // shifted in the following and instruction. | 3543       // shifted in the following and instruction. | 
| 3598       DCHECK(NameDictionary::GetProbeOffset(i) < | 3544       DCHECK(NameDictionary::GetProbeOffset(i) < | 
| 3599              1 << (32 - Name::kHashFieldOffset)); | 3545              1 << (32 - Name::kHashFieldOffset)); | 
| 3600       __ addi(index, hash, | 3546       __ AddP(index, hash, | 
| 3601               Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 3547               Operand(NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 
| 3602     } else { | 3548     } else { | 
| 3603       __ mr(index, hash); | 3549       __ LoadRR(index, hash); | 
| 3604     } | 3550     } | 
| 3605     __ srwi(r0, index, Operand(Name::kHashShift)); | 3551     __ ShiftRight(r0, index, Operand(String::kHashShift)); | 
| 3606     __ and_(index, mask, r0); | 3552     __ AndP(index, r0, mask); | 
| 3607 | 3553 | 
| 3608     // Scale the index by multiplying by the entry size. | 3554     // Scale the index by multiplying by the entry size. | 
| 3609     STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 3555     STATIC_ASSERT(NameDictionary::kEntrySize == 3); | 
| 3610     __ ShiftLeftImm(scratch, index, Operand(1)); | 3556     __ ShiftLeftP(scratch, index, Operand(1)); | 
| 3611     __ add(index, index, scratch);  // index *= 3. | 3557     __ AddP(index, scratch);  // index *= 3. | 
| 3612 | 3558 | 
| 3613     __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2)); | 3559     __ ShiftLeftP(scratch, index, Operand(kPointerSizeLog2)); | 
| 3614     __ add(index, dictionary, scratch); | 3560     __ AddP(index, dictionary, scratch); | 
| 3615     __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 3561     __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 
| 3616 | 3562 | 
| 3617     // Having undefined at this place means the name is not contained. | 3563     // Having undefined at this place means the name is not contained. | 
| 3618     __ cmp(entry_key, undefined); | 3564     __ CmpP(entry_key, undefined); | 
| 3619     __ beq(¬_in_dictionary); | 3565     __ beq(¬_in_dictionary); | 
| 3620 | 3566 | 
| 3621     // Stop if found the property. | 3567     // Stop if found the property. | 
| 3622     __ cmp(entry_key, key); | 3568     __ CmpP(entry_key, key); | 
| 3623     __ beq(&in_dictionary); | 3569     __ beq(&in_dictionary); | 
| 3624 | 3570 | 
| 3625     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) { | 3571     if (i != kTotalProbes - 1 && mode() == NEGATIVE_LOOKUP) { | 
| 3626       // Check if the entry name is not a unique name. | 3572       // Check if the entry name is not a unique name. | 
| 3627       __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | 3573       __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | 
| 3628       __ lbz(entry_key, FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); | 3574       __ LoadlB(entry_key, | 
|  | 3575                 FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); | 
| 3629       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary); | 3576       __ JumpIfNotUniqueNameInstanceType(entry_key, &maybe_in_dictionary); | 
| 3630     } | 3577     } | 
| 3631   } | 3578   } | 
| 3632 | 3579 | 
| 3633   __ bind(&maybe_in_dictionary); | 3580   __ bind(&maybe_in_dictionary); | 
| 3634   // If we are doing negative lookup then probing failure should be | 3581   // If we are doing negative lookup then probing failure should be | 
| 3635   // treated as a lookup success. For positive lookup probing failure | 3582   // treated as a lookup success. For positive lookup probing failure | 
| 3636   // should be treated as lookup failure. | 3583   // should be treated as lookup failure. | 
| 3637   if (mode() == POSITIVE_LOOKUP) { | 3584   if (mode() == POSITIVE_LOOKUP) { | 
| 3638     __ li(result, Operand::Zero()); | 3585     __ LoadImmP(result, Operand::Zero()); | 
| 3639     __ Ret(); | 3586     __ Ret(); | 
| 3640   } | 3587   } | 
| 3641 | 3588 | 
| 3642   __ bind(&in_dictionary); | 3589   __ bind(&in_dictionary); | 
| 3643   __ li(result, Operand(1)); | 3590   __ LoadImmP(result, Operand(1)); | 
| 3644   __ Ret(); | 3591   __ Ret(); | 
| 3645 | 3592 | 
| 3646   __ bind(¬_in_dictionary); | 3593   __ bind(¬_in_dictionary); | 
| 3647   __ li(result, Operand::Zero()); | 3594   __ LoadImmP(result, Operand::Zero()); | 
| 3648   __ Ret(); | 3595   __ Ret(); | 
| 3649 } | 3596 } | 
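NameDictionary entries are kEntrySize == 3 pointers wide, so every slot index is scaled by 3 with a shift and an add (ShiftLeftP + AddP) rather than a multiply. The strength reduction, verified in plain C++:

```cpp
#include <cassert>

int main() {
  for (int index = 0; index < 100; index++) {
    int scaled = (index << 1) + index;  // index * 2 + index, as the stub does
    assert(scaled == index * 3);
  }
  return 0;
}
```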
| 3650 | 3597 | 
| 3651 |  | 
| 3652 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( | 3598 void StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime( | 
| 3653     Isolate* isolate) { | 3599     Isolate* isolate) { | 
| 3654   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs); | 3600   StoreBufferOverflowStub stub1(isolate, kDontSaveFPRegs); | 
| 3655   stub1.GetCode(); | 3601   stub1.GetCode(); | 
| 3656   // Hydrogen code stubs need stub2 at snapshot time. | 3602   // Hydrogen code stubs need stub2 at snapshot time. | 
| 3657   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); | 3603   StoreBufferOverflowStub stub2(isolate, kSaveFPRegs); | 
| 3658   stub2.GetCode(); | 3604   stub2.GetCode(); | 
| 3659 } | 3605 } | 
| 3660 | 3606 | 
| 3661 |  | 
| 3662 // Takes the input in 3 registers: address_ value_ and object_.  A pointer to | 3607 // Takes the input in 3 registers: address_ value_ and object_.  A pointer to | 
| 3663 // the value has just been written into the object, now this stub makes sure | 3608 // the value has just been written into the object, now this stub makes sure | 
| 3664 // we keep the GC informed.  The word in the object where the value has been | 3609 // we keep the GC informed.  The word in the object where the value has been | 
| 3665 // written is in the address register. | 3610 // written is in the address register. | 
| 3666 void RecordWriteStub::Generate(MacroAssembler* masm) { | 3611 void RecordWriteStub::Generate(MacroAssembler* masm) { | 
| 3667   Label skip_to_incremental_noncompacting; | 3612   Label skip_to_incremental_noncompacting; | 
| 3668   Label skip_to_incremental_compacting; | 3613   Label skip_to_incremental_compacting; | 
| 3669 | 3614 | 
| 3670   // The first two branch instructions are generated with labels so as to | 3615   // The first two branch instructions are generated with labels so as to | 
| 3671   // get the offset fixed up correctly by the bind(Label*) call.  We patch | 3616   // get the offset fixed up correctly by the bind(Label*) call.  We patch | 
| 3672   // it back and forth between branch condition True and False | 3617   // it back and forth between branch condition True and False | 
| 3673   // when we start and stop incremental heap marking. | 3618   // when we start and stop incremental heap marking. | 
| 3674   // See RecordWriteStub::Patch for details. | 3619   // See RecordWriteStub::Patch for details. | 
| 3675 | 3620 | 
| 3676   // Clear the bit, branch on True for NOP action initially | 3621   // Clear the bit, branch on True for NOP action initially | 
| 3677   __ crclr(Assembler::encode_crbit(cr2, CR_LT)); | 3622   __ b(CC_NOP, &skip_to_incremental_noncompacting); | 
| 3678   __ blt(&skip_to_incremental_noncompacting, cr2); | 3623   __ b(CC_NOP, &skip_to_incremental_compacting); | 
| 3679   __ blt(&skip_to_incremental_compacting, cr2); |  | 
| 3680 | 3624 | 
| 3681   if (remembered_set_action() == EMIT_REMEMBERED_SET) { | 3625   if (remembered_set_action() == EMIT_REMEMBERED_SET) { | 
| 3682     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(), | 3626     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(), | 
| 3683                            MacroAssembler::kReturnAtEnd); | 3627                            MacroAssembler::kReturnAtEnd); | 
| 3684   } | 3628   } | 
| 3685   __ Ret(); | 3629   __ Ret(); | 
| 3686 | 3630 | 
| 3687   __ bind(&skip_to_incremental_noncompacting); | 3631   __ bind(&skip_to_incremental_noncompacting); | 
| 3688   GenerateIncremental(masm, INCREMENTAL); | 3632   GenerateIncremental(masm, INCREMENTAL); | 
| 3689 | 3633 | 
| 3690   __ bind(&skip_to_incremental_compacting); | 3634   __ bind(&skip_to_incremental_compacting); | 
| 3691   GenerateIncremental(masm, INCREMENTAL_COMPACTION); | 3635   GenerateIncremental(masm, INCREMENTAL_COMPACTION); | 
| 3692 | 3636 | 
| 3693   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. | 3637   // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. | 
| 3694   // Will be checked in IncrementalMarking::ActivateGeneratedStub. | 3638   // Will be checked in IncrementalMarking::ActivateGeneratedStub. | 
| 3695   // patching not required on PPC as the initial path is effectively NOP | 3639   // patching not required on S390 as the initial path is effectively NOP | 
| 3696 } | 3640 } | 
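The two leading branches are emitted with condition mask CC_NOP, i.e. never taken, so a freshly generated stub falls through to the store-buffer-only path; activating incremental marking later patches the masks so one branch fires. A conceptual model of the three modes, with ordinary C++ control flow standing in for the patched branch instructions (not executable patching):

```cpp
#include <cstdio>

enum Mode { STORE_BUFFER_ONLY, INCREMENTAL, INCREMENTAL_COMPACTION };

void RecordWrite(Mode mode) {
  // In the generated stub these two "if"s are branches whose condition mask
  // is toggled between CC_NOP (never taken) and always-taken by the patcher.
  if (mode == INCREMENTAL) { printf("incremental marking path\n"); return; }
  if (mode == INCREMENTAL_COMPACTION) { printf("compacting path\n"); return; }
  printf("store-buffer-only (initial, unpatched) path\n");
}

int main() {
  RecordWrite(STORE_BUFFER_ONLY);
  RecordWrite(INCREMENTAL);
  RecordWrite(INCREMENTAL_COMPACTION);
  return 0;
}
```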
| 3697 | 3641 | 
| 3698 |  | 
| 3699 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | 3642 void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | 
| 3700   regs_.Save(masm); | 3643   regs_.Save(masm); | 
| 3701 | 3644 | 
| 3702   if (remembered_set_action() == EMIT_REMEMBERED_SET) { | 3645   if (remembered_set_action() == EMIT_REMEMBERED_SET) { | 
| 3703     Label dont_need_remembered_set; | 3646     Label dont_need_remembered_set; | 
| 3704 | 3647 | 
| 3705     __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 3648     __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 
| 3706     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value. | 3649     __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value. | 
| 3707                            regs_.scratch0(), &dont_need_remembered_set); | 3650                            regs_.scratch0(), &dont_need_remembered_set); | 
| 3708 | 3651 | 
| (...skipping 12 matching lines...) | 
| 3721     __ bind(&dont_need_remembered_set); | 3664     __ bind(&dont_need_remembered_set); | 
| 3722   } | 3665   } | 
| 3723 | 3666 | 
| 3724   CheckNeedsToInformIncrementalMarker( | 3667   CheckNeedsToInformIncrementalMarker( | 
| 3725       masm, kReturnOnNoNeedToInformIncrementalMarker, mode); | 3668       masm, kReturnOnNoNeedToInformIncrementalMarker, mode); | 
| 3726   InformIncrementalMarker(masm); | 3669   InformIncrementalMarker(masm); | 
| 3727   regs_.Restore(masm); | 3670   regs_.Restore(masm); | 
| 3728   __ Ret(); | 3671   __ Ret(); | 
| 3729 } | 3672 } | 
| 3730 | 3673 | 
| 3731 |  | 
| 3732 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | 3674 void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | 
| 3733   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode()); | 3675   regs_.SaveCallerSaveRegisters(masm, save_fp_regs_mode()); | 
| 3734   int argument_count = 3; | 3676   int argument_count = 3; | 
| 3735   __ PrepareCallCFunction(argument_count, regs_.scratch0()); | 3677   __ PrepareCallCFunction(argument_count, regs_.scratch0()); | 
| 3736   Register address = | 3678   Register address = | 
| 3737       r3.is(regs_.address()) ? regs_.scratch0() : regs_.address(); | 3679       r2.is(regs_.address()) ? regs_.scratch0() : regs_.address(); | 
| 3738   DCHECK(!address.is(regs_.object())); | 3680   DCHECK(!address.is(regs_.object())); | 
| 3739   DCHECK(!address.is(r3)); | 3681   DCHECK(!address.is(r2)); | 
| 3740   __ mr(address, regs_.address()); | 3682   __ LoadRR(address, regs_.address()); | 
| 3741   __ mr(r3, regs_.object()); | 3683   __ LoadRR(r2, regs_.object()); | 
| 3742   __ mr(r4, address); | 3684   __ LoadRR(r3, address); | 
| 3743   __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); | 3685   __ mov(r4, Operand(ExternalReference::isolate_address(isolate()))); | 
| 3744 | 3686 | 
| 3745   AllowExternalCallThatCantCauseGC scope(masm); | 3687   AllowExternalCallThatCantCauseGC scope(masm); | 
| 3746   __ CallCFunction( | 3688   __ CallCFunction( | 
| 3747       ExternalReference::incremental_marking_record_write_function(isolate()), | 3689       ExternalReference::incremental_marking_record_write_function(isolate()), | 
| 3748       argument_count); | 3690       argument_count); | 
| 3749   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode()); | 3691   regs_.RestoreCallerSaveRegisters(masm, save_fp_regs_mode()); | 
| 3750 } | 3692 } | 
| 3751 | 3693 | 
| 3752 |  | 
| 3753 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | 3694 void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | 
| 3754     MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need, | 3695     MacroAssembler* masm, OnNoNeedToInformIncrementalMarker on_no_need, | 
| 3755     Mode mode) { | 3696     Mode mode) { | 
| 3756   Label on_black; | 3697   Label on_black; | 
| 3757   Label need_incremental; | 3698   Label need_incremental; | 
| 3758   Label need_incremental_pop_scratch; | 3699   Label need_incremental_pop_scratch; | 
| 3759 | 3700 | 
| 3760   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0); | 3701   DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0); | 
| 3761   __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16))); | 3702   __ AndP(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); | 
| 3762   __ and_(regs_.scratch0(), regs_.object(), r0); |  | 
| 3763   __ LoadP( | 3703   __ LoadP( | 
| 3764       regs_.scratch1(), | 3704       regs_.scratch1(), | 
| 3765       MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset)); | 3705       MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset)); | 
| 3766   __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1)); | 3706   __ SubP(regs_.scratch1(), regs_.scratch1(), Operand(1)); | 
| 3767   __ StoreP( | 3707   __ StoreP( | 
| 3768       regs_.scratch1(), | 3708       regs_.scratch1(), | 
| 3769       MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset)); | 3709       MemOperand(regs_.scratch0(), MemoryChunk::kWriteBarrierCounterOffset)); | 
| 3770   __ cmpi(regs_.scratch1(), Operand::Zero());  // PPC, we could do better here | 3710   __ CmpP(regs_.scratch1(), Operand::Zero());  // S390, we could do better here | 
| 3771   __ blt(&need_incremental); | 3711   __ blt(&need_incremental); | 
| 3772 | 3712 | 
| 3773   // Let's look at the color of the object:  If it is not black we don't have | 3713   // Let's look at the color of the object:  If it is not black we don't have | 
| 3774   // to inform the incremental marker. | 3714   // to inform the incremental marker. | 
| 3775   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); | 3715   __ JumpIfBlack(regs_.object(), regs_.scratch0(), regs_.scratch1(), &on_black); | 
| 3776 | 3716 | 
| 3777   regs_.Restore(masm); | 3717   regs_.Restore(masm); | 
| 3778   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { | 3718   if (on_no_need == kUpdateRememberedSetOnNoNeedToInformIncrementalMarker) { | 
| 3779     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(), | 3719     __ RememberedSetHelper(object(), address(), value(), save_fp_regs_mode(), | 
| 3780                            MacroAssembler::kReturnAtEnd); | 3720                            MacroAssembler::kReturnAtEnd); | 
| (...skipping 41 matching lines...) | 
| 3822   } | 3762   } | 
| 3823 | 3763 | 
| 3824   __ bind(&need_incremental_pop_scratch); | 3764   __ bind(&need_incremental_pop_scratch); | 
| 3825   __ Pop(regs_.object(), regs_.address()); | 3765   __ Pop(regs_.object(), regs_.address()); | 
| 3826 | 3766 | 
| 3827   __ bind(&need_incremental); | 3767   __ bind(&need_incremental); | 
| 3828 | 3768 | 
| 3829   // Fall through when we need to inform the incremental marker. | 3769   // Fall through when we need to inform the incremental marker. | 
| 3830 } | 3770 } | 
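Because heap pages are aligned to kPageAlignmentMask + 1 bytes, masking any object address with ~kPageAlignmentMask lands on the page's MemoryChunk header, where the write-barrier counter lives. A stand-alone sketch; the 1 MiB page size is an assumption for illustration:

```cpp
#include <cassert>
#include <cstdint>

int main() {
  const uintptr_t kPageSize = uintptr_t{1} << 20;     // assumed page size
  const uintptr_t kPageAlignmentMask = kPageSize - 1;
  uintptr_t page = 0x40000000;                        // page-aligned base
  uintptr_t object = page + 0x1234;                   // object inside the page
  uintptr_t chunk = object & ~kPageAlignmentMask;     // the stub's AndP
  assert(chunk == page);                              // back at the header
  return 0;
}
```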
| 3831 | 3771 | 
| 3832 |  | 
| 3833 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 3772 void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 
| 3834   CEntryStub ces(isolate(), 1, kSaveFPRegs); | 3773   CEntryStub ces(isolate(), 1, kSaveFPRegs); | 
| 3835   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); | 3774   __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); | 
| 3836   int parameter_count_offset = | 3775   int parameter_count_offset = | 
| 3837       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 3776       StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 
| 3838   __ LoadP(r4, MemOperand(fp, parameter_count_offset)); | 3777   __ LoadP(r3, MemOperand(fp, parameter_count_offset)); | 
| 3839   if (function_mode() == JS_FUNCTION_STUB_MODE) { | 3778   if (function_mode() == JS_FUNCTION_STUB_MODE) { | 
| 3840     __ addi(r4, r4, Operand(1)); | 3779     __ AddP(r3, Operand(1)); | 
| 3841   } | 3780   } | 
| 3842   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 3781   masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 
| 3843   __ slwi(r4, r4, Operand(kPointerSizeLog2)); | 3782   __ ShiftLeftP(r3, r3, Operand(kPointerSizeLog2)); | 
| 3844   __ add(sp, sp, r4); | 3783   __ la(sp, MemOperand(r3, sp)); | 
| 3845   __ Ret(); | 3784   __ Ret(); | 
| 3846 } | 3785 } | 
| 3847 | 3786 | 
| 3848 |  | 
| 3849 void LoadICTrampolineStub::Generate(MacroAssembler* masm) { | 3787 void LoadICTrampolineStub::Generate(MacroAssembler* masm) { | 
| 3850   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister()); | 3788   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister()); | 
| 3851   LoadICStub stub(isolate(), state()); | 3789   LoadICStub stub(isolate(), state()); | 
| 3852   stub.GenerateForTrampoline(masm); | 3790   stub.GenerateForTrampoline(masm); | 
| 3853 } | 3791 } | 
| 3854 | 3792 | 
| 3855 |  | 
| 3856 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) { | 3793 void KeyedLoadICTrampolineStub::Generate(MacroAssembler* masm) { | 
| 3857   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister()); | 3794   __ EmitLoadTypeFeedbackVector(LoadWithVectorDescriptor::VectorRegister()); | 
| 3858   KeyedLoadICStub stub(isolate(), state()); | 3795   KeyedLoadICStub stub(isolate(), state()); | 
| 3859   stub.GenerateForTrampoline(masm); | 3796   stub.GenerateForTrampoline(masm); | 
| 3860 } | 3797 } | 
| 3861 | 3798 | 
| 3862 |  | 
| 3863 void CallICTrampolineStub::Generate(MacroAssembler* masm) { | 3799 void CallICTrampolineStub::Generate(MacroAssembler* masm) { | 
| 3864   __ EmitLoadTypeFeedbackVector(r5); | 3800   __ EmitLoadTypeFeedbackVector(r4); | 
| 3865   CallICStub stub(isolate(), state()); | 3801   CallICStub stub(isolate(), state()); | 
| 3866   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 3802   __ Jump(stub.GetCode(), RelocInfo::CODE_TARGET); | 
| 3867 } | 3803 } | 
| 3868 | 3804 | 
| 3869 |  | 
| 3870 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); } | 3805 void LoadICStub::Generate(MacroAssembler* masm) { GenerateImpl(masm, false); } | 
| 3871 | 3806 | 
| 3872 |  | 
| 3873 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) { | 3807 void LoadICStub::GenerateForTrampoline(MacroAssembler* masm) { | 
| 3874   GenerateImpl(masm, true); | 3808   GenerateImpl(masm, true); | 
| 3875 } | 3809 } | 
| 3876 | 3810 | 
| 3877 |  | 
| 3878 static void HandleArrayCases(MacroAssembler* masm, Register feedback, | 3811 static void HandleArrayCases(MacroAssembler* masm, Register feedback, | 
| 3879                              Register receiver_map, Register scratch1, | 3812                              Register receiver_map, Register scratch1, | 
| 3880                              Register scratch2, bool is_polymorphic, | 3813                              Register scratch2, bool is_polymorphic, | 
| 3881                              Label* miss) { | 3814                              Label* miss) { | 
| 3882   // feedback initially contains the feedback array | 3815   // feedback initially contains the feedback array | 
| 3883   Label next_loop, prepare_next; | 3816   Label next_loop, prepare_next; | 
| 3884   Label start_polymorphic; | 3817   Label start_polymorphic; | 
| 3885 | 3818 | 
| 3886   Register cached_map = scratch1; | 3819   Register cached_map = scratch1; | 
| 3887 | 3820 | 
| 3888   __ LoadP(cached_map, | 3821   __ LoadP(cached_map, | 
| 3889            FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0))); | 3822            FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(0))); | 
| 3890   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); | 3823   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); | 
| 3891   __ cmp(receiver_map, cached_map); | 3824   __ CmpP(receiver_map, cached_map); | 
| 3892   __ bne(&start_polymorphic); | 3825   __ bne(&start_polymorphic, Label::kNear); | 
| 3893   // found, now call handler. | 3826   // found, now call handler. | 
| 3894   Register handler = feedback; | 3827   Register handler = feedback; | 
| 3895   __ LoadP(handler, | 3828   __ LoadP(handler, | 
| 3896            FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1))); | 3829            FieldMemOperand(feedback, FixedArray::OffsetOfElementAt(1))); | 
| 3897   __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); | 3830   __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 3898   __ Jump(ip); | 3831   __ Jump(ip); | 
| 3899 | 3832 | 
| 3900 |  | 
| 3901   Register length = scratch2; | 3833   Register length = scratch2; | 
| 3902   __ bind(&start_polymorphic); | 3834   __ bind(&start_polymorphic); | 
| 3903   __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset)); | 3835   __ LoadP(length, FieldMemOperand(feedback, FixedArray::kLengthOffset)); | 
| 3904   if (!is_polymorphic) { | 3836   if (!is_polymorphic) { | 
| 3905     // If the IC could be monomorphic we have to make sure we don't go past the | 3837     // If the IC could be monomorphic we have to make sure we don't go past the | 
| 3906     // end of the feedback array. | 3838     // end of the feedback array. | 
| 3907     __ CmpSmiLiteral(length, Smi::FromInt(2), r0); | 3839     __ CmpSmiLiteral(length, Smi::FromInt(2), r0); | 
| 3908     __ beq(miss); | 3840     __ beq(miss); | 
| 3909   } | 3841   } | 
| 3910 | 3842 | 
| 3911   Register too_far = length; | 3843   Register too_far = length; | 
| 3912   Register pointer_reg = feedback; | 3844   Register pointer_reg = feedback; | 
| 3913 | 3845 | 
| 3914   // +-----+------+------+-----+-----+ ... ----+ | 3846   // +-----+------+------+-----+-----+ ... ----+ | 
| 3915   // | map | len  | wm0  | h0  | wm1 |      hN | | 3847   // | map | len  | wm0  | h0  | wm1 |      hN | | 
| 3916   // +-----+------+------+-----+-----+ ... ----+ | 3848   // +-----+------+------+-----+-----+ ... ----+ | 
| 3917   //                 0      1     2        len-1 | 3849   //                 0      1     2        len-1 | 
| 3918   //                              ^              ^ | 3850   //                              ^              ^ | 
| 3919   //                              |              | | 3851   //                              |              | | 
| 3920   //                         pointer_reg      too_far | 3852   //                         pointer_reg      too_far | 
| 3921   //                         aka feedback     scratch2 | 3853   //                         aka feedback     scratch2 | 
| 3922   // also need receiver_map | 3854   // also need receiver_map | 
| 3923   // use cached_map (scratch1) to look in the weak map values. | 3855   // use cached_map (scratch1) to look in the weak map values. | 
| 3924   __ SmiToPtrArrayOffset(r0, length); | 3856   __ SmiToPtrArrayOffset(r0, length); | 
| 3925   __ add(too_far, feedback, r0); | 3857   __ AddP(too_far, feedback, r0); | 
| 3926   __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 3858   __ AddP(too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
| 3927   __ addi(pointer_reg, feedback, | 3859   __ AddP(pointer_reg, feedback, | 
| 3928           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag)); | 3860           Operand(FixedArray::OffsetOfElementAt(2) - kHeapObjectTag)); | 
| 3929 | 3861 | 
| 3930   __ bind(&next_loop); | 3862   __ bind(&next_loop); | 
| 3931   __ LoadP(cached_map, MemOperand(pointer_reg)); | 3863   __ LoadP(cached_map, MemOperand(pointer_reg)); | 
| 3932   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); | 3864   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); | 
| 3933   __ cmp(receiver_map, cached_map); | 3865   __ CmpP(receiver_map, cached_map); | 
| 3934   __ bne(&prepare_next); | 3866   __ bne(&prepare_next, Label::kNear); | 
| 3935   __ LoadP(handler, MemOperand(pointer_reg, kPointerSize)); | 3867   __ LoadP(handler, MemOperand(pointer_reg, kPointerSize)); | 
| 3936   __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); | 3868   __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 3937   __ Jump(ip); | 3869   __ Jump(ip); | 
| 3938 | 3870 | 
| 3939   __ bind(&prepare_next); | 3871   __ bind(&prepare_next); | 
| 3940   __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 2)); | 3872   __ AddP(pointer_reg, Operand(kPointerSize * 2)); | 
| 3941   __ cmp(pointer_reg, too_far); | 3873   __ CmpP(pointer_reg, too_far); | 
| 3942   __ blt(&next_loop); | 3874   __ blt(&next_loop, Label::kNear); | 
| 3943 | 3875 | 
| 3944   // We exhausted our array of map handler pairs. | 3876   // We exhausted our array of map handler pairs. | 
| 3945   __ b(miss); | 3877   __ b(miss); | 
| 3946 } | 3878 } | 
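HandleArrayCases walks the (weak map, handler) pairs laid out in the diagram above, comparing each cached map against the receiver map and jumping to the paired handler on a hit, or falling through to the miss label once the array is exhausted. A plain-C++ stand-in for that scan; the types and values are illustrative:

```cpp
#include <cstdio>
#include <utility>
#include <vector>

using Map = const void*;
using Handler = int;

// pointer_reg..too_far walk over (map, handler) pairs; nullptr means "miss".
Handler* Lookup(std::vector<std::pair<Map, Handler>>& feedback,
                Map receiver_map) {
  for (auto& entry : feedback) {
    if (entry.first == receiver_map)  // CmpP(receiver_map, cached_map)
      return &entry.second;           // found: jump to the paired handler
  }
  return nullptr;                     // exhausted the array: miss
}

int main() {
  int m1, m2;  // stand-ins for two distinct maps
  std::vector<std::pair<Map, Handler>> feedback = {{&m1, 101}, {&m2, 102}};
  printf("handler: %d\n", *Lookup(feedback, &m2));          // prints 102
  printf("miss: %s\n", Lookup(feedback, nullptr) ? "no" : "yes");
  return 0;
}
```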
| 3947 | 3879 | 
| 3948 |  | 
| 3949 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver, | 3880 static void HandleMonomorphicCase(MacroAssembler* masm, Register receiver, | 
| 3950                                   Register receiver_map, Register feedback, | 3881                                   Register receiver_map, Register feedback, | 
| 3951                                   Register vector, Register slot, | 3882                                   Register vector, Register slot, | 
| 3952                                   Register scratch, Label* compare_map, | 3883                                   Register scratch, Label* compare_map, | 
| 3953                                   Label* load_smi_map, Label* try_array) { | 3884                                   Label* load_smi_map, Label* try_array) { | 
| 3954   __ JumpIfSmi(receiver, load_smi_map); | 3885   __ JumpIfSmi(receiver, load_smi_map); | 
| 3955   __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 3886   __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 
| 3956   __ bind(compare_map); | 3887   __ bind(compare_map); | 
| 3957   Register cached_map = scratch; | 3888   Register cached_map = scratch; | 
| 3958   // Move the weak map into the weak_cell register. | 3889   // Move the weak map into the weak_cell register. | 
| 3959   __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset)); | 3890   __ LoadP(cached_map, FieldMemOperand(feedback, WeakCell::kValueOffset)); | 
| 3960   __ cmp(cached_map, receiver_map); | 3891   __ CmpP(cached_map, receiver_map); | 
| 3961   __ bne(try_array); | 3892   __ bne(try_array); | 
| 3962   Register handler = feedback; | 3893   Register handler = feedback; | 
| 3963   __ SmiToPtrArrayOffset(r0, slot); | 3894   __ SmiToPtrArrayOffset(r1, slot); | 
| 3964   __ add(handler, vector, r0); |  | 
| 3965   __ LoadP(handler, | 3895   __ LoadP(handler, | 
| 3966            FieldMemOperand(handler, FixedArray::kHeaderSize + kPointerSize)); | 3896            FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize)); | 
| 3967   __ addi(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); | 3897   __ AddP(ip, handler, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 3968   __ Jump(ip); | 3898   __ Jump(ip); | 
| 3969 } | 3899 } | 
| 3970 | 3900 | 
|  | 3901 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 
|  | 3902   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r3 | 
|  | 3903   Register name = LoadWithVectorDescriptor::NameRegister();          // r4 | 
|  | 3904   Register vector = LoadWithVectorDescriptor::VectorRegister();      // r5 | 
|  | 3905   Register slot = LoadWithVectorDescriptor::SlotRegister();          // r2 | 
|  | 3906   Register feedback = r6; | 
|  | 3907   Register receiver_map = r7; | 
|  | 3908   Register scratch1 = r8; | 
| 3971 | 3909 | 
| 3972 void LoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 3910   __ SmiToPtrArrayOffset(r1, slot); | 
| 3973   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r4 | 3911   __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize)); | 
| 3974   Register name = LoadWithVectorDescriptor::NameRegister();          // r5 |  | 
| 3975   Register vector = LoadWithVectorDescriptor::VectorRegister();      // r6 |  | 
| 3976   Register slot = LoadWithVectorDescriptor::SlotRegister();          // r3 |  | 
| 3977   Register feedback = r7; |  | 
| 3978   Register receiver_map = r8; |  | 
| 3979   Register scratch1 = r9; |  | 
| 3980 |  | 
| 3981   __ SmiToPtrArrayOffset(r0, slot); |  | 
| 3982   __ add(feedback, vector, r0); |  | 
| 3983   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); |  | 
| 3984 | 3912 | 
| 3985   // Try to quickly handle the monomorphic case without knowing for sure | 3913   // Try to quickly handle the monomorphic case without knowing for sure | 
| 3986   // if we have a weak cell in feedback. We do know it's safe to look | 3914   // if we have a weak cell in feedback. We do know it's safe to look | 
| 3987   // at WeakCell::kValueOffset. | 3915   // at WeakCell::kValueOffset. | 
| 3988   Label try_array, load_smi_map, compare_map; | 3916   Label try_array, load_smi_map, compare_map; | 
| 3989   Label not_array, miss; | 3917   Label not_array, miss; | 
| 3990   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 3918   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 
| 3991                         scratch1, &compare_map, &load_smi_map, &try_array); | 3919                         scratch1, &compare_map, &load_smi_map, &try_array); | 
| 3992 | 3920 | 
| 3993   // Is it a fixed array? | 3921   // Is it a fixed array? | 
| 3994   __ bind(&try_array); | 3922   __ bind(&try_array); | 
| 3995   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); | 3923   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); | 
| 3996   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); | 3924   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); | 
| 3997   __ bne(&not_array); | 3925   __ bne(&not_array, Label::kNear); | 
| 3998   HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss); | 3926   HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss); | 
| 3999 | 3927 | 
| 4000   __ bind(&not_array); | 3928   __ bind(&not_array); | 
| 4001   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); | 3929   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); | 
| 4002   __ bne(&miss); | 3930   __ bne(&miss); | 
| 4003   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags( | 3931   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags( | 
| 4004       Code::ComputeHandlerFlags(Code::LOAD_IC)); | 3932       Code::ComputeHandlerFlags(Code::LOAD_IC)); | 
| 4005   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags, | 3933   masm->isolate()->stub_cache()->GenerateProbe(masm, Code::LOAD_IC, code_flags, | 
| 4006                                                receiver, name, feedback, | 3934                                                receiver, name, feedback, | 
| 4007                                                receiver_map, scratch1, r10); | 3935                                                receiver_map, scratch1, r9); | 
| 4008 | 3936 | 
| 4009   __ bind(&miss); | 3937   __ bind(&miss); | 
| 4010   LoadIC::GenerateMiss(masm); | 3938   LoadIC::GenerateMiss(masm); | 
| 4011 | 3939 | 
| 4012   __ bind(&load_smi_map); | 3940   __ bind(&load_smi_map); | 
| 4013   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 3941   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 
| 4014   __ b(&compare_map); | 3942   __ b(&compare_map); | 
| 4015 } | 3943 } | 
| 4016 | 3944 | 
| 4017 |  | 
| 4018 void KeyedLoadICStub::Generate(MacroAssembler* masm) { | 3945 void KeyedLoadICStub::Generate(MacroAssembler* masm) { | 
| 4019   GenerateImpl(masm, false); | 3946   GenerateImpl(masm, false); | 
| 4020 } | 3947 } | 
| 4021 | 3948 | 
| 4022 |  | 
| 4023 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) { | 3949 void KeyedLoadICStub::GenerateForTrampoline(MacroAssembler* masm) { | 
| 4024   GenerateImpl(masm, true); | 3950   GenerateImpl(masm, true); | 
| 4025 } | 3951 } | 
| 4026 | 3952 | 
|  | 3953 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 
|  | 3954   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r3 | 
|  | 3955   Register key = LoadWithVectorDescriptor::NameRegister();           // r4 | 
|  | 3956   Register vector = LoadWithVectorDescriptor::VectorRegister();      // r5 | 
|  | 3957   Register slot = LoadWithVectorDescriptor::SlotRegister();          // r2 | 
|  | 3958   Register feedback = r6; | 
|  | 3959   Register receiver_map = r7; | 
|  | 3960   Register scratch1 = r8; | 
| 4027 | 3961 | 
| 4028 void KeyedLoadICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 3962   __ SmiToPtrArrayOffset(r1, slot); | 
| 4029   Register receiver = LoadWithVectorDescriptor::ReceiverRegister();  // r4 | 3963   __ LoadP(feedback, FieldMemOperand(r1, vector, FixedArray::kHeaderSize)); | 
| 4030   Register key = LoadWithVectorDescriptor::NameRegister();           // r5 |  | 
| 4031   Register vector = LoadWithVectorDescriptor::VectorRegister();      // r6 |  | 
| 4032   Register slot = LoadWithVectorDescriptor::SlotRegister();          // r3 |  | 
| 4033   Register feedback = r7; |  | 
| 4034   Register receiver_map = r8; |  | 
| 4035   Register scratch1 = r9; |  | 
| 4036 |  | 
| 4037   __ SmiToPtrArrayOffset(r0, slot); |  | 
| 4038   __ add(feedback, vector, r0); |  | 
| 4039   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); |  | 
| 4040 | 3964 | 
| 4041   // Try to quickly handle the monomorphic case without knowing for sure | 3965   // Try to quickly handle the monomorphic case without knowing for sure | 
| 4042   // if we have a weak cell in feedback. We do know it's safe to look | 3966   // if we have a weak cell in feedback. We do know it's safe to look | 
| 4043   // at WeakCell::kValueOffset. | 3967   // at WeakCell::kValueOffset. | 
| 4044   Label try_array, load_smi_map, compare_map; | 3968   Label try_array, load_smi_map, compare_map; | 
| 4045   Label not_array, miss; | 3969   Label not_array, miss; | 
| 4046   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 3970   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 
| 4047                         scratch1, &compare_map, &load_smi_map, &try_array); | 3971                         scratch1, &compare_map, &load_smi_map, &try_array); | 
| 4048 | 3972 | 
| 4049   __ bind(&try_array); | 3973   __ bind(&try_array); | 
| 4050   // Is it a fixed array? | 3974   // Is it a fixed array? | 
| 4051   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); | 3975   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); | 
| 4052   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); | 3976   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); | 
| 4053   __ bne(&not_array); | 3977   __ bne(&not_array); | 
| 4054 | 3978 | 
| 4055   // We have a polymorphic element handler. | 3979   // We have a polymorphic element handler. | 
| 4056   Label polymorphic, try_poly_name; | 3980   Label polymorphic, try_poly_name; | 
| 4057   __ bind(&polymorphic); | 3981   __ bind(&polymorphic); | 
| 4058   HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, true, &miss); | 3982   HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, true, &miss); | 
| 4059 | 3983 | 
| 4060   __ bind(&not_array); | 3984   __ bind(&not_array); | 
| 4061   // Is it generic? | 3985   // Is it generic? | 
| 4062   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); | 3986   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); | 
| 4063   __ bne(&try_poly_name); | 3987   __ bne(&try_poly_name); | 
| 4064   Handle<Code> megamorphic_stub = | 3988   Handle<Code> megamorphic_stub = | 
| 4065       KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); | 3989       KeyedLoadIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); | 
| 4066   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); | 3990   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); | 
| 4067 | 3991 | 
| 4068   __ bind(&try_poly_name); | 3992   __ bind(&try_poly_name); | 
| 4069   // We might have a name in feedback, and a fixed array in the next slot. | 3993   // We might have a name in feedback, and a fixed array in the next slot. | 
| 4070   __ cmp(key, feedback); | 3994   __ CmpP(key, feedback); | 
| 4071   __ bne(&miss); | 3995   __ bne(&miss); | 
| 4072   // If the name comparison succeeded, we know we have a fixed array with | 3996   // If the name comparison succeeded, we know we have a fixed array with | 
| 4073   // at least one map/handler pair. | 3997   // at least one map/handler pair. | 
| 4074   __ SmiToPtrArrayOffset(r0, slot); | 3998   __ SmiToPtrArrayOffset(r1, slot); | 
| 4075   __ add(feedback, vector, r0); |  | 
| 4076   __ LoadP(feedback, | 3999   __ LoadP(feedback, | 
| 4077            FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); | 4000            FieldMemOperand(r1, vector, FixedArray::kHeaderSize + kPointerSize)); | 
| 4078   HandleArrayCases(masm, feedback, receiver_map, scratch1, r10, false, &miss); | 4001   HandleArrayCases(masm, feedback, receiver_map, scratch1, r9, false, &miss); | 
| 4079 | 4002 | 
| 4080   __ bind(&miss); | 4003   __ bind(&miss); | 
| 4081   KeyedLoadIC::GenerateMiss(masm); | 4004   KeyedLoadIC::GenerateMiss(masm); | 
| 4082 | 4005 | 
| 4083   __ bind(&load_smi_map); | 4006   __ bind(&load_smi_map); | 
| 4084   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 4007   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 
| 4085   __ b(&compare_map); | 4008   __ b(&compare_map); | 
| 4086 } | 4009 } | 
| 4087 | 4010 | 
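
For readers tracking the branch ladder above, the keyed load stub distinguishes four feedback shapes before falling through to the miss handler. A compilable C++ sketch of that decision order, with illustrative strings standing in for the generated jumps (none of these identifiers are V8 API):

```cpp
enum class Feedback { WeakCellMap, FixedArray, MegamorphicSymbol, Name };

// Mirrors the order of checks above: monomorphic map first, then the
// polymorphic array, then the megamorphic marker, then the named-property
// case whose (map, handler) pairs sit one slot past the name.
const char* DispatchKeyedLoad(Feedback f, bool map_match, bool key_match) {
  if (f == Feedback::WeakCellMap && map_match)
    return "jump to handler stored at slot + 1";
  if (f == Feedback::FixedArray)
    return "HandleArrayCases over (map, handler) pairs";
  if (f == Feedback::MegamorphicSymbol)
    return "tail-call the megamorphic stub";
  if (f == Feedback::Name && key_match)
    return "HandleArrayCases over pairs at slot + 1";
  return "KeyedLoadIC::GenerateMiss";
}
```
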
| 4088 |  | 
| 4089 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) { | 4011 void VectorStoreICTrampolineStub::Generate(MacroAssembler* masm) { | 
| 4090   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister()); | 4012   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister()); | 
| 4091   VectorStoreICStub stub(isolate(), state()); | 4013   VectorStoreICStub stub(isolate(), state()); | 
| 4092   stub.GenerateForTrampoline(masm); | 4014   stub.GenerateForTrampoline(masm); | 
| 4093 } | 4015 } | 
| 4094 | 4016 | 
| 4095 |  | 
| 4096 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) { | 4017 void VectorKeyedStoreICTrampolineStub::Generate(MacroAssembler* masm) { | 
| 4097   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister()); | 4018   __ EmitLoadTypeFeedbackVector(VectorStoreICDescriptor::VectorRegister()); | 
| 4098   VectorKeyedStoreICStub stub(isolate(), state()); | 4019   VectorKeyedStoreICStub stub(isolate(), state()); | 
| 4099   stub.GenerateForTrampoline(masm); | 4020   stub.GenerateForTrampoline(masm); | 
| 4100 } | 4021 } | 
| 4101 | 4022 | 
| 4102 |  | 
| 4103 void VectorStoreICStub::Generate(MacroAssembler* masm) { | 4023 void VectorStoreICStub::Generate(MacroAssembler* masm) { | 
| 4104   GenerateImpl(masm, false); | 4024   GenerateImpl(masm, false); | 
| 4105 } | 4025 } | 
| 4106 | 4026 | 
| 4107 |  | 
| 4108 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { | 4027 void VectorStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { | 
| 4109   GenerateImpl(masm, true); | 4028   GenerateImpl(masm, true); | 
| 4110 } | 4029 } | 
| 4111 | 4030 | 
| 4112 |  | 
| 4113 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 4031 void VectorStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 
| 4114   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r4 | 4032   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r3 | 
| 4115   Register key = VectorStoreICDescriptor::NameRegister();           // r5 | 4033   Register key = VectorStoreICDescriptor::NameRegister();           // r4 | 
| 4116   Register vector = VectorStoreICDescriptor::VectorRegister();      // r6 | 4034   Register vector = VectorStoreICDescriptor::VectorRegister();      // r5 | 
| 4117   Register slot = VectorStoreICDescriptor::SlotRegister();          // r7 | 4035   Register slot = VectorStoreICDescriptor::SlotRegister();          // r6 | 
| 4118   DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3));          // r3 | 4036   DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2));          // r2 | 
| 4119   Register feedback = r8; | 4037   Register feedback = r7; | 
| 4120   Register receiver_map = r9; | 4038   Register receiver_map = r8; | 
| 4121   Register scratch1 = r10; | 4039   Register scratch1 = r9; | 
| 4122 | 4040 | 
| 4123   __ SmiToPtrArrayOffset(r0, slot); | 4041   __ SmiToPtrArrayOffset(r0, slot); | 
| 4124   __ add(feedback, vector, r0); | 4042   __ AddP(feedback, vector, r0); | 
| 4125   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); | 4043   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); | 
| 4126 | 4044 | 
| 4127   // Try to quickly handle the monomorphic case without knowing for sure | 4045   // Try to quickly handle the monomorphic case without knowing for sure | 
| 4128   // if we have a weak cell in feedback. We do know it's safe to look | 4046   // if we have a weak cell in feedback. We do know it's safe to look | 
| 4129   // at WeakCell::kValueOffset. | 4047   // at WeakCell::kValueOffset. | 
| 4130   Label try_array, load_smi_map, compare_map; | 4048   Label try_array, load_smi_map, compare_map; | 
| 4131   Label not_array, miss; | 4049   Label not_array, miss; | 
| 4132   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 4050   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 
| 4133                         scratch1, &compare_map, &load_smi_map, &try_array); | 4051                         scratch1, &compare_map, &load_smi_map, &try_array); | 
| 4134 | 4052 | 
| 4135   // Is it a fixed array? | 4053   // Is it a fixed array? | 
| 4136   __ bind(&try_array); | 4054   __ bind(&try_array); | 
| 4137   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); | 4055   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); | 
| 4138   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); | 4056   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); | 
| 4139   __ bne(&not_array); | 4057   __ bne(&not_array); | 
| 4140 | 4058 | 
| 4141   Register scratch2 = r11; | 4059   Register scratch2 = ip; | 
| 4142   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true, | 4060   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, true, | 
| 4143                    &miss); | 4061                    &miss); | 
| 4144 | 4062 | 
| 4145   __ bind(&not_array); | 4063   __ bind(&not_array); | 
| 4146   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); | 4064   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); | 
| 4147   __ bne(&miss); | 4065   __ bne(&miss); | 
| 4148   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags( | 4066   Code::Flags code_flags = Code::RemoveTypeAndHolderFromFlags( | 
| 4149       Code::ComputeHandlerFlags(Code::STORE_IC)); | 4067       Code::ComputeHandlerFlags(Code::STORE_IC)); | 
| 4150   masm->isolate()->stub_cache()->GenerateProbe( | 4068   masm->isolate()->stub_cache()->GenerateProbe( | 
| 4151       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map, | 4069       masm, Code::STORE_IC, code_flags, receiver, key, feedback, receiver_map, | 
| 4152       scratch1, scratch2); | 4070       scratch1, scratch2); | 
| 4153 | 4071 | 
| 4154   __ bind(&miss); | 4072   __ bind(&miss); | 
| 4155   StoreIC::GenerateMiss(masm); | 4073   StoreIC::GenerateMiss(masm); | 
| 4156 | 4074 | 
| 4157   __ bind(&load_smi_map); | 4075   __ bind(&load_smi_map); | 
| 4158   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 4076   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 
| 4159   __ b(&compare_map); | 4077   __ b(&compare_map); | 
| 4160 } | 4078 } | 
| 4161 | 4079 | 
| 4162 |  | 
| 4163 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) { | 4080 void VectorKeyedStoreICStub::Generate(MacroAssembler* masm) { | 
| 4164   GenerateImpl(masm, false); | 4081   GenerateImpl(masm, false); | 
| 4165 } | 4082 } | 
| 4166 | 4083 | 
| 4167 |  | 
| 4168 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { | 4084 void VectorKeyedStoreICStub::GenerateForTrampoline(MacroAssembler* masm) { | 
| 4169   GenerateImpl(masm, true); | 4085   GenerateImpl(masm, true); | 
| 4170 } | 4086 } | 
| 4171 | 4087 | 
| 4172 |  | 
| 4173 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback, | 4088 static void HandlePolymorphicStoreCase(MacroAssembler* masm, Register feedback, | 
| 4174                                        Register receiver_map, Register scratch1, | 4089                                        Register receiver_map, Register scratch1, | 
| 4175                                        Register scratch2, Label* miss) { | 4090                                        Register scratch2, Label* miss) { | 
| 4176   // feedback initially contains the feedback array | 4091   // feedback initially contains the feedback array | 
| 4177   Label next_loop, prepare_next; | 4092   Label next_loop, prepare_next; | 
| 4178   Label start_polymorphic; | 4093   Label start_polymorphic; | 
| 4179   Label transition_call; | 4094   Label transition_call; | 
| 4180 | 4095 | 
| 4181   Register cached_map = scratch1; | 4096   Register cached_map = scratch1; | 
| 4182   Register too_far = scratch2; | 4097   Register too_far = scratch2; | 
| 4183   Register pointer_reg = feedback; | 4098   Register pointer_reg = feedback; | 
| 4184   __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset)); | 4099   __ LoadP(too_far, FieldMemOperand(feedback, FixedArray::kLengthOffset)); | 
| 4185 | 4100 | 
| 4186   // +-----+------+------+-----+-----+-----+ ... ----+ | 4101   // +-----+------+------+-----+-----+-----+ ... ----+ | 
| 4187   // | map | len  | wm0  | wt0 | h0  | wm1 |      hN | | 4102   // | map | len  | wm0  | wt0 | h0  | wm1 |      hN | | 
| 4188   // +-----+------+------+-----+-----+ ----+ ... ----+ | 4103   // +-----+------+------+-----+-----+ ----+ ... ----+ | 
| 4189   //                 0      1     2              len-1 | 4104   //                 0      1     2              len-1 | 
| 4190   //                 ^                                 ^ | 4105   //                 ^                                 ^ | 
| 4191   //                 |                                 | | 4106   //                 |                                 | | 
| 4192   //             pointer_reg                        too_far | 4107   //             pointer_reg                        too_far | 
| 4193   //             aka feedback                       scratch2 | 4108   //             aka feedback                       scratch2 | 
| 4194   // also need receiver_map | 4109   // also need receiver_map | 
| 4195   // use cached_map (scratch1) to look in the weak map values. | 4110   // use cached_map (scratch1) to look in the weak map values. | 
| 4196   __ SmiToPtrArrayOffset(r0, too_far); | 4111   __ SmiToPtrArrayOffset(r0, too_far); | 
| 4197   __ add(too_far, feedback, r0); | 4112   __ AddP(too_far, feedback, r0); | 
| 4198   __ addi(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4113   __ AddP(too_far, too_far, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
| 4199   __ addi(pointer_reg, feedback, | 4114   __ AddP(pointer_reg, feedback, | 
| 4200           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag)); | 4115           Operand(FixedArray::OffsetOfElementAt(0) - kHeapObjectTag)); | 
| 4201 | 4116 | 
| 4202   __ bind(&next_loop); | 4117   __ bind(&next_loop); | 
| 4203   __ LoadP(cached_map, MemOperand(pointer_reg)); | 4118   __ LoadP(cached_map, MemOperand(pointer_reg)); | 
| 4204   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); | 4119   __ LoadP(cached_map, FieldMemOperand(cached_map, WeakCell::kValueOffset)); | 
| 4205   __ cmp(receiver_map, cached_map); | 4120   __ CmpP(receiver_map, cached_map); | 
| 4206   __ bne(&prepare_next); | 4121   __ bne(&prepare_next); | 
| 4207   // Is it a transitioning store? | 4122   // Is it a transitioning store? | 
| 4208   __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize)); | 4123   __ LoadP(too_far, MemOperand(pointer_reg, kPointerSize)); | 
| 4209   __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex); | 4124   __ CompareRoot(too_far, Heap::kUndefinedValueRootIndex); | 
| 4210   __ bne(&transition_call); | 4125   __ bne(&transition_call); | 
| 4211   __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2)); | 4126   __ LoadP(pointer_reg, MemOperand(pointer_reg, kPointerSize * 2)); | 
| 4212   __ addi(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4127   __ AddP(ip, pointer_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 4213   __ Jump(ip); | 4128   __ Jump(ip); | 
| 4214 | 4129 | 
| 4215   __ bind(&transition_call); | 4130   __ bind(&transition_call); | 
| 4216   __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset)); | 4131   __ LoadP(too_far, FieldMemOperand(too_far, WeakCell::kValueOffset)); | 
| 4217   __ JumpIfSmi(too_far, miss); | 4132   __ JumpIfSmi(too_far, miss); | 
| 4218 | 4133 | 
| 4219   __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2)); | 4134   __ LoadP(receiver_map, MemOperand(pointer_reg, kPointerSize * 2)); | 
| 4220 | 4135 | 
| 4221   // Load the map into the correct register. | 4136   // Load the map into the correct register. | 
| 4222   DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister())); | 4137   DCHECK(feedback.is(VectorStoreTransitionDescriptor::MapRegister())); | 
| 4223   __ mr(feedback, too_far); | 4138   __ LoadRR(feedback, too_far); | 
| 4224 | 4139 | 
| 4225   __ addi(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag)); | 4140   __ AddP(ip, receiver_map, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| 4226   __ Jump(ip); | 4141   __ Jump(ip); | 
| 4227 | 4142 | 
| 4228   __ bind(&prepare_next); | 4143   __ bind(&prepare_next); | 
| 4229   __ addi(pointer_reg, pointer_reg, Operand(kPointerSize * 3)); | 4144   __ AddP(pointer_reg, pointer_reg, Operand(kPointerSize * 3)); | 
| 4230   __ cmpl(pointer_reg, too_far); | 4145   __ CmpLogicalP(pointer_reg, too_far); | 
| 4231   __ blt(&next_loop); | 4146   __ blt(&next_loop); | 
| 4232 | 4147 | 
| 4233   // We exhausted our array of map handler pairs. | 4148   // We exhausted our array of map handler pairs. | 
| 4234   __ b(miss); | 4149   __ b(miss); | 
| 4235 } | 4150 } | 
| 4236 | 4151 | 
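
The layout diagram above shows the load case's (map, handler) pairs; the store case scans triples of (weak map, transition map or undefined, handler), which is why this loop advances `pointer_reg` by `kPointerSize * 3` where the load loop used `* 2`. A small sketch of the same scan in plain C++, under hypothetical types:

```cpp
#include <cstddef>

struct StoreEntry {          // one logical triple in the feedback FixedArray
  const void* cached_map;    // weak cell value: the receiver map to match
  const void* transition;    // undefined for a plain store, else a new map
  const void* handler;       // code object to jump to
};

// Returns the matching entry, or nullptr to model the branch to `miss`.
const StoreEntry* FindStoreHandler(const StoreEntry* begin, std::size_t len,
                                   const void* receiver_map) {
  const StoreEntry* too_far = begin + len;               // from kLengthOffset
  for (const StoreEntry* p = begin; p < too_far; ++p) {  // CmpLogicalP/blt loop
    if (p->cached_map == receiver_map) return p;         // CmpP in next_loop
  }
  return nullptr;                                        // exhausted: b(miss)
}
```
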
| 4237 |  | 
| 4238 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 4152 void VectorKeyedStoreICStub::GenerateImpl(MacroAssembler* masm, bool in_frame) { | 
| 4239   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r4 | 4153   Register receiver = VectorStoreICDescriptor::ReceiverRegister();  // r3 | 
| 4240   Register key = VectorStoreICDescriptor::NameRegister();           // r5 | 4154   Register key = VectorStoreICDescriptor::NameRegister();           // r4 | 
| 4241   Register vector = VectorStoreICDescriptor::VectorRegister();      // r6 | 4155   Register vector = VectorStoreICDescriptor::VectorRegister();      // r5 | 
| 4242   Register slot = VectorStoreICDescriptor::SlotRegister();          // r7 | 4156   Register slot = VectorStoreICDescriptor::SlotRegister();          // r6 | 
| 4243   DCHECK(VectorStoreICDescriptor::ValueRegister().is(r3));          // r3 | 4157   DCHECK(VectorStoreICDescriptor::ValueRegister().is(r2));          // r2 | 
| 4244   Register feedback = r8; | 4158   Register feedback = r7; | 
| 4245   Register receiver_map = r9; | 4159   Register receiver_map = r8; | 
| 4246   Register scratch1 = r10; | 4160   Register scratch1 = r9; | 
| 4247 | 4161 | 
| 4248   __ SmiToPtrArrayOffset(r0, slot); | 4162   __ SmiToPtrArrayOffset(r0, slot); | 
| 4249   __ add(feedback, vector, r0); | 4163   __ AddP(feedback, vector, r0); | 
| 4250   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); | 4164   __ LoadP(feedback, FieldMemOperand(feedback, FixedArray::kHeaderSize)); | 
| 4251 | 4165 | 
| 4252   // Try to quickly handle the monomorphic case without knowing for sure | 4166   // Try to quickly handle the monomorphic case without knowing for sure | 
| 4253   // if we have a weak cell in feedback. We do know it's safe to look | 4167   // if we have a weak cell in feedback. We do know it's safe to look | 
| 4254   // at WeakCell::kValueOffset. | 4168   // at WeakCell::kValueOffset. | 
| 4255   Label try_array, load_smi_map, compare_map; | 4169   Label try_array, load_smi_map, compare_map; | 
| 4256   Label not_array, miss; | 4170   Label not_array, miss; | 
| 4257   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 4171   HandleMonomorphicCase(masm, receiver, receiver_map, feedback, vector, slot, | 
| 4258                         scratch1, &compare_map, &load_smi_map, &try_array); | 4172                         scratch1, &compare_map, &load_smi_map, &try_array); | 
| 4259 | 4173 | 
| 4260   __ bind(&try_array); | 4174   __ bind(&try_array); | 
| 4261   // Is it a fixed array? | 4175   // Is it a fixed array? | 
| 4262   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); | 4176   __ LoadP(scratch1, FieldMemOperand(feedback, HeapObject::kMapOffset)); | 
| 4263   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); | 4177   __ CompareRoot(scratch1, Heap::kFixedArrayMapRootIndex); | 
| 4264   __ bne(&not_array); | 4178   __ bne(&not_array); | 
| 4265 | 4179 | 
| 4266   // We have a polymorphic element handler. | 4180   // We have a polymorphic element handler. | 
| 4267   Label polymorphic, try_poly_name; | 4181   Label polymorphic, try_poly_name; | 
| 4268   __ bind(&polymorphic); | 4182   __ bind(&polymorphic); | 
| 4269 | 4183 | 
| 4270   Register scratch2 = r11; | 4184   Register scratch2 = ip; | 
| 4271 | 4185 | 
| 4272   HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2, | 4186   HandlePolymorphicStoreCase(masm, feedback, receiver_map, scratch1, scratch2, | 
| 4273                              &miss); | 4187                              &miss); | 
| 4274 | 4188 | 
| 4275   __ bind(&not_array); | 4189   __ bind(&not_array); | 
| 4276   // Is it generic? | 4190   // Is it generic? | 
| 4277   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); | 4191   __ CompareRoot(feedback, Heap::kmegamorphic_symbolRootIndex); | 
| 4278   __ bne(&try_poly_name); | 4192   __ bne(&try_poly_name); | 
| 4279   Handle<Code> megamorphic_stub = | 4193   Handle<Code> megamorphic_stub = | 
| 4280       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); | 4194       KeyedStoreIC::ChooseMegamorphicStub(masm->isolate(), GetExtraICState()); | 
| 4281   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); | 4195   __ Jump(megamorphic_stub, RelocInfo::CODE_TARGET); | 
| 4282 | 4196 | 
| 4283   __ bind(&try_poly_name); | 4197   __ bind(&try_poly_name); | 
| 4284   // We might have a name in feedback, and a fixed array in the next slot. | 4198   // We might have a name in feedback, and a fixed array in the next slot. | 
| 4285   __ cmp(key, feedback); | 4199   __ CmpP(key, feedback); | 
| 4286   __ bne(&miss); | 4200   __ bne(&miss); | 
| 4287   // If the name comparison succeeded, we know we have a fixed array with | 4201   // If the name comparison succeeded, we know we have a fixed array with | 
| 4288   // at least one map/handler pair. | 4202   // at least one map/handler pair. | 
| 4289   __ SmiToPtrArrayOffset(r0, slot); | 4203   __ SmiToPtrArrayOffset(r0, slot); | 
| 4290   __ add(feedback, vector, r0); | 4204   __ AddP(feedback, vector, r0); | 
| 4291   __ LoadP(feedback, | 4205   __ LoadP(feedback, | 
| 4292            FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); | 4206            FieldMemOperand(feedback, FixedArray::kHeaderSize + kPointerSize)); | 
| 4293   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false, | 4207   HandleArrayCases(masm, feedback, receiver_map, scratch1, scratch2, false, | 
| 4294                    &miss); | 4208                    &miss); | 
| 4295 | 4209 | 
| 4296   __ bind(&miss); | 4210   __ bind(&miss); | 
| 4297   KeyedStoreIC::GenerateMiss(masm); | 4211   KeyedStoreIC::GenerateMiss(masm); | 
| 4298 | 4212 | 
| 4299   __ bind(&load_smi_map); | 4213   __ bind(&load_smi_map); | 
| 4300   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 4214   __ LoadRoot(receiver_map, Heap::kHeapNumberMapRootIndex); | 
| 4301   __ b(&compare_map); | 4215   __ b(&compare_map); | 
| 4302 } | 4216 } | 
| 4303 | 4217 | 
| 4304 |  | 
| 4305 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 4218 void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 
| 4306   if (masm->isolate()->function_entry_hook() != NULL) { | 4219   if (masm->isolate()->function_entry_hook() != NULL) { | 
| 4307     PredictableCodeSizeScope predictable(masm, | 4220     PredictableCodeSizeScope predictable(masm, | 
| 4308 #if V8_TARGET_ARCH_PPC64 | 4221 #if V8_TARGET_ARCH_S390X | 
| 4309                                          14 * Assembler::kInstrSize); | 4222                                          40); | 
|  | 4223 #elif V8_HOST_ARCH_S390 | 
|  | 4224                                          36); | 
| 4310 #else | 4225 #else | 
| 4311                                          11 * Assembler::kInstrSize); | 4226                                          32); | 
| 4312 #endif | 4227 #endif | 
| 4313     ProfileEntryHookStub stub(masm->isolate()); | 4228     ProfileEntryHookStub stub(masm->isolate()); | 
| 4314     __ mflr(r0); | 4229     __ CleanseP(r14); | 
| 4315     __ Push(r0, ip); | 4230     __ Push(r14, ip); | 
| 4316     __ CallStub(&stub); | 4231     __ CallStub(&stub);  // BRASL | 
| 4317     __ Pop(r0, ip); | 4232     __ Pop(r14, ip); | 
| 4318     __ mtlr(r0); |  | 
| 4319   } | 4233   } | 
| 4320 } | 4234 } | 
| 4321 | 4235 | 
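
The `PredictableCodeSizeScope` argument switches from instruction counts to raw byte counts because PPC instructions are a fixed four bytes while S390 instructions are two, four, or six bytes long, so only a byte total pins down the region size. A sketch of the difference, using the numbers from the scope arguments above:

```cpp
// Illustrative sizing only; the byte totals come from the scope arguments
// above, and the 4-byte width is the PPC ISA's fixed encoding.
constexpr int kPpcInstrSize = 4;                       // PPC: fixed-width ISA
constexpr int kPpc64RegionBytes = 14 * kPpcInstrSize;  // old: count * kInstrSize
constexpr int kS390XRegionBytes = 40;                  // new: stated in bytes,
                                                       // S390 mixes 2/4/6-byte
                                                       // instructions
```
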
| 4322 |  | 
| 4323 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { | 4236 void ProfileEntryHookStub::Generate(MacroAssembler* masm) { | 
| 4324   // The entry hook is a "push lr, ip" instruction, followed by a call. | 4237 // The entry hook is a "push lr" instruction (LAY+ST/STG), followed by a call. | 
|  | 4238 #if V8_TARGET_ARCH_S390X | 
| 4325   const int32_t kReturnAddressDistanceFromFunctionStart = | 4239   const int32_t kReturnAddressDistanceFromFunctionStart = | 
| 4326       Assembler::kCallTargetAddressOffset + 3 * Assembler::kInstrSize; | 4240       Assembler::kCallTargetAddressOffset + 18;  // LAY + STG * 2 | 
|  | 4241 #elif V8_HOST_ARCH_S390 | 
|  | 4242   const int32_t kReturnAddressDistanceFromFunctionStart = | 
|  | 4243       Assembler::kCallTargetAddressOffset + 18;  // NILH + LAY + ST * 2 | 
|  | 4244 #else | 
|  | 4245   const int32_t kReturnAddressDistanceFromFunctionStart = | 
|  | 4246       Assembler::kCallTargetAddressOffset + 14;  // LAY + ST * 2 | 
|  | 4247 #endif | 
| 4327 | 4248 | 
| 4328   // This should contain all kJSCallerSaved registers. | 4249   // This should contain all kJSCallerSaved registers. | 
| 4329   const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers. | 4250   const RegList kSavedRegs = kJSCallerSaved |  // Caller saved registers. | 
| 4330                              r15.bit();        // Saved stack pointer. | 4251                              r7.bit();         // Saved stack pointer. | 
| 4331 | 4252 | 
| 4332   // We also save lr, so the count here is one higher than the mask indicates. | 4253   // We also save r14+ip, so count here is one higher than the mask indicates. | 
| 4333   const int32_t kNumSavedRegs = kNumJSCallerSaved + 2; | 4254   const int32_t kNumSavedRegs = kNumJSCallerSaved + 3; | 
| 4334 | 4255 | 
| 4335   // Save all caller-save registers as this may be called from anywhere. | 4256   // Save all caller-save registers as this may be called from anywhere. | 
| 4336   __ mflr(ip); | 4257   __ CleanseP(r14); | 
|  | 4258   __ LoadRR(ip, r14); | 
| 4337   __ MultiPush(kSavedRegs | ip.bit()); | 4259   __ MultiPush(kSavedRegs | ip.bit()); | 
| 4338 | 4260 | 
| 4339   // Compute the function's address for the first argument. | 4261   // Compute the function's address for the first argument. | 
| 4340   __ subi(r3, ip, Operand(kReturnAddressDistanceFromFunctionStart)); | 4262 | 
|  | 4263   __ SubP(r2, ip, Operand(kReturnAddressDistanceFromFunctionStart)); | 
| 4341 | 4264 | 
| 4342   // The caller's return address is two slots above the saved temporaries. | 4265   // The caller's return address is two slots above the saved temporaries. | 
| 4343   // Grab that for the second argument to the hook. | 4266   // Grab that for the second argument to the hook. | 
| 4344   __ addi(r4, sp, Operand((kNumSavedRegs + 1) * kPointerSize)); | 4267   __ lay(r3, MemOperand(sp, kNumSavedRegs * kPointerSize)); | 
| 4345 | 4268 | 
| 4346   // Align the stack if necessary. | 4269   // Align the stack if necessary. | 
| 4347   int frame_alignment = masm->ActivationFrameAlignment(); | 4270   int frame_alignment = masm->ActivationFrameAlignment(); | 
| 4348   if (frame_alignment > kPointerSize) { | 4271   if (frame_alignment > kPointerSize) { | 
| 4349     __ mr(r15, sp); | 4272     __ LoadRR(r7, sp); | 
| 4350     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); | 4273     DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); | 
| 4351     __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); | 4274     __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); | 
| 4352   } | 4275   } | 
| 4353 | 4276 | 
| 4354 #if !defined(USE_SIMULATOR) | 4277 #if !defined(USE_SIMULATOR) | 
| 4355   uintptr_t entry_hook = | 4278   uintptr_t entry_hook = | 
| 4356       reinterpret_cast<uintptr_t>(isolate()->function_entry_hook()); | 4279       reinterpret_cast<uintptr_t>(isolate()->function_entry_hook()); | 
| 4357 #else | 4280   __ mov(ip, Operand(entry_hook)); | 
|  | 4281 | 
|  | 4282 #if ABI_USES_FUNCTION_DESCRIPTORS | 
|  | 4283   // Function descriptor | 
|  | 4284   __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize)); | 
|  | 4285   __ LoadP(ip, MemOperand(ip, 0)); | 
|  | 4286 // ip already set. | 
|  | 4287 #endif | 
|  | 4288 #endif | 
|  | 4289 | 
|  | 4290   // zLinux ABI requires caller's frame to have sufficient space for callee | 
|  | 4291   // preserved register save area. | 
|  | 4292   __ LoadImmP(r0, Operand::Zero()); | 
|  | 4293   __ StoreP(r0, MemOperand(sp, -kCalleeRegisterSaveAreaSize - | 
|  | 4294                                    kNumRequiredStackFrameSlots * kPointerSize)); | 
|  | 4295   __ lay(sp, MemOperand(sp, -kCalleeRegisterSaveAreaSize - | 
|  | 4296                                 kNumRequiredStackFrameSlots * kPointerSize)); | 
|  | 4297 #if defined(USE_SIMULATOR) | 
| 4358   // Under the simulator we need to indirect the entry hook through a | 4298   // Under the simulator we need to indirect the entry hook through a | 
| 4359   // trampoline function at a known address. | 4299   // trampoline function at a known address. | 
|  | 4300   // It additionally takes an isolate as a third parameter | 
|  | 4301   __ mov(r4, Operand(ExternalReference::isolate_address(isolate()))); | 
|  | 4302 | 
| 4360   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); | 4303   ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); | 
| 4361   ExternalReference entry_hook = ExternalReference( | 4304   __ mov(ip, Operand(ExternalReference( | 
| 4362       &dispatcher, ExternalReference::BUILTIN_CALL, isolate()); | 4305                  &dispatcher, ExternalReference::BUILTIN_CALL, isolate()))); | 
| 4363 |  | 
| 4364   // It additionally takes an isolate as a third parameter |  | 
| 4365   __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); |  | 
| 4366 #endif | 4306 #endif | 
| 4367 |  | 
| 4368   __ mov(ip, Operand(entry_hook)); |  | 
| 4369 |  | 
| 4370   if (ABI_USES_FUNCTION_DESCRIPTORS) { |  | 
| 4371     __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize)); |  | 
| 4372     __ LoadP(ip, MemOperand(ip, 0)); |  | 
| 4373   } |  | 
| 4374   // ip set above, so nothing more to do for ABI_CALL_VIA_IP. |  | 
| 4375 |  | 
| 4376   // PPC LINUX ABI: |  | 
| 4377   __ li(r0, Operand::Zero()); |  | 
| 4378   __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize)); |  | 
| 4379 |  | 
| 4380   __ Call(ip); | 4307   __ Call(ip); | 
| 4381 | 4308 | 
| 4382   __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize)); | 4309   // zLinux ABI requires caller's frame to have sufficient space for callee | 
|  | 4310   // preserved regsiter save area. | 
|  | 4311   __ la(sp, MemOperand(sp, kCalleeRegisterSaveAreaSize + | 
|  | 4312                                kNumRequiredStackFrameSlots * kPointerSize)); | 
| 4383 | 4313 | 
| 4384   // Restore the stack pointer if needed. | 4314   // Restore the stack pointer if needed. | 
| 4385   if (frame_alignment > kPointerSize) { | 4315   if (frame_alignment > kPointerSize) { | 
| 4386     __ mr(sp, r15); | 4316     __ LoadRR(sp, r7); | 
| 4387   } | 4317   } | 
| 4388 | 4318 | 
| 4389   // Also pop lr to get Ret(0). | 4319   // Also pop lr to get Ret(0). | 
| 4390   __ MultiPop(kSavedRegs | ip.bit()); | 4320   __ MultiPop(kSavedRegs | ip.bit()); | 
| 4391   __ mtlr(ip); | 4321   __ LoadRR(r14, ip); | 
| 4392   __ Ret(); | 4322   __ Ret(); | 
| 4393 } | 4323 } | 
| 4394 | 4324 | 
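
The `ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)))` step is simply a round-down of the stack pointer to the activation-frame alignment; the pre-alignment `sp` is parked in `r7` so it can be restored after the call. The same operation in one line of C++, as a sketch:

```cpp
#include <cstdint>

// Round sp down to a power-of-two alignment, as ClearRightImm does by
// clearing the low log2(alignment) bits.
uintptr_t AlignDown(uintptr_t sp, uintptr_t alignment) {
  return sp & ~(alignment - 1);  // e.g. AlignDown(0x7ffc, 16) == 0x7ff0
}
```
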
| 4395 |  | 
| 4396 template <class T> | 4325 template <class T> | 
| 4397 static void CreateArrayDispatch(MacroAssembler* masm, | 4326 static void CreateArrayDispatch(MacroAssembler* masm, | 
| 4398                                 AllocationSiteOverrideMode mode) { | 4327                                 AllocationSiteOverrideMode mode) { | 
| 4399   if (mode == DISABLE_ALLOCATION_SITES) { | 4328   if (mode == DISABLE_ALLOCATION_SITES) { | 
| 4400     T stub(masm->isolate(), GetInitialFastElementsKind(), mode); | 4329     T stub(masm->isolate(), GetInitialFastElementsKind(), mode); | 
| 4401     __ TailCallStub(&stub); | 4330     __ TailCallStub(&stub); | 
| 4402   } else if (mode == DONT_OVERRIDE) { | 4331   } else if (mode == DONT_OVERRIDE) { | 
| 4403     int last_index = | 4332     int last_index = | 
| 4404         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | 4333         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | 
| 4405     for (int i = 0; i <= last_index; ++i) { | 4334     for (int i = 0; i <= last_index; ++i) { | 
| 4406       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 4335       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 
| 4407       __ Cmpi(r6, Operand(kind), r0); | 4336       __ CmpP(r5, Operand(kind)); | 
| 4408       T stub(masm->isolate(), kind); | 4337       T stub(masm->isolate(), kind); | 
| 4409       __ TailCallStub(&stub, eq); | 4338       __ TailCallStub(&stub, eq); | 
| 4410     } | 4339     } | 
| 4411 | 4340 | 
| 4412     // If we reached this point there is a problem. | 4341     // If we reached this point there is a problem. | 
| 4413     __ Abort(kUnexpectedElementsKindInArrayConstructor); | 4342     __ Abort(kUnexpectedElementsKindInArrayConstructor); | 
| 4414   } else { | 4343   } else { | 
| 4415     UNREACHABLE(); | 4344     UNREACHABLE(); | 
| 4416   } | 4345   } | 
| 4417 } | 4346 } | 
| 4418 | 4347 | 
| 4419 |  | 
| 4420 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | 4348 static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | 
| 4421                                            AllocationSiteOverrideMode mode) { | 4349                                            AllocationSiteOverrideMode mode) { | 
| 4422   // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | 4350   // r4 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | 
| 4423   // r6 - kind (if mode != DISABLE_ALLOCATION_SITES) | 4351   // r5 - kind (if mode != DISABLE_ALLOCATION_SITES) | 
| 4424   // r3 - number of arguments | 4352   // r2 - number of arguments | 
| 4425   // r4 - constructor? | 4353   // r3 - constructor? | 
| 4426   // sp[0] - last argument | 4354   // sp[0] - last argument | 
| 4427   Label normal_sequence; | 4355   Label normal_sequence; | 
| 4428   if (mode == DONT_OVERRIDE) { | 4356   if (mode == DONT_OVERRIDE) { | 
| 4429     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 4357     STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); | 
| 4430     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 4358     STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); | 
| 4431     STATIC_ASSERT(FAST_ELEMENTS == 2); | 4359     STATIC_ASSERT(FAST_ELEMENTS == 2); | 
| 4432     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 4360     STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3); | 
| 4433     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); | 4361     STATIC_ASSERT(FAST_DOUBLE_ELEMENTS == 4); | 
| 4434     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | 4362     STATIC_ASSERT(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | 
| 4435 | 4363 | 
| 4436     // is the low bit set? If so, we are holey and that is good. | 4364     // is the low bit set? If so, we are holey and that is good. | 
| 4437     __ andi(r0, r6, Operand(1)); | 4365     __ AndP(r0, r5, Operand(1)); | 
| 4438     __ bne(&normal_sequence, cr0); | 4366     __ bne(&normal_sequence); | 
| 4439   } | 4367   } | 
| 4440 | 4368 | 
| 4441   // look at the first argument | 4369   // look at the first argument | 
| 4442   __ LoadP(r8, MemOperand(sp, 0)); | 4370   __ LoadP(r7, MemOperand(sp, 0)); | 
| 4443   __ cmpi(r8, Operand::Zero()); | 4371   __ CmpP(r7, Operand::Zero()); | 
| 4444   __ beq(&normal_sequence); | 4372   __ beq(&normal_sequence); | 
| 4445 | 4373 | 
| 4446   if (mode == DISABLE_ALLOCATION_SITES) { | 4374   if (mode == DISABLE_ALLOCATION_SITES) { | 
| 4447     ElementsKind initial = GetInitialFastElementsKind(); | 4375     ElementsKind initial = GetInitialFastElementsKind(); | 
| 4448     ElementsKind holey_initial = GetHoleyElementsKind(initial); | 4376     ElementsKind holey_initial = GetHoleyElementsKind(initial); | 
| 4449 | 4377 | 
| 4450     ArraySingleArgumentConstructorStub stub_holey( | 4378     ArraySingleArgumentConstructorStub stub_holey( | 
| 4451         masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES); | 4379         masm->isolate(), holey_initial, DISABLE_ALLOCATION_SITES); | 
| 4452     __ TailCallStub(&stub_holey); | 4380     __ TailCallStub(&stub_holey); | 
| 4453 | 4381 | 
| 4454     __ bind(&normal_sequence); | 4382     __ bind(&normal_sequence); | 
| 4455     ArraySingleArgumentConstructorStub stub(masm->isolate(), initial, | 4383     ArraySingleArgumentConstructorStub stub(masm->isolate(), initial, | 
| 4456                                             DISABLE_ALLOCATION_SITES); | 4384                                             DISABLE_ALLOCATION_SITES); | 
| 4457     __ TailCallStub(&stub); | 4385     __ TailCallStub(&stub); | 
| 4458   } else if (mode == DONT_OVERRIDE) { | 4386   } else if (mode == DONT_OVERRIDE) { | 
| 4459     // We are going to create a holey array, but our kind is non-holey. | 4387     // We are going to create a holey array, but our kind is non-holey. | 
| 4460     // Fix kind and retry (only if we have an allocation site in the slot). | 4388     // Fix kind and retry (only if we have an allocation site in the slot). | 
| 4461     __ addi(r6, r6, Operand(1)); | 4389     __ AddP(r5, r5, Operand(1)); | 
| 4462 |  | 
| 4463     if (FLAG_debug_code) { | 4390     if (FLAG_debug_code) { | 
| 4464       __ LoadP(r8, FieldMemOperand(r5, 0)); | 4391       __ LoadP(r7, FieldMemOperand(r4, 0)); | 
| 4465       __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex); | 4392       __ CompareRoot(r7, Heap::kAllocationSiteMapRootIndex); | 
| 4466       __ Assert(eq, kExpectedAllocationSite); | 4393       __ Assert(eq, kExpectedAllocationSite); | 
| 4467     } | 4394     } | 
| 4468 | 4395 | 
| 4469     // Save the resulting elements kind in type info. We can't just store r6 | 4396     // Save the resulting elements kind in type info. We can't just store r5 | 
| 4470     // in the AllocationSite::transition_info field because elements kind is | 4397     // in the AllocationSite::transition_info field because elements kind is | 
| 4471     // restricted to a portion of the field...upper bits need to be left alone. | 4398     // restricted to a portion of the field...upper bits need to be left alone. | 
| 4472     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 4399     STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 
| 4473     __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset)); | 4400     __ LoadP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset)); | 
| 4474     __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0); | 4401     __ AddSmiLiteral(r6, r6, Smi::FromInt(kFastElementsKindPackedToHoley), r0); | 
| 4475     __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset), | 4402     __ StoreP(r6, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset)); | 
| 4476               r0); |  | 
| 4477 | 4403 | 
| 4478     __ bind(&normal_sequence); | 4404     __ bind(&normal_sequence); | 
| 4479     int last_index = | 4405     int last_index = | 
| 4480         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | 4406         GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | 
| 4481     for (int i = 0; i <= last_index; ++i) { | 4407     for (int i = 0; i <= last_index; ++i) { | 
| 4482       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 4408       ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 
| 4483       __ mov(r0, Operand(kind)); | 4409       __ CmpP(r5, Operand(kind)); | 
| 4484       __ cmp(r6, r0); |  | 
| 4485       ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); | 4410       ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); | 
| 4486       __ TailCallStub(&stub, eq); | 4411       __ TailCallStub(&stub, eq); | 
| 4487     } | 4412     } | 
| 4488 | 4413 | 
| 4489     // If we reached this point there is a problem. | 4414     // If we reached this point there is a problem. | 
| 4490     __ Abort(kUnexpectedElementsKindInArrayConstructor); | 4415     __ Abort(kUnexpectedElementsKindInArrayConstructor); | 
| 4491   } else { | 4416   } else { | 
| 4492     UNREACHABLE(); | 4417     UNREACHABLE(); | 
| 4493   } | 4418   } | 
| 4494 } | 4419 } | 
| 4495 | 4420 | 
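
The `AndP`/`AddP` pair relies on the kind numbering pinned down by the `STATIC_ASSERT`s: packed fast kinds are even, and each holey variant sits exactly one above its packed counterpart. A minimal sketch of that encoding (enum values copied from the asserts):

```cpp
enum ElementsKind {
  FAST_SMI_ELEMENTS = 0,
  FAST_HOLEY_SMI_ELEMENTS = 1,
  FAST_ELEMENTS = 2,
  FAST_HOLEY_ELEMENTS = 3,
  FAST_DOUBLE_ELEMENTS = 4,
  FAST_HOLEY_DOUBLE_ELEMENTS = 5,
};

// The AndP(r0, r5, Operand(1)) test: holey kinds have the low bit set.
inline bool IsHoleyKind(ElementsKind kind) { return (kind & 1) != 0; }

// The AddP(r5, r5, Operand(1)) fixup: valid only for a packed (even) kind,
// where adding one lands on the matching holey kind.
inline ElementsKind PackedToHoley(ElementsKind kind) {
  return static_cast<ElementsKind>(kind + 1);
}
```
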
| 4496 |  | 
| 4497 template <class T> | 4421 template <class T> | 
| 4498 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { | 4422 static void ArrayConstructorStubAheadOfTimeHelper(Isolate* isolate) { | 
| 4499   int to_index = | 4423   int to_index = | 
| 4500       GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | 4424       GetSequenceIndexFromFastElementsKind(TERMINAL_FAST_ELEMENTS_KIND); | 
| 4501   for (int i = 0; i <= to_index; ++i) { | 4425   for (int i = 0; i <= to_index; ++i) { | 
| 4502     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 4426     ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 
| 4503     T stub(isolate, kind); | 4427     T stub(isolate, kind); | 
| 4504     stub.GetCode(); | 4428     stub.GetCode(); | 
| 4505     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { | 4429     if (AllocationSite::GetMode(kind) != DONT_TRACK_ALLOCATION_SITE) { | 
| 4506       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); | 4430       T stub1(isolate, kind, DISABLE_ALLOCATION_SITES); | 
| 4507       stub1.GetCode(); | 4431       stub1.GetCode(); | 
| 4508     } | 4432     } | 
| 4509   } | 4433   } | 
| 4510 } | 4434 } | 
| 4511 | 4435 | 
| 4512 |  | 
| 4513 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { | 4436 void ArrayConstructorStubBase::GenerateStubsAheadOfTime(Isolate* isolate) { | 
| 4514   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( | 4437   ArrayConstructorStubAheadOfTimeHelper<ArrayNoArgumentConstructorStub>( | 
| 4515       isolate); | 4438       isolate); | 
| 4516   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( | 4439   ArrayConstructorStubAheadOfTimeHelper<ArraySingleArgumentConstructorStub>( | 
| 4517       isolate); | 4440       isolate); | 
| 4518   ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( | 4441   ArrayConstructorStubAheadOfTimeHelper<ArrayNArgumentsConstructorStub>( | 
| 4519       isolate); | 4442       isolate); | 
| 4520 } | 4443 } | 
| 4521 | 4444 | 
| 4522 |  | 
| 4523 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( | 4445 void InternalArrayConstructorStubBase::GenerateStubsAheadOfTime( | 
| 4524     Isolate* isolate) { | 4446     Isolate* isolate) { | 
| 4525   ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS}; | 4447   ElementsKind kinds[2] = {FAST_ELEMENTS, FAST_HOLEY_ELEMENTS}; | 
| 4526   for (int i = 0; i < 2; i++) { | 4448   for (int i = 0; i < 2; i++) { | 
| 4527     // For internal arrays we only need a few things | 4449     // For internal arrays we only need a few things | 
| 4528     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); | 4450     InternalArrayNoArgumentConstructorStub stubh1(isolate, kinds[i]); | 
| 4529     stubh1.GetCode(); | 4451     stubh1.GetCode(); | 
| 4530     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); | 4452     InternalArraySingleArgumentConstructorStub stubh2(isolate, kinds[i]); | 
| 4531     stubh2.GetCode(); | 4453     stubh2.GetCode(); | 
| 4532     InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]); | 4454     InternalArrayNArgumentsConstructorStub stubh3(isolate, kinds[i]); | 
| 4533     stubh3.GetCode(); | 4455     stubh3.GetCode(); | 
| 4534   } | 4456   } | 
| 4535 } | 4457 } | 
| 4536 | 4458 | 
| 4537 |  | 
| 4538 void ArrayConstructorStub::GenerateDispatchToArrayStub( | 4459 void ArrayConstructorStub::GenerateDispatchToArrayStub( | 
| 4539     MacroAssembler* masm, AllocationSiteOverrideMode mode) { | 4460     MacroAssembler* masm, AllocationSiteOverrideMode mode) { | 
| 4540   if (argument_count() == ANY) { | 4461   if (argument_count() == ANY) { | 
| 4541     Label not_zero_case, not_one_case; | 4462     Label not_zero_case, not_one_case; | 
| 4542     __ cmpi(r3, Operand::Zero()); | 4463     __ CmpP(r2, Operand::Zero()); | 
| 4543   __ bne(&not_zero_case); | 4464   __ bne(&not_zero_case); | 
| 4544     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | 4465     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | 
| 4545 | 4466 | 
| 4546   __ bind(&not_zero_case); | 4467   __ bind(&not_zero_case); | 
| 4547     __ cmpi(r3, Operand(1)); | 4468     __ CmpP(r2, Operand(1)); | 
| 4548   __ bgt(&not_one_case); | 4469   __ bgt(&not_one_case); | 
| 4549     CreateArrayDispatchOneArgument(masm, mode); | 4470     CreateArrayDispatchOneArgument(masm, mode); | 
| 4550 | 4471 | 
| 4551   __ bind(&not_one_case); | 4472   __ bind(&not_one_case); | 
| 4552     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); | 4473     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); | 
| 4553   } else if (argument_count() == NONE) { | 4474   } else if (argument_count() == NONE) { | 
| 4554     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | 4475     CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | 
| 4555   } else if (argument_count() == ONE) { | 4476   } else if (argument_count() == ONE) { | 
| 4556     CreateArrayDispatchOneArgument(masm, mode); | 4477     CreateArrayDispatchOneArgument(masm, mode); | 
| 4557   } else if (argument_count() == MORE_THAN_ONE) { | 4478   } else if (argument_count() == MORE_THAN_ONE) { | 
| 4558     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); | 4479     CreateArrayDispatch<ArrayNArgumentsConstructorStub>(masm, mode); | 
| 4559   } else { | 4480   } else { | 
| 4560     UNREACHABLE(); | 4481     UNREACHABLE(); | 
| 4561   } | 4482   } | 
| 4562 } | 4483 } | 
| 4563 | 4484 | 
| 4564 |  | 
| 4565 void ArrayConstructorStub::Generate(MacroAssembler* masm) { | 4485 void ArrayConstructorStub::Generate(MacroAssembler* masm) { | 
| 4566   // ----------- S t a t e ------------- | 4486   // ----------- S t a t e ------------- | 
| 4567   //  -- r3 : argc (only if argument_count() == ANY) | 4487   //  -- r2 : argc (only if argument_count() == ANY) | 
| 4568   //  -- r4 : constructor | 4488   //  -- r3 : constructor | 
| 4569   //  -- r5 : AllocationSite or undefined | 4489   //  -- r4 : AllocationSite or undefined | 
| 4570   //  -- r6 : new target | 4490   //  -- r5 : new target | 
| 4571   //  -- sp[0] : return address | 4491   //  -- sp[0] : return address | 
| 4572   //  -- sp[4] : last argument | 4492   //  -- sp[4] : last argument | 
| 4573   // ----------------------------------- | 4493   // ----------------------------------- | 
| 4574 | 4494 | 
| 4575   if (FLAG_debug_code) { | 4495   if (FLAG_debug_code) { | 
| 4576     // The array construct code is only set for the global and natives | 4496     // The array construct code is only set for the global and natives | 
| 4577     // builtin Array functions which always have maps. | 4497     // builtin Array functions which always have maps. | 
| 4578 | 4498 | 
| 4579     // Initial map for the builtin Array function should be a map. | 4499     // Initial map for the builtin Array function should be a map. | 
| 4580     __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); | 4500     __ LoadP(r6, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset)); | 
| 4581     // Will both indicate a NULL and a Smi. | 4501     // Will both indicate a NULL and a Smi. | 
| 4582     __ TestIfSmi(r7, r0); | 4502     __ TestIfSmi(r6); | 
| 4583     __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0); | 4503     __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0); | 
| 4584     __ CompareObjectType(r7, r7, r8, MAP_TYPE); | 4504     __ CompareObjectType(r6, r6, r7, MAP_TYPE); | 
| 4585     __ Assert(eq, kUnexpectedInitialMapForArrayFunction); | 4505     __ Assert(eq, kUnexpectedInitialMapForArrayFunction); | 
| 4586 | 4506 | 
| 4587     // We should either have undefined in r5 or a valid AllocationSite | 4507     // We should either have undefined in r4 or a valid AllocationSite | 
| 4588     __ AssertUndefinedOrAllocationSite(r5, r7); | 4508     __ AssertUndefinedOrAllocationSite(r4, r6); | 
| 4589   } | 4509   } | 
| 4590 | 4510 | 
| 4591   // Enter the context of the Array function. | 4511   // Enter the context of the Array function. | 
| 4592   __ LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset)); | 4512   __ LoadP(cp, FieldMemOperand(r3, JSFunction::kContextOffset)); | 
| 4593 | 4513 | 
| 4594   Label subclassing; | 4514   Label subclassing; | 
| 4595   __ cmp(r6, r4); | 4515   __ CmpP(r5, r3); | 
| 4596   __ bne(&subclassing); | 4516   __ bne(&subclassing, Label::kNear); | 
| 4597 | 4517 | 
| 4598   Label no_info; | 4518   Label no_info; | 
| 4599   // Get the elements kind and case on that. | 4519   // Get the elements kind and case on that. | 
| 4600   __ CompareRoot(r5, Heap::kUndefinedValueRootIndex); | 4520   __ CompareRoot(r4, Heap::kUndefinedValueRootIndex); | 
| 4601   __ beq(&no_info); | 4521   __ beq(&no_info); | 
| 4602 | 4522 | 
| 4603   __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset)); | 4523   __ LoadP(r5, FieldMemOperand(r4, AllocationSite::kTransitionInfoOffset)); | 
| 4604   __ SmiUntag(r6); | 4524   __ SmiUntag(r5); | 
| 4605   STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 4525   STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 
| 4606   __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask)); | 4526   __ AndP(r5, Operand(AllocationSite::ElementsKindBits::kMask)); | 
| 4607   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); | 4527   GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); | 
| 4608 | 4528 | 
| 4609   __ bind(&no_info); | 4529   __ bind(&no_info); | 
| 4610   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); | 4530   GenerateDispatchToArrayStub(masm, DISABLE_ALLOCATION_SITES); | 
| 4611 | 4531 | 
| 4612   __ bind(&subclassing); | 4532   __ bind(&subclassing); | 
| 4613   switch (argument_count()) { | 4533   switch (argument_count()) { | 
| 4614     case ANY: | 4534     case ANY: | 
| 4615     case MORE_THAN_ONE: | 4535     case MORE_THAN_ONE: | 
| 4616       __ ShiftLeftImm(r0, r3, Operand(kPointerSizeLog2)); | 4536       __ ShiftLeftP(r1, r2, Operand(kPointerSizeLog2)); | 
| 4617       __ StorePX(r4, MemOperand(sp, r0)); | 4537       __ StoreP(r3, MemOperand(sp, r1)); | 
| 4618       __ addi(r3, r3, Operand(3)); | 4538       __ AddP(r2, r2, Operand(3)); | 
| 4619       break; | 4539       break; | 
| 4620     case NONE: | 4540     case NONE: | 
| 4621       __ StoreP(r4, MemOperand(sp, 0 * kPointerSize)); | 4541       __ StoreP(r3, MemOperand(sp, 0 * kPointerSize)); | 
| 4622       __ li(r3, Operand(3)); | 4542       __ LoadImmP(r2, Operand(3)); | 
| 4623       break; | 4543       break; | 
| 4624     case ONE: | 4544     case ONE: | 
| 4625       __ StoreP(r4, MemOperand(sp, 1 * kPointerSize)); | 4545       __ StoreP(r3, MemOperand(sp, 1 * kPointerSize)); | 
| 4626       __ li(r3, Operand(4)); | 4546       __ LoadImmP(r2, Operand(4)); | 
| 4627       break; | 4547       break; | 
| 4628   } | 4548   } | 
| 4629 | 4549 | 
| 4630   __ Push(r6, r5); | 4550   __ Push(r5, r4); | 
| 4631   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate())); | 4551   __ JumpToExternalReference(ExternalReference(Runtime::kNewArray, isolate())); | 
| 4632 } | 4552 } | 
| 4633 | 4553 | 
| 4634 |  | 
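The bulk of this port is a mechanical register renumbering: V8's JS calling convention starts at r3 on PPC but at r2 on S390, so every operand in `ArrayConstructorStub::Generate` (and in the stubs below) shifts down by one, with PPC's `r11`/`r0` scratch pair becoming `r1`/`r0`. A minimal standalone sketch of that mapping, written as our own annotation rather than anything taken from either source file:

```cpp
#include <cstdio>

// Our own summary of the renumbering visible in this diff; the role names
// are informal labels, not V8 identifiers.
struct RegMap { const char* role; const char* ppc; const char* s390; };

int main() {
  static const RegMap kMap[] = {
      {"argc / result",          "r3",  "r2"},
      {"constructor / function", "r4",  "r3"},
      {"AllocationSite",         "r5",  "r4"},
      {"new target",             "r6",  "r5"},
      {"scratch",                "r7",  "r6"},
      {"scratch",                "r8",  "r7"},
      {"scratch",                "r9",  "r8"},
      {"scratch",                "r10", "r9"},
      {"assembler temp",         "r11", "r1"},
  };
  for (const RegMap& m : kMap) {
    std::printf("%-24s %-4s -> %s\n", m.role, m.ppc, m.s390);
  }
  return 0;
}
```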
| 4635 void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm, | 4554 void InternalArrayConstructorStub::GenerateCase(MacroAssembler* masm, | 
| 4636                                                 ElementsKind kind) { | 4555                                                 ElementsKind kind) { | 
| 4637   __ cmpli(r3, Operand(1)); | 4556   __ CmpLogicalP(r2, Operand(1)); | 
| 4638 | 4557 | 
| 4639   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); | 4558   InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); | 
| 4640   __ TailCallStub(&stub0, lt); | 4559   __ TailCallStub(&stub0, lt); | 
| 4641 | 4560 | 
| 4642   InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); | 4561   InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); | 
| 4643   __ TailCallStub(&stubN, gt); | 4562   __ TailCallStub(&stubN, gt); | 
| 4644 | 4563 | 
| 4645   if (IsFastPackedElementsKind(kind)) { | 4564   if (IsFastPackedElementsKind(kind)) { | 
| 4646     // We might need to create a holey array; | 4565     // We might need to create a holey array; |
| 4647     // check the first argument (the requested length). | 4566     // check the first argument (the requested length). |
| 4648     __ LoadP(r6, MemOperand(sp, 0)); | 4567     __ LoadP(r5, MemOperand(sp, 0)); | 
| 4649     __ cmpi(r6, Operand::Zero()); | 4568     __ CmpP(r5, Operand::Zero()); | 
| 4650 | 4569 | 
| 4651     InternalArraySingleArgumentConstructorStub stub1_holey( | 4570     InternalArraySingleArgumentConstructorStub stub1_holey( | 
| 4652         isolate(), GetHoleyElementsKind(kind)); | 4571         isolate(), GetHoleyElementsKind(kind)); | 
| 4653     __ TailCallStub(&stub1_holey, ne); | 4572     __ TailCallStub(&stub1_holey, ne); | 
| 4654   } | 4573   } | 
| 4655 | 4574 | 
| 4656   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); | 4575   InternalArraySingleArgumentConstructorStub stub1(isolate(), kind); | 
| 4657   __ TailCallStub(&stub1); | 4576   __ TailCallStub(&stub1); | 
| 4658 } | 4577 } | 
| 4659 | 4578 | 
| 4660 |  | 
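`GenerateCase` compares argc against 1 once and then conditionally tail-calls one of three stubs; packed kinds additionally get a holey variant when the single length argument is non-zero. A sketch of that decision logic in plain C++ (the enum and function names are ours, not V8's):

```cpp
// Our own pseudocode for the dispatch GenerateCase emits.
enum class InternalArrayStub {
  kNoArgument, kSingleArgument, kSingleArgumentHoley, kNArguments
};

InternalArrayStub Dispatch(unsigned argc, long first_arg, bool packed_kind) {
  if (argc < 1) return InternalArrayStub::kNoArgument;  // TailCallStub(&stub0, lt)
  if (argc > 1) return InternalArrayStub::kNArguments;  // TailCallStub(&stubN, gt)
  // argc == 1: a non-zero length on a packed kind must produce a holey
  // array, since the elements are allocated but never written.
  if (packed_kind && first_arg != 0) {
    return InternalArrayStub::kSingleArgumentHoley;
  }
  return InternalArrayStub::kSingleArgument;
}
```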
| 4661 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | 4579 void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | 
| 4662   // ----------- S t a t e ------------- | 4580   // ----------- S t a t e ------------- | 
| 4663   //  -- r3 : argc | 4581   //  -- r2 : argc | 
| 4664   //  -- r4 : constructor | 4582   //  -- r3 : constructor | 
| 4665   //  -- sp[0] : return address | 4583   //  -- sp[0] : return address | 
| 4666   //  -- sp[4] : last argument | 4584   //  -- sp[4] : last argument | 
| 4667   // ----------------------------------- | 4585   // ----------------------------------- | 
| 4668 | 4586 | 
| 4669   if (FLAG_debug_code) { | 4587   if (FLAG_debug_code) { | 
| 4670     // The array construct code is only set for the global and natives | 4588     // The array construct code is only set for the global and natives | 
| 4671     // builtin Array functions which always have maps. | 4589     // builtin Array functions which always have maps. | 
| 4672 | 4590 | 
| 4673     // Initial map for the builtin Array function should be a map. | 4591     // Initial map for the builtin Array function should be a map. | 
| 4674     __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); | 4592     __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset)); | 
| 4675     // A Smi test detects both NULL and a Smi. | 4593     // A Smi test detects both NULL and a Smi. |
| 4676     __ TestIfSmi(r6, r0); | 4594     __ TestIfSmi(r5); | 
| 4677     __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0); | 4595     __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0); | 
| 4678     __ CompareObjectType(r6, r6, r7, MAP_TYPE); | 4596     __ CompareObjectType(r5, r5, r6, MAP_TYPE); | 
| 4679     __ Assert(eq, kUnexpectedInitialMapForArrayFunction); | 4597     __ Assert(eq, kUnexpectedInitialMapForArrayFunction); | 
| 4680   } | 4598   } | 
| 4681 | 4599 | 
| 4682   // Figure out the right elements kind | 4600   // Figure out the right elements kind | 
| 4683   __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); | 4601   __ LoadP(r5, FieldMemOperand(r3, JSFunction::kPrototypeOrInitialMapOffset)); | 
| 4684   // Load the map's "bit field 2" into |result|. | 4602   // Load the map's "bit field 2" into |result|. | 
| 4685   __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset)); | 4603   __ LoadlB(r5, FieldMemOperand(r5, Map::kBitField2Offset)); | 
| 4686   // Retrieve elements_kind from bit field 2. | 4604   // Retrieve elements_kind from bit field 2. | 
| 4687   __ DecodeField<Map::ElementsKindBits>(r6); | 4605   __ DecodeField<Map::ElementsKindBits>(r5); | 
| 4688 | 4606 | 
| 4689   if (FLAG_debug_code) { | 4607   if (FLAG_debug_code) { | 
| 4690     Label done; | 4608     Label done; | 
| 4691     __ cmpi(r6, Operand(FAST_ELEMENTS)); | 4609     __ CmpP(r5, Operand(FAST_ELEMENTS)); | 
| 4692     __ beq(&done); | 4610     __ beq(&done); | 
| 4693     __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS)); | 4611     __ CmpP(r5, Operand(FAST_HOLEY_ELEMENTS)); | 
| 4694     __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray); | 4612     __ Assert(eq, kInvalidElementsKindForInternalArrayOrInternalPackedArray); | 
| 4695     __ bind(&done); | 4613     __ bind(&done); | 
| 4696   } | 4614   } | 
| 4697 | 4615 | 
| 4698   Label fast_elements_case; | 4616   Label fast_elements_case; | 
| 4699   __ cmpi(r6, Operand(FAST_ELEMENTS)); | 4617   __ CmpP(r5, Operand(FAST_ELEMENTS)); | 
| 4700   __ beq(&fast_elements_case); | 4618   __ beq(&fast_elements_case); | 
| 4701   GenerateCase(masm, FAST_HOLEY_ELEMENTS); | 4619   GenerateCase(masm, FAST_HOLEY_ELEMENTS); | 
| 4702 | 4620 | 
| 4703   __ bind(&fast_elements_case); | 4621   __ bind(&fast_elements_case); | 
| 4704   GenerateCase(masm, FAST_ELEMENTS); | 4622   GenerateCase(masm, FAST_ELEMENTS); | 
| 4705 } | 4623 } | 
| 4706 | 4624 | 
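`DecodeField<Map::ElementsKindBits>` reduces to a shift-and-mask on the byte loaded from `Map::kBitField2Offset`. A sketch of that decode with placeholder shift/width constants (the real values come from V8's `BitField` templates and are not shown in this diff):

```cpp
#include <cstdint>

// Placeholder constants: the actual shift and width of
// Map::ElementsKindBits are defined by V8's BitField machinery.
constexpr unsigned kElementsKindShift = 3;
constexpr unsigned kElementsKindWidth = 5;

// What DecodeField<Map::ElementsKindBits> computes on bit field 2.
inline uint8_t DecodeElementsKind(uint8_t bit_field2) {
  return (bit_field2 >> kElementsKindShift) &
         ((1u << kElementsKindWidth) - 1);
}
```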
| 4707 void FastNewObjectStub::Generate(MacroAssembler* masm) { | 4625 void FastNewObjectStub::Generate(MacroAssembler* masm) { | 
| 4708   // ----------- S t a t e ------------- | 4626   // ----------- S t a t e ------------- | 
| 4709   //  -- r4 : target | 4627   //  -- r3 : target | 
| 4710   //  -- r6 : new target | 4628   //  -- r5 : new target | 
| 4711   //  -- cp : context | 4629   //  -- cp : context | 
| 4712   //  -- lr : return address | 4630   //  -- lr : return address | 
| 4713   // ----------------------------------- | 4631   // ----------------------------------- | 
| 4714   __ AssertFunction(r4); | 4632   __ AssertFunction(r3); | 
| 4715   __ AssertReceiver(r6); | 4633   __ AssertReceiver(r5); | 
| 4716 | 4634 | 
| 4717   // Verify that the new target is a JSFunction. | 4635   // Verify that the new target is a JSFunction. | 
| 4718   Label new_object; | 4636   Label new_object; | 
| 4719   __ CompareObjectType(r6, r5, r5, JS_FUNCTION_TYPE); | 4637   __ CompareObjectType(r5, r4, r4, JS_FUNCTION_TYPE); | 
| 4720   __ bne(&new_object); | 4638   __ bne(&new_object); | 
| 4721 | 4639 | 
| 4722   // Load the initial map and verify that it's in fact a map. | 4640   // Load the initial map and verify that it's in fact a map. | 
| 4723   __ LoadP(r5, FieldMemOperand(r6, JSFunction::kPrototypeOrInitialMapOffset)); | 4641   __ LoadP(r4, FieldMemOperand(r5, JSFunction::kPrototypeOrInitialMapOffset)); | 
| 4724   __ JumpIfSmi(r5, &new_object); | 4642   __ JumpIfSmi(r4, &new_object); | 
| 4725   __ CompareObjectType(r5, r3, r3, MAP_TYPE); | 4643   __ CompareObjectType(r4, r2, r2, MAP_TYPE); | 
| 4726   __ bne(&new_object); | 4644   __ bne(&new_object); | 
| 4727 | 4645 | 
| 4728   // Fall back to runtime if the target differs from the new target's | 4646   // Fall back to runtime if the target differs from the new target's | 
| 4729   // initial map constructor. | 4647   // initial map constructor. | 
| 4730   __ LoadP(r3, FieldMemOperand(r5, Map::kConstructorOrBackPointerOffset)); | 4648   __ LoadP(r2, FieldMemOperand(r4, Map::kConstructorOrBackPointerOffset)); | 
| 4731   __ cmp(r3, r4); | 4649   __ CmpP(r2, r3); | 
| 4732   __ bne(&new_object); | 4650   __ bne(&new_object); | 
| 4733 | 4651 | 
| 4734   // Allocate the JSObject on the heap. | 4652   // Allocate the JSObject on the heap. | 
| 4735   Label allocate, done_allocate; | 4653   Label allocate, done_allocate; | 
| 4736   __ lbz(r7, FieldMemOperand(r5, Map::kInstanceSizeOffset)); | 4654   __ LoadlB(r6, FieldMemOperand(r4, Map::kInstanceSizeOffset)); | 
| 4737   __ Allocate(r7, r3, r8, r9, &allocate, SIZE_IN_WORDS); | 4655   __ Allocate(r6, r2, r7, r8, &allocate, SIZE_IN_WORDS); | 
| 4738   __ bind(&done_allocate); | 4656   __ bind(&done_allocate); | 
| 4739 | 4657 | 
| 4740   // Initialize the JSObject fields. | 4658   // Initialize the JSObject fields. | 
| 4741   __ StoreP(r5, MemOperand(r3, JSObject::kMapOffset)); | 4659   __ StoreP(r4, MemOperand(r2, JSObject::kMapOffset)); | 
| 4742   __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); | 4660   __ LoadRoot(r5, Heap::kEmptyFixedArrayRootIndex); | 
| 4743   __ StoreP(r6, MemOperand(r3, JSObject::kPropertiesOffset)); | 4661   __ StoreP(r5, MemOperand(r2, JSObject::kPropertiesOffset)); | 
| 4744   __ StoreP(r6, MemOperand(r3, JSObject::kElementsOffset)); | 4662   __ StoreP(r5, MemOperand(r2, JSObject::kElementsOffset)); | 
| 4745   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); | 4663   STATIC_ASSERT(JSObject::kHeaderSize == 3 * kPointerSize); | 
| 4746   __ addi(r4, r3, Operand(JSObject::kHeaderSize)); | 4664   __ AddP(r3, r2, Operand(JSObject::kHeaderSize)); | 
| 4747 | 4665 | 
| 4748   // ----------- S t a t e ------------- | 4666   // ----------- S t a t e ------------- | 
| 4749   //  -- r3 : result (untagged) | 4667   //  -- r2 : result (untagged) | 
| 4750   //  -- r4 : result fields (untagged) | 4668   //  -- r3 : result fields (untagged) | 
| 4751   //  -- r8 : result end (untagged) | 4669   //  -- r7 : result end (untagged) | 
| 4752   //  -- r5 : initial map | 4670   //  -- r4 : initial map | 
| 4753   //  -- cp : context | 4671   //  -- cp : context | 
| 4754   //  -- lr : return address | 4672   //  -- lr : return address | 
| 4755   // ----------------------------------- | 4673   // ----------------------------------- | 
| 4756 | 4674 | 
| 4757   // Perform in-object slack tracking if requested. | 4675   // Perform in-object slack tracking if requested. | 
| 4758   Label slack_tracking; | 4676   Label slack_tracking; | 
| 4759   STATIC_ASSERT(Map::kNoSlackTracking == 0); | 4677   STATIC_ASSERT(Map::kNoSlackTracking == 0); | 
| 4760   __ LoadRoot(r9, Heap::kUndefinedValueRootIndex); | 4678   __ LoadRoot(r8, Heap::kUndefinedValueRootIndex); | 
| 4761   __ lwz(r6, FieldMemOperand(r5, Map::kBitField3Offset)); | 4679   __ LoadlW(r5, FieldMemOperand(r4, Map::kBitField3Offset)); | 
| 4762   __ DecodeField<Map::ConstructionCounter>(r10, r6, SetRC); | 4680   __ DecodeField<Map::ConstructionCounter>(r9, r5); | 
| 4763   __ bne(&slack_tracking, cr0); | 4681   __ LoadAndTestP(r9, r9); | 
|  | 4682   __ bne(&slack_tracking); | 
| 4764   { | 4683   { | 
| 4765     // Initialize all in-object fields with undefined. | 4684     // Initialize all in-object fields with undefined. | 
| 4766     __ InitializeFieldsWithFiller(r4, r8, r9); | 4685     __ InitializeFieldsWithFiller(r3, r7, r8); | 
| 4767 | 4686 | 
| 4768     // Add the object tag to make the JSObject real. | 4687     // Add the object tag to make the JSObject real. | 
| 4769     __ addi(r3, r3, Operand(kHeapObjectTag)); | 4688     __ AddP(r2, r2, Operand(kHeapObjectTag)); | 
| 4770     __ Ret(); | 4689     __ Ret(); | 
| 4771   } | 4690   } | 
| 4772   __ bind(&slack_tracking); | 4691   __ bind(&slack_tracking); | 
| 4773   { | 4692   { | 
| 4774     // Decrease generous allocation count. | 4693     // Decrease generous allocation count. | 
| 4775     STATIC_ASSERT(Map::ConstructionCounter::kNext == 32); | 4694     STATIC_ASSERT(Map::ConstructionCounter::kNext == 32); | 
| 4776     __ Add(r6, r6, -(1 << Map::ConstructionCounter::kShift), r0); | 4695     __ Add32(r5, r5, Operand(-(1 << Map::ConstructionCounter::kShift))); | 
| 4777     __ stw(r6, FieldMemOperand(r5, Map::kBitField3Offset)); | 4696     __ StoreW(r5, FieldMemOperand(r4, Map::kBitField3Offset)); | 
| 4778 | 4697 | 
| 4779     // Initialize the in-object fields with undefined. | 4698     // Initialize the in-object fields with undefined. | 
| 4780     __ lbz(r7, FieldMemOperand(r5, Map::kUnusedPropertyFieldsOffset)); | 4699     __ LoadlB(r6, FieldMemOperand(r4, Map::kUnusedPropertyFieldsOffset)); | 
| 4781     __ ShiftLeftImm(r7, r7, Operand(kPointerSizeLog2)); | 4700     __ ShiftLeftP(r6, r6, Operand(kPointerSizeLog2)); | 
| 4782     __ sub(r7, r8, r7); | 4701     __ SubP(r6, r7, r6); | 
| 4783     __ InitializeFieldsWithFiller(r4, r7, r9); | 4702     __ InitializeFieldsWithFiller(r3, r6, r8); | 
| 4784 | 4703 | 
| 4785     // Initialize the remaining (reserved) fields with one pointer filler map. | 4704     // Initialize the remaining (reserved) fields with one pointer filler map. | 
| 4786     __ LoadRoot(r9, Heap::kOnePointerFillerMapRootIndex); | 4705     __ LoadRoot(r8, Heap::kOnePointerFillerMapRootIndex); | 
| 4787     __ InitializeFieldsWithFiller(r4, r8, r9); | 4706     __ InitializeFieldsWithFiller(r3, r7, r8); | 
| 4788 | 4707 | 
| 4789     // Add the object tag to make the JSObject real. | 4708     // Add the object tag to make the JSObject real. | 
| 4790     __ addi(r3, r3, Operand(kHeapObjectTag)); | 4709     __ AddP(r2, r2, Operand(kHeapObjectTag)); | 
| 4791 | 4710 | 
| 4792     // Check if we can finalize the instance size. | 4711     // Check if we can finalize the instance size. | 
| 4793     __ cmpi(r10, Operand(Map::kSlackTrackingCounterEnd)); | 4712     __ CmpP(r9, Operand(Map::kSlackTrackingCounterEnd)); | 
| 4794     __ Ret(ne); | 4713     __ Ret(ne); | 
| 4795 | 4714 | 
| 4796     // Finalize the instance size. | 4715     // Finalize the instance size. | 
| 4797     { | 4716     { | 
| 4798       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 4717       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
| 4799       __ Push(r3, r5); | 4718       __ Push(r2, r4); | 
| 4800       __ CallRuntime(Runtime::kFinalizeInstanceSize); | 4719       __ CallRuntime(Runtime::kFinalizeInstanceSize); | 
| 4801       __ Pop(r3); | 4720       __ Pop(r2); | 
| 4802     } | 4721     } | 
| 4803     __ Ret(); | 4722     __ Ret(); | 
| 4804   } | 4723   } | 
| 4805 | 4724 | 
| 4806   // Fall back to %AllocateInNewSpace. | 4725   // Fall back to %AllocateInNewSpace. | 
| 4807   __ bind(&allocate); | 4726   __ bind(&allocate); | 
| 4808   { | 4727   { | 
| 4809     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 4728     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
| 4810     STATIC_ASSERT(kSmiTag == 0); | 4729     STATIC_ASSERT(kSmiTag == 0); | 
| 4811     __ ShiftLeftImm(r7, r7, | 4730     __ ShiftLeftP(r6, r6, | 
| 4812                     Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize)); | 4731                   Operand(kPointerSizeLog2 + kSmiTagSize + kSmiShiftSize)); | 
| 4813     __ Push(r5, r7); | 4732     __ Push(r4, r6); | 
| 4814     __ CallRuntime(Runtime::kAllocateInNewSpace); | 4733     __ CallRuntime(Runtime::kAllocateInNewSpace); | 
| 4815     __ Pop(r5); | 4734     __ Pop(r4); | 
| 4816   } | 4735   } | 
| 4817   __ subi(r3, r3, Operand(kHeapObjectTag)); | 4736   __ SubP(r2, r2, Operand(kHeapObjectTag)); | 
| 4818   __ lbz(r8, FieldMemOperand(r5, Map::kInstanceSizeOffset)); | 4737   __ LoadlB(r7, FieldMemOperand(r4, Map::kInstanceSizeOffset)); | 
| 4819   __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2)); | 4738   __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2)); | 
| 4820   __ add(r8, r3, r8); | 4739   __ AddP(r7, r2, r7); | 
| 4821   __ b(&done_allocate); | 4740   __ b(&done_allocate); | 
| 4822 | 4741 | 
| 4823   // Fall back to %NewObject. | 4742   // Fall back to %NewObject. | 
| 4824   __ bind(&new_object); | 4743   __ bind(&new_object); | 
| 4825   __ Push(r4, r6); | 4744   __ Push(r3, r5); | 
| 4826   __ TailCallRuntime(Runtime::kNewObject); | 4745   __ TailCallRuntime(Runtime::kNewObject); | 
| 4827 } | 4746 } | 
| 4828 | 4747 | 
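The slack-tracking path above decodes `Map::ConstructionCounter` from bit field 3, decrements it on every allocation, and finalizes the instance size when the pre-decrement value reaches `Map::kSlackTrackingCounterEnd`. A sketch of that counter logic, assuming (per the `STATIC_ASSERT` that `kNext == 32`) the counter lives in the top bits of bit field 3; the width and end value below are illustrative, not V8's constants:

```cpp
#include <cstdint>

// Assumptions for illustration: a 3-bit counter in the topmost bits of
// bit_field3 (consistent with ConstructionCounter::kNext == 32), and an
// end value of 1; the real constants live in V8's Map class.
constexpr uint32_t kCounterShift = 32 - 3;
constexpr uint32_t kSlackTrackingCounterEnd = 1;

// Returns true when Runtime::kFinalizeInstanceSize should be called.
bool TickSlackTracking(uint32_t* bit_field3) {
  uint32_t counter = *bit_field3 >> kCounterShift;
  if (counter == 0) return false;       // kNoSlackTracking: fast path
  *bit_field3 -= 1u << kCounterShift;   // decrease generous allocation count
  return counter == kSlackTrackingCounterEnd;
}
```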
| 4829 void FastNewRestParameterStub::Generate(MacroAssembler* masm) { | 4748 void FastNewRestParameterStub::Generate(MacroAssembler* masm) { | 
| 4830   // ----------- S t a t e ------------- | 4749   // ----------- S t a t e ------------- | 
| 4831   //  -- r4 : function | 4750   //  -- r3 : function | 
| 4832   //  -- cp : context | 4751   //  -- cp : context | 
| 4833   //  -- fp : frame pointer | 4752   //  -- fp : frame pointer | 
| 4834   //  -- lr : return address | 4753   //  -- lr : return address | 
| 4835   // ----------------------------------- | 4754   // ----------------------------------- | 
| 4836   __ AssertFunction(r4); | 4755   __ AssertFunction(r3); | 
| 4837 | 4756 | 
| 4838   // For Ignition we need to skip all possible handler/stub frames until | 4757   // For Ignition we need to skip all possible handler/stub frames until | 
| 4839   // we reach the JavaScript frame for the function (similar to what the | 4758   // we reach the JavaScript frame for the function (similar to what the | 
| 4840   // runtime fallback implementation does). So make r5 point to that | 4759   // runtime fallback implementation does). So make r4 point to that | 
| 4841   // JavaScript frame. | 4760   // JavaScript frame. | 
| 4842   { | 4761   { | 
| 4843     Label loop, loop_entry; | 4762     Label loop, loop_entry; | 
| 4844     __ mr(r5, fp); | 4763     __ LoadRR(r4, fp); | 
| 4845     __ b(&loop_entry); | 4764     __ b(&loop_entry); | 
| 4846     __ bind(&loop); | 4765     __ bind(&loop); | 
| 4847     __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset)); | 4766     __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset)); | 
| 4848     __ bind(&loop_entry); | 4767     __ bind(&loop_entry); | 
| 4849     __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset)); | 4768     __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kMarkerOffset)); | 
| 4850     __ cmp(ip, r4); | 4769     __ CmpP(ip, r3); | 
| 4851     __ bne(&loop); | 4770     __ bne(&loop); | 
| 4852   } | 4771   } | 
| 4853 | 4772 | 
| 4854   // Check if we have rest parameters (only possible if we have an | 4773   // Check if we have rest parameters (only possible if we have an | 
| 4855   // arguments adaptor frame below the function frame). | 4774   // arguments adaptor frame below the function frame). | 
| 4856   Label no_rest_parameters; | 4775   Label no_rest_parameters; | 
| 4857   __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset)); | 4776   __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset)); | 
| 4858   __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kContextOffset)); | 4777   __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kContextOffset)); | 
| 4859   __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 4778   __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 
| 4860   __ bne(&no_rest_parameters); | 4779   __ bne(&no_rest_parameters); | 
| 4861 | 4780 | 
| 4862   // Check if the arguments adaptor frame contains more arguments than | 4781   // Check if the arguments adaptor frame contains more arguments than | 
| 4863   // specified by the function's internal formal parameter count. | 4782   // specified by the function's internal formal parameter count. | 
| 4864   Label rest_parameters; | 4783   Label rest_parameters; | 
| 4865   __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 4784   __ LoadP(r2, MemOperand(r4, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| 4866   __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); | 4785   __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); | 
| 4867   __ LoadWordArith( | 4786   __ LoadW( | 
| 4868       r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset)); | 4787       r3, FieldMemOperand(r3, SharedFunctionInfo::kFormalParameterCountOffset)); | 
| 4869 #if V8_TARGET_ARCH_PPC64 | 4788 #if V8_TARGET_ARCH_S390X | 
| 4870   __ SmiTag(r4); | 4789   __ SmiTag(r3); | 
| 4871 #endif | 4790 #endif | 
| 4872   __ sub(r3, r3, r4, LeaveOE, SetRC); | 4791   __ SubP(r2, r2, r3); | 
| 4873   __ bgt(&rest_parameters, cr0); | 4792   __ bgt(&rest_parameters); | 
| 4874 | 4793 | 
| 4875   // Return an empty rest parameter array. | 4794   // Return an empty rest parameter array. | 
| 4876   __ bind(&no_rest_parameters); | 4795   __ bind(&no_rest_parameters); | 
| 4877   { | 4796   { | 
| 4878     // ----------- S t a t e ------------- | 4797     // ----------- S t a t e ------------- | 
| 4879     //  -- cp : context | 4798     //  -- cp : context | 
| 4880     //  -- lr : return address | 4799     //  -- lr : return address | 
| 4881     // ----------------------------------- | 4800     // ----------------------------------- | 
| 4882 | 4801 | 
| 4883     // Allocate an empty rest parameter array. | 4802     // Allocate an empty rest parameter array. | 
| 4884     Label allocate, done_allocate; | 4803     Label allocate, done_allocate; | 
| 4885     __ Allocate(JSArray::kSize, r3, r4, r5, &allocate, TAG_OBJECT); | 4804     __ Allocate(JSArray::kSize, r2, r3, r4, &allocate, TAG_OBJECT); | 
| 4886     __ bind(&done_allocate); | 4805     __ bind(&done_allocate); | 
| 4887 | 4806 | 
| 4888     // Set up the rest parameter array in r3. | 4807     // Set up the rest parameter array in r2. |
| 4889     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4); | 4808     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3); | 
| 4890     __ StoreP(r4, FieldMemOperand(r3, JSArray::kMapOffset), r0); | 4809     __ StoreP(r3, FieldMemOperand(r2, JSArray::kMapOffset), r0); | 
| 4891     __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex); | 4810     __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex); | 
| 4892     __ StoreP(r4, FieldMemOperand(r3, JSArray::kPropertiesOffset), r0); | 4811     __ StoreP(r3, FieldMemOperand(r2, JSArray::kPropertiesOffset), r0); | 
| 4893     __ StoreP(r4, FieldMemOperand(r3, JSArray::kElementsOffset), r0); | 4812     __ StoreP(r3, FieldMemOperand(r2, JSArray::kElementsOffset), r0); | 
| 4894     __ li(r4, Operand::Zero()); | 4813     __ LoadImmP(r3, Operand::Zero()); | 
| 4895     __ StoreP(r4, FieldMemOperand(r3, JSArray::kLengthOffset), r0); | 4814     __ StoreP(r3, FieldMemOperand(r2, JSArray::kLengthOffset), r0); | 
| 4896     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize); | 4815     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize); | 
| 4897     __ Ret(); | 4816     __ Ret(); | 
| 4898 | 4817 | 
| 4899     // Fall back to %AllocateInNewSpace. | 4818     // Fall back to %AllocateInNewSpace. | 
| 4900     __ bind(&allocate); | 4819     __ bind(&allocate); | 
| 4901     { | 4820     { | 
| 4902       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 4821       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
| 4903       __ Push(Smi::FromInt(JSArray::kSize)); | 4822       __ Push(Smi::FromInt(JSArray::kSize)); | 
| 4904       __ CallRuntime(Runtime::kAllocateInNewSpace); | 4823       __ CallRuntime(Runtime::kAllocateInNewSpace); | 
| 4905     } | 4824     } | 
| 4906     __ b(&done_allocate); | 4825     __ b(&done_allocate); | 
| 4907   } | 4826   } | 
| 4908 | 4827 | 
| 4909   __ bind(&rest_parameters); | 4828   __ bind(&rest_parameters); | 
| 4910   { | 4829   { | 
| 4911     // Compute the pointer to the first rest parameter (skipping the receiver). | 4830     // Compute the pointer to the first rest parameter (skipping the receiver). |
| 4912     __ SmiToPtrArrayOffset(r9, r3); | 4831     __ SmiToPtrArrayOffset(r8, r2); | 
| 4913     __ add(r5, r5, r9); | 4832     __ AddP(r4, r4, r8); | 
| 4914     __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset)); | 4833     __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| 4915 | 4834 | 
| 4916     // ----------- S t a t e ------------- | 4835     // ----------- S t a t e ------------- | 
| 4917     //  -- cp : context | 4836     //  -- cp : context | 
| 4918     //  -- r3 : number of rest parameters (tagged) | 4837     //  -- r2 : number of rest parameters (tagged) | 
| 4919     //  -- r5 : pointer just past the first rest parameter | 4838     //  -- r4 : pointer just past the first rest parameter |
| 4920     //  -- r9 : size of rest parameters | 4839     //  -- r8 : size of rest parameters | 
| 4921     //  -- lr : return address | 4840     //  -- lr : return address | 
| 4922     // ----------------------------------- | 4841     // ----------------------------------- | 
| 4923 | 4842 | 
| 4924     // Allocate space for the rest parameter array plus the backing store. | 4843     // Allocate space for the rest parameter array plus the backing store. | 
| 4925     Label allocate, done_allocate; | 4844     Label allocate, done_allocate; | 
| 4926     __ mov(r4, Operand(JSArray::kSize + FixedArray::kHeaderSize)); | 4845     __ mov(r3, Operand(JSArray::kSize + FixedArray::kHeaderSize)); | 
| 4927     __ add(r4, r4, r9); | 4846     __ AddP(r3, r3, r8); | 
| 4928     __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT); | 4847     __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT); | 
| 4929     __ bind(&done_allocate); | 4848     __ bind(&done_allocate); | 
| 4930 | 4849 | 
| 4931     // Set up the elements array in r6. | 4850     // Set up the elements array in r5. |
| 4932     __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex); | 4851     __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); | 
| 4933     __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0); | 4852     __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0); | 
| 4934     __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0); | 4853     __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0); | 
| 4935     __ addi(r7, r6, | 4854     __ AddP(r6, r5, | 
| 4936             Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); | 4855             Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); | 
| 4937     { | 4856     { | 
| 4938       Label loop; | 4857       Label loop; | 
| 4939       __ SmiUntag(r0, r3); | 4858       __ SmiUntag(r1, r2); | 
| 4940       __ mtctr(r0); |  |
| 4941       __ bind(&loop); | 4860       __ bind(&loop); | 
| 4942       __ LoadPU(ip, MemOperand(r5, -kPointerSize)); | 4861       __ lay(r4, MemOperand(r4, -kPointerSize)); | 
| 4943       __ StorePU(ip, MemOperand(r7, kPointerSize)); | 4862       __ LoadP(ip, MemOperand(r4)); | 
| 4944       __ bdnz(&loop); | 4863       __ la(r6, MemOperand(r6, kPointerSize)); | 
| 4945       __ addi(r7, r7, Operand(kPointerSize)); | 4864       __ StoreP(ip, MemOperand(r6)); | 
|  | 4866       __ BranchOnCount(r1, &loop); | 
|  | 4867       __ AddP(r6, r6, Operand(kPointerSize)); | 
| 4946     } | 4868     } | 
| 4947 | 4869 | 
| 4948     // Set up the rest parameter array in r7. | 4870     // Set up the rest parameter array in r6. |
| 4949     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r4); | 4871     __ LoadNativeContextSlot(Context::JS_ARRAY_FAST_ELEMENTS_MAP_INDEX, r3); | 
| 4950     __ StoreP(r4, MemOperand(r7, JSArray::kMapOffset)); | 4872     __ StoreP(r3, MemOperand(r6, JSArray::kMapOffset)); | 
| 4951     __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex); | 4873     __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex); | 
| 4952     __ StoreP(r4, MemOperand(r7, JSArray::kPropertiesOffset)); | 4874     __ StoreP(r3, MemOperand(r6, JSArray::kPropertiesOffset)); | 
| 4953     __ StoreP(r6, MemOperand(r7, JSArray::kElementsOffset)); | 4875     __ StoreP(r5, MemOperand(r6, JSArray::kElementsOffset)); | 
| 4954     __ StoreP(r3, MemOperand(r7, JSArray::kLengthOffset)); | 4876     __ StoreP(r2, MemOperand(r6, JSArray::kLengthOffset)); | 
| 4955     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize); | 4877     STATIC_ASSERT(JSArray::kSize == 4 * kPointerSize); | 
| 4956     __ addi(r3, r7, Operand(kHeapObjectTag)); | 4878     __ AddP(r2, r6, Operand(kHeapObjectTag)); | 
| 4957     __ Ret(); | 4879     __ Ret(); | 
| 4958 | 4880 | 
| 4959     // Fall back to %AllocateInNewSpace. | 4881     // Fall back to %AllocateInNewSpace. | 
| 4960     __ bind(&allocate); | 4882     __ bind(&allocate); | 
| 4961     { | 4883     { | 
| 4962       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 4884       FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
| 4963       __ SmiTag(r4); | 4885       __ SmiTag(r3); | 
| 4964       __ Push(r3, r5, r4); | 4886       __ Push(r2, r4, r3); | 
| 4965       __ CallRuntime(Runtime::kAllocateInNewSpace); | 4887       __ CallRuntime(Runtime::kAllocateInNewSpace); | 
| 4966       __ mr(r6, r3); | 4888       __ LoadRR(r5, r2); | 
| 4967       __ Pop(r3, r5); | 4889       __ Pop(r2, r4); | 
| 4968     } | 4890     } | 
| 4969     __ b(&done_allocate); | 4891     __ b(&done_allocate); | 
| 4970   } | 4892   } | 
| 4971 } | 4893 } | 
| 4972 | 4894 | 
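The most interesting non-mechanical change in this stub is the copy loop: PPC counts in the dedicated CTR register (`mtctr`/`bdnz`) and uses update-form memory ops (`LoadPU`/`StorePU`), while S390 has no CTR, so the port keeps the count in `r1` and pairs plain loads/stores with explicit address bumps (`lay`/`la`) under `BranchOnCount`. Both forms perform the same copy, sketched here in plain C++ (function and parameter names are ours):

```cpp
#include <cstddef>
#include <cstdint>

// The copy both loops perform: `count` pointers, source walking down from
// just past the first rest parameter, destination walking up through the
// FixedArray body (dst starts one slot before the first element).
void CopyRestParameters(intptr_t* dst, const intptr_t* src, size_t count) {
  while (count-- > 0) {
    --src;          // PPC: LoadPU pre-decrement / S390: lay then LoadP
    ++dst;          // PPC: StorePU pre-increment / S390: la then StoreP
    *dst = *src;
  }
}
```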
| 4973 void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) { | 4895 void FastNewSloppyArgumentsStub::Generate(MacroAssembler* masm) { | 
| 4974   // ----------- S t a t e ------------- | 4896   // ----------- S t a t e ------------- | 
| 4975   //  -- r4 : function | 4897   //  -- r3 : function | 
| 4976   //  -- cp : context | 4898   //  -- cp : context | 
| 4977   //  -- fp : frame pointer | 4899   //  -- fp : frame pointer | 
| 4978   //  -- lr : return address | 4900   //  -- lr : return address | 
| 4979   // ----------------------------------- | 4901   // ----------------------------------- | 
| 4980   __ AssertFunction(r4); | 4902   __ AssertFunction(r3); | 
| 4981 | 4903 | 
| 4982   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub. | 4904   // TODO(bmeurer): Cleanup to match the FastNewStrictArgumentsStub. | 
| 4983   __ LoadP(r5, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); | 4905   __ LoadP(r4, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); | 
| 4984   __ LoadWordArith( | 4906   __ LoadW( | 
| 4985       r5, FieldMemOperand(r5, SharedFunctionInfo::kFormalParameterCountOffset)); | 4907       r4, FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset)); | 
| 4986 #if V8_TARGET_ARCH_PPC64 | 4908 #if V8_TARGET_ARCH_S390X | 
| 4987   __ SmiTag(r5); | 4909   __ SmiTag(r4); | 
| 4988 #endif | 4910 #endif | 
| 4989   __ SmiToPtrArrayOffset(r6, r5); | 4911   __ SmiToPtrArrayOffset(r5, r4); | 
| 4990   __ add(r6, fp, r6); | 4912   __ AddP(r5, fp, r5); | 
| 4991   __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); | 4913   __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| 4992 | 4914 | 
| 4993   // r4 : function | 4915   // r3 : function | 
| 4994   // r5 : number of parameters (tagged) | 4916   // r4 : number of parameters (tagged) | 
| 4995   // r6 : parameters pointer | 4917   // r5 : parameters pointer | 
| 4996   // Registers used over whole function: | 4918   // Registers used over whole function: | 
| 4997   // r8 : arguments count (tagged) | 4919   // r7 : arguments count (tagged) | 
| 4998   // r9 : mapped parameter count (tagged) | 4920   // r8 : mapped parameter count (tagged) | 
| 4999 | 4921 | 
| 5000   // Check if the calling frame is an arguments adaptor frame. | 4922   // Check if the calling frame is an arguments adaptor frame. | 
| 5001   Label adaptor_frame, try_allocate, runtime; | 4923   Label adaptor_frame, try_allocate, runtime; | 
| 5002   __ LoadP(r7, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 4924   __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| 5003   __ LoadP(r3, MemOperand(r7, StandardFrameConstants::kContextOffset)); | 4925   __ LoadP(r2, MemOperand(r6, StandardFrameConstants::kContextOffset)); | 
| 5004   __ CmpSmiLiteral(r3, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 4926   __ CmpSmiLiteral(r2, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 
| 5005   __ beq(&adaptor_frame); | 4927   __ beq(&adaptor_frame); | 
| 5006 | 4928 | 
| 5007   // No adaptor, parameter count = argument count. | 4929   // No adaptor, parameter count = argument count. | 
| 5008   __ mr(r8, r5); | 4930   __ LoadRR(r7, r4); | 
| 5009   __ mr(r9, r5); | 4931   __ LoadRR(r8, r4); | 
| 5010   __ b(&try_allocate); | 4932   __ b(&try_allocate); | 
| 5011 | 4933 | 
| 5012   // We have an adaptor frame. Patch the parameters pointer. | 4934   // We have an adaptor frame. Patch the parameters pointer. | 
| 5013   __ bind(&adaptor_frame); | 4935   __ bind(&adaptor_frame); | 
| 5014   __ LoadP(r8, MemOperand(r7, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 4936   __ LoadP(r7, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| 5015   __ SmiToPtrArrayOffset(r6, r8); | 4937   __ SmiToPtrArrayOffset(r5, r7); | 
| 5016   __ add(r6, r6, r7); | 4938   __ AddP(r5, r5, r6); | 
| 5017   __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); | 4939   __ AddP(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| 5018 | 4940 | 
| 5019   // r8 = argument count (tagged) | 4941   // r7 = argument count (tagged) | 
| 5020   // r9 = parameter count (tagged) | 4942   // r8 = parameter count (tagged) | 
| 5021   // Compute the mapped parameter count = min(r5, r8) in r9. | 4943   // Compute the mapped parameter count = min(r4, r7) in r8. | 
| 5022   __ cmp(r5, r8); | 4944   __ CmpP(r4, r7); | 
| 5023   if (CpuFeatures::IsSupported(ISELECT)) { | 4945   Label skip; | 
| 5024     __ isel(lt, r9, r5, r8); | 4946   __ LoadRR(r8, r4); | 
| 5025   } else { | 4947   __ blt(&skip); | 
| 5026     Label skip; | 4948   __ LoadRR(r8, r7); | 
| 5027     __ mr(r9, r5); | 4949   __ bind(&skip); | 
| 5028     __ blt(&skip); |  | 
| 5029     __ mr(r9, r8); |  | 
| 5030     __ bind(&skip); |  | 
| 5031   } |  | 
| 5032 | 4950 | 
| 5033   __ bind(&try_allocate); | 4951   __ bind(&try_allocate); | 
| 5034 | 4952 | 
| 5035   // Compute the sizes of backing store, parameter map, and arguments object. | 4953   // Compute the sizes of backing store, parameter map, and arguments object. | 
| 5036   // 1. Parameter map, has 2 extra words containing context and backing store. | 4954   // 1. Parameter map, has 2 extra words containing context and backing store. | 
| 5037   const int kParameterMapHeaderSize = | 4955   const int kParameterMapHeaderSize = | 
| 5038       FixedArray::kHeaderSize + 2 * kPointerSize; | 4956       FixedArray::kHeaderSize + 2 * kPointerSize; | 
| 5039   // If there are no mapped parameters, we do not need the parameter_map. | 4957   // If there are no mapped parameters, we do not need the parameter_map. | 
| 5040   __ CmpSmiLiteral(r9, Smi::FromInt(0), r0); | 4958   __ CmpSmiLiteral(r8, Smi::FromInt(0), r0); | 
| 5041   if (CpuFeatures::IsSupported(ISELECT)) { | 4959   Label skip2, skip3; | 
| 5042     __ SmiToPtrArrayOffset(r11, r9); | 4960   __ bne(&skip2); | 
| 5043     __ addi(r11, r11, Operand(kParameterMapHeaderSize)); | 4961   __ LoadImmP(r1, Operand::Zero()); | 
| 5044     __ isel(eq, r11, r0, r11); | 4962   __ b(&skip3); | 
| 5045   } else { | 4963   __ bind(&skip2); | 
| 5046     Label skip2, skip3; | 4964   __ SmiToPtrArrayOffset(r1, r8); | 
| 5047     __ bne(&skip2); | 4965   __ AddP(r1, r1, Operand(kParameterMapHeaderSize)); | 
| 5048     __ li(r11, Operand::Zero()); | 4966   __ bind(&skip3); | 
| 5049     __ b(&skip3); |  | 
| 5050     __ bind(&skip2); |  | 
| 5051     __ SmiToPtrArrayOffset(r11, r9); |  | 
| 5052     __ addi(r11, r11, Operand(kParameterMapHeaderSize)); |  | 
| 5053     __ bind(&skip3); |  | 
| 5054   } |  | 
| 5055 | 4967 | 
| 5056   // 2. Backing store. | 4968   // 2. Backing store. | 
| 5057   __ SmiToPtrArrayOffset(r7, r8); | 4969   __ SmiToPtrArrayOffset(r6, r7); | 
| 5058   __ add(r11, r11, r7); | 4970   __ AddP(r1, r1, r6); | 
| 5059   __ addi(r11, r11, Operand(FixedArray::kHeaderSize)); | 4971   __ AddP(r1, r1, Operand(FixedArray::kHeaderSize)); | 
| 5060 | 4972 | 
| 5061   // 3. Arguments object. | 4973   // 3. Arguments object. | 
| 5062   __ addi(r11, r11, Operand(JSSloppyArgumentsObject::kSize)); | 4974   __ AddP(r1, r1, Operand(JSSloppyArgumentsObject::kSize)); | 
| 5063 | 4975 | 
| 5064   // Do the allocation of all three objects in one go. | 4976   // Do the allocation of all three objects in one go. | 
| 5065   __ Allocate(r11, r3, r11, r7, &runtime, TAG_OBJECT); | 4977   __ Allocate(r1, r2, r1, r6, &runtime, TAG_OBJECT); | 
| 5066 | 4978 | 
| 5067   // r3 = address of new object(s) (tagged) | 4979   // r2 = address of new object(s) (tagged) | 
| 5068   // r5 = argument count (smi-tagged) | 4980   // r4 = argument count (smi-tagged) | 
| 5069   // Get the arguments boilerplate from the current native context into r4. | 4981   // Get the arguments boilerplate from the current native context into r3. | 
| 5070   const int kNormalOffset = | 4982   const int kNormalOffset = | 
| 5071       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); | 4983       Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); | 
| 5072   const int kAliasedOffset = | 4984   const int kAliasedOffset = | 
| 5073       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX); | 4985       Context::SlotOffset(Context::FAST_ALIASED_ARGUMENTS_MAP_INDEX); | 
| 5074 | 4986 | 
| 5075   __ LoadP(r7, NativeContextMemOperand()); | 4987   __ LoadP(r6, NativeContextMemOperand()); | 
| 5076   __ cmpi(r9, Operand::Zero()); | 4988   __ CmpP(r8, Operand::Zero()); | 
| 5077   if (CpuFeatures::IsSupported(ISELECT)) { | 4989   Label skip4, skip5; | 
| 5078     __ LoadP(r11, MemOperand(r7, kNormalOffset)); | 4990   __ bne(&skip4); | 
| 5079     __ LoadP(r7, MemOperand(r7, kAliasedOffset)); | 4991   __ LoadP(r6, MemOperand(r6, kNormalOffset)); | 
| 5080     __ isel(eq, r7, r11, r7); | 4992   __ b(&skip5); | 
| 5081   } else { | 4993   __ bind(&skip4); | 
| 5082     Label skip4, skip5; | 4994   __ LoadP(r6, MemOperand(r6, kAliasedOffset)); | 
| 5083     __ bne(&skip4); | 4995   __ bind(&skip5); | 
| 5084     __ LoadP(r7, MemOperand(r7, kNormalOffset)); |  | 
| 5085     __ b(&skip5); |  | 
| 5086     __ bind(&skip4); |  | 
| 5087     __ LoadP(r7, MemOperand(r7, kAliasedOffset)); |  | 
| 5088     __ bind(&skip5); |  | 
| 5089   } |  | 
| 5090 | 4996 | 
| 5091   // r3 = address of new object (tagged) | 4997   // r2 = address of new object (tagged) | 
| 5092   // r5 = argument count (smi-tagged) | 4998   // r4 = argument count (smi-tagged) | 
| 5093   // r7 = address of arguments map (tagged) | 4999   // r6 = address of arguments map (tagged) | 
| 5094   // r9 = mapped parameter count (tagged) | 5000   // r8 = mapped parameter count (tagged) | 
| 5095   __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0); | 5001   __ StoreP(r6, FieldMemOperand(r2, JSObject::kMapOffset), r0); | 
| 5096   __ LoadRoot(r11, Heap::kEmptyFixedArrayRootIndex); | 5002   __ LoadRoot(r1, Heap::kEmptyFixedArrayRootIndex); | 
| 5097   __ StoreP(r11, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0); | 5003   __ StoreP(r1, FieldMemOperand(r2, JSObject::kPropertiesOffset), r0); | 
| 5098   __ StoreP(r11, FieldMemOperand(r3, JSObject::kElementsOffset), r0); | 5004   __ StoreP(r1, FieldMemOperand(r2, JSObject::kElementsOffset), r0); | 
| 5099 | 5005 | 
| 5100   // Set up the callee in-object property. | 5006   // Set up the callee in-object property. | 
| 5101   __ AssertNotSmi(r4); | 5007   __ AssertNotSmi(r3); | 
| 5102   __ StoreP(r4, FieldMemOperand(r3, JSSloppyArgumentsObject::kCalleeOffset), | 5008   __ StoreP(r3, FieldMemOperand(r2, JSSloppyArgumentsObject::kCalleeOffset), | 
| 5103             r0); | 5009             r0); | 
| 5104 | 5010 | 
| 5105   // Use the length (smi tagged) and set that as an in-object property too. | 5011   // Use the length (smi tagged) and set that as an in-object property too. | 
| 5106   __ AssertSmi(r8); | 5012   __ AssertSmi(r7); | 
| 5107   __ StoreP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset), | 5013   __ StoreP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset), | 
| 5108             r0); | 5014             r0); | 
| 5109 | 5015 | 
| 5110   // Set up the elements pointer in the allocated arguments object. | 5016   // Set up the elements pointer in the allocated arguments object. | 
| 5111   // If we allocated a parameter map, r7 will point there, otherwise | 5017   // If we allocated a parameter map, r6 will point there, otherwise | 
| 5112   // it will point to the backing store. | 5018   // it will point to the backing store. | 
| 5113   __ addi(r7, r3, Operand(JSSloppyArgumentsObject::kSize)); | 5019   __ AddP(r6, r2, Operand(JSSloppyArgumentsObject::kSize)); | 
| 5114   __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0); | 5020   __ StoreP(r6, FieldMemOperand(r2, JSObject::kElementsOffset), r0); | 
| 5115 | 5021 | 
| 5116   // r3 = address of new object (tagged) | 5022   // r2 = address of new object (tagged) | 
| 5117   // r5 = argument count (tagged) | 5023   // r4 = argument count (tagged) | 
| 5118   // r7 = address of parameter map or backing store (tagged) | 5024   // r6 = address of parameter map or backing store (tagged) | 
| 5119   // r9 = mapped parameter count (tagged) | 5025   // r8 = mapped parameter count (tagged) | 
| 5120   // Initialize parameter map. If there are no mapped arguments, we're done. | 5026   // Initialize parameter map. If there are no mapped arguments, we're done. | 
| 5121   Label skip_parameter_map; | 5027   Label skip_parameter_map; | 
| 5122   __ CmpSmiLiteral(r9, Smi::FromInt(0), r0); | 5028   __ CmpSmiLiteral(r8, Smi::FromInt(0), r0); | 
| 5123   if (CpuFeatures::IsSupported(ISELECT)) { | 5029   Label skip6; | 
| 5124     __ isel(eq, r4, r7, r4); | 5030   __ bne(&skip6); | 
| 5125     __ beq(&skip_parameter_map); | 5031   // Move backing store address to r3, because it is | 
| 5126   } else { | 5032   // expected there when filling in the unmapped arguments. | 
| 5127     Label skip6; | 5033   __ LoadRR(r3, r6); | 
| 5128     __ bne(&skip6); | 5034   __ b(&skip_parameter_map); | 
| 5129     // Move backing store address to r4, because it is | 5035   __ bind(&skip6); | 
| 5130     // expected there when filling in the unmapped arguments. |  | 
| 5131     __ mr(r4, r7); |  | 
| 5132     __ b(&skip_parameter_map); |  | 
| 5133     __ bind(&skip6); |  | 
| 5134   } |  | 
| 5135 | 5036 | 
| 5136   __ LoadRoot(r8, Heap::kSloppyArgumentsElementsMapRootIndex); | 5037   __ LoadRoot(r7, Heap::kSloppyArgumentsElementsMapRootIndex); | 
| 5137   __ StoreP(r8, FieldMemOperand(r7, FixedArray::kMapOffset), r0); | 5038   __ StoreP(r7, FieldMemOperand(r6, FixedArray::kMapOffset), r0); | 
| 5138   __ AddSmiLiteral(r8, r9, Smi::FromInt(2), r0); | 5039   __ AddSmiLiteral(r7, r8, Smi::FromInt(2), r0); | 
| 5139   __ StoreP(r8, FieldMemOperand(r7, FixedArray::kLengthOffset), r0); | 5040   __ StoreP(r7, FieldMemOperand(r6, FixedArray::kLengthOffset), r0); | 
| 5140   __ StoreP(cp, FieldMemOperand(r7, FixedArray::kHeaderSize + 0 * kPointerSize), | 5041   __ StoreP(cp, FieldMemOperand(r6, FixedArray::kHeaderSize + 0 * kPointerSize), | 
| 5141             r0); | 5042             r0); | 
| 5142   __ SmiToPtrArrayOffset(r8, r9); | 5043   __ SmiToPtrArrayOffset(r7, r8); | 
| 5143   __ add(r8, r8, r7); | 5044   __ AddP(r7, r7, r6); | 
| 5144   __ addi(r8, r8, Operand(kParameterMapHeaderSize)); | 5045   __ AddP(r7, r7, Operand(kParameterMapHeaderSize)); | 
| 5145   __ StoreP(r8, FieldMemOperand(r7, FixedArray::kHeaderSize + 1 * kPointerSize), | 5046   __ StoreP(r7, FieldMemOperand(r6, FixedArray::kHeaderSize + 1 * kPointerSize), | 
| 5146             r0); | 5047             r0); | 
| 5147 | 5048 | 
| 5148   // Copy the parameter slots and the holes in the arguments. | 5049   // Copy the parameter slots and the holes in the arguments. | 
| 5149   // We need to fill in mapped_parameter_count slots. They index the context, | 5050   // We need to fill in mapped_parameter_count slots. They index the context, | 
| 5150   // where parameters are stored in reverse order, at | 5051   // where parameters are stored in reverse order, at | 
| 5151   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 | 5052   //   MIN_CONTEXT_SLOTS .. MIN_CONTEXT_SLOTS+parameter_count-1 | 
| 5152   // The mapped parameters thus need to get indices | 5053   // The mapped parameters thus need to get indices |
| 5153   //   MIN_CONTEXT_SLOTS+parameter_count-1 .. | 5054   //   MIN_CONTEXT_SLOTS+parameter_count-1 .. | 
| 5154   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 5055   //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 
| 5155   // We loop from right to left. | 5056   // We loop from right to left. | 
| 5156   Label parameters_loop; | 5057   Label parameters_loop; | 
| 5157   __ mr(r8, r9); | 5058   __ LoadRR(r7, r8); | 
| 5158   __ AddSmiLiteral(r11, r5, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0); | 5059   __ AddSmiLiteral(r1, r4, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0); | 
| 5159   __ sub(r11, r11, r9); | 5060   __ SubP(r1, r1, r8); | 
| 5160   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 5061   __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 
| 5161   __ SmiToPtrArrayOffset(r4, r8); | 5062   __ SmiToPtrArrayOffset(r3, r7); | 
| 5162   __ add(r4, r4, r7); | 5063   __ AddP(r3, r3, r6); | 
| 5163   __ addi(r4, r4, Operand(kParameterMapHeaderSize)); | 5064   __ AddP(r3, r3, Operand(kParameterMapHeaderSize)); | 
| 5164 | 5065 | 
| 5165   // r4 = address of backing store (tagged) | 5066   // r3 = address of backing store (tagged) | 
| 5166   // r7 = address of parameter map (tagged) | 5067   // r6 = address of parameter map (tagged) | 
| 5167   // r8 = temporary scratch (e.g., for address calculation) | 5068   // r7 = temporary scratch (e.g., for address calculation) |
| 5168   // r10 = temporary scratch (e.g., for address calculation) | 5069   // r9 = temporary scratch (e.g., for address calculation) |
| 5169   // ip = the hole value | 5070   // ip = the hole value | 
| 5170   __ SmiUntag(r8); | 5071   __ SmiUntag(r7); | 
| 5171   __ mtctr(r8); | 5072   __ push(r4); | 
| 5172   __ ShiftLeftImm(r8, r8, Operand(kPointerSizeLog2)); | 5073   __ LoadRR(r4, r7); | 
| 5173   __ add(r10, r4, r8); | 5074   __ ShiftLeftP(r7, r7, Operand(kPointerSizeLog2)); | 
| 5174   __ add(r8, r7, r8); | 5075   __ AddP(r9, r3, r7); | 
| 5175   __ addi(r10, r10, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 5076   __ AddP(r7, r6, r7); | 
| 5176   __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag)); | 5077   __ AddP(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
|  | 5078   __ AddP(r7, r7, Operand(kParameterMapHeaderSize - kHeapObjectTag)); | 
| 5177 | 5079 | 
| 5178   __ bind(¶meters_loop); | 5080   __ bind(¶meters_loop); | 
| 5179   __ StorePU(r11, MemOperand(r8, -kPointerSize)); | 5081   __ StoreP(r1, MemOperand(r7, -kPointerSize)); | 
| 5180   __ StorePU(ip, MemOperand(r10, -kPointerSize)); | 5082   __ lay(r7, MemOperand(r7, -kPointerSize)); | 
| 5181   __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0); | 5083   __ StoreP(ip, MemOperand(r9, -kPointerSize)); | 
| 5182   __ bdnz(¶meters_loop); | 5084   __ lay(r9, MemOperand(r9, -kPointerSize)); | 
|  | 5085   __ AddSmiLiteral(r1, r1, Smi::FromInt(1), r0); | 
|  | 5086   __ BranchOnCount(r4, ¶meters_loop); | 
|  | 5087   __ pop(r4); | 
| 5183 | 5088 | 
| 5184   // Restore r8 = argument count (tagged). | 5089   // Restore r7 = argument count (tagged). | 
| 5185   __ LoadP(r8, FieldMemOperand(r3, JSSloppyArgumentsObject::kLengthOffset)); | 5090   __ LoadP(r7, FieldMemOperand(r2, JSSloppyArgumentsObject::kLengthOffset)); | 
| 5186 | 5091 | 
| 5187   __ bind(&skip_parameter_map); | 5092   __ bind(&skip_parameter_map); | 
| 5188   // r3 = address of new object (tagged) | 5093   // r2 = address of new object (tagged) | 
| 5189   // r4 = address of backing store (tagged) | 5094   // r3 = address of backing store (tagged) | 
| 5190   // r8 = argument count (tagged) | 5095   // r7 = argument count (tagged) | 
| 5191   // r9 = mapped parameter count (tagged) | 5096   // r8 = mapped parameter count (tagged) | 
| 5192   // r11 = scratch | 5097   // r1 = scratch | 
| 5193   // Copy arguments header and remaining slots (if there are any). | 5098   // Copy arguments header and remaining slots (if there are any). | 
| 5194   __ LoadRoot(r11, Heap::kFixedArrayMapRootIndex); | 5099   __ LoadRoot(r1, Heap::kFixedArrayMapRootIndex); | 
| 5195   __ StoreP(r11, FieldMemOperand(r4, FixedArray::kMapOffset), r0); | 5100   __ StoreP(r1, FieldMemOperand(r3, FixedArray::kMapOffset), r0); | 
| 5196   __ StoreP(r8, FieldMemOperand(r4, FixedArray::kLengthOffset), r0); | 5101   __ StoreP(r7, FieldMemOperand(r3, FixedArray::kLengthOffset), r0); | 
| 5197   __ sub(r11, r8, r9, LeaveOE, SetRC); | 5102   __ SubP(r1, r7, r8); | 
| 5198   __ Ret(eq, cr0); | 5103   __ Ret(eq); | 
| 5199 | 5104 | 
| 5200   Label arguments_loop; | 5105   Label arguments_loop; | 
| 5201   __ SmiUntag(r11); | 5106   __ SmiUntag(r1); | 
| 5202   __ mtctr(r11); | 5107   __ LoadRR(r4, r1); | 
| 5203 | 5108 | 
| 5204   __ SmiToPtrArrayOffset(r0, r9); | 5109   __ SmiToPtrArrayOffset(r0, r8); | 
| 5205   __ sub(r6, r6, r0); | 5110   __ SubP(r5, r5, r0); | 
| 5206   __ add(r11, r4, r0); | 5111   __ AddP(r1, r3, r0); | 
| 5207   __ addi(r11, r11, | 5112   __ AddP(r1, r1, | 
| 5208           Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); | 5113           Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); | 
| 5209 | 5114 | 
| 5210   __ bind(&arguments_loop); | 5115   __ bind(&arguments_loop); | 
| 5211   __ LoadPU(r7, MemOperand(r6, -kPointerSize)); | 5116   __ LoadP(r6, MemOperand(r5, -kPointerSize)); | 
| 5212   __ StorePU(r7, MemOperand(r11, kPointerSize)); | 5117   __ lay(r5, MemOperand(r5, -kPointerSize)); | 
| 5213   __ bdnz(&arguments_loop); | 5118   __ StoreP(r6, MemOperand(r1, kPointerSize)); | 
|  | 5119   __ la(r1, MemOperand(r1, kPointerSize)); | 
|  | 5120   __ BranchOnCount(r4, &arguments_loop); | 
| 5214 | 5121 | 
| 5215   // Return. | 5122   // Return. | 
| 5216   __ Ret(); | 5123   __ Ret(); | 
| 5217 | 5124 | 
| 5218   // Do the runtime call to allocate the arguments object. | 5125   // Do the runtime call to allocate the arguments object. | 
| 5219   // r8 = argument count (tagged) | 5126   // r7 = argument count (tagged) | 
| 5220   __ bind(&runtime); | 5127   __ bind(&runtime); | 
| 5221   __ Push(r4, r6, r8); | 5128   __ Push(r3, r5, r7); | 
| 5222   __ TailCallRuntime(Runtime::kNewSloppyArguments); | 5129   __ TailCallRuntime(Runtime::kNewSloppyArguments); | 
| 5223 } | 5130 } | 
| 5224 | 5131 | 
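`FastNewSloppyArgumentsStub` sizes three objects and allocates them with a single `Allocate` call: an optional parameter map (a FixedArray header plus a context slot and a backing-store slot, hence `kParameterMapHeaderSize = FixedArray::kHeaderSize + 2 * kPointerSize`), the backing store itself, and the arguments object. A sketch of that size computation; the two object-size constants are assumptions stated in the comments:

```cpp
#include <cstddef>

constexpr size_t kPtr = sizeof(void*);
// Assumptions for illustration: a FixedArray header of map + length, and
// a JSSloppyArgumentsObject of JSObject header + length + callee.
constexpr size_t kFixedArrayHeader = 2 * kPtr;
constexpr size_t kParameterMapHeader = kFixedArrayHeader + 2 * kPtr;
constexpr size_t kSloppyArgumentsObject = 5 * kPtr;

// Mirrors the three-step size computation before the single Allocate call.
size_t SloppyArgumentsAllocationSize(size_t arg_count, size_t mapped_count) {
  size_t size = 0;
  if (mapped_count > 0) {  // no parameter map when nothing is mapped
    size += kParameterMapHeader + mapped_count * kPtr;
  }
  size += kFixedArrayHeader + arg_count * kPtr;  // backing store
  return size + kSloppyArgumentsObject;          // the arguments object
}
```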
| 5225 void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) { | 5132 void FastNewStrictArgumentsStub::Generate(MacroAssembler* masm) { | 
| 5226   // ----------- S t a t e ------------- | 5133   // ----------- S t a t e ------------- | 
| 5227   //  -- r4 : function | 5134   //  -- r3 : function | 
| 5228   //  -- cp : context | 5135   //  -- cp : context | 
| 5229   //  -- fp : frame pointer | 5136   //  -- fp : frame pointer | 
| 5230   //  -- lr : return address | 5137   //  -- lr : return address | 
| 5231   // ----------------------------------- | 5138   // ----------------------------------- | 
| 5232   __ AssertFunction(r4); | 5139   __ AssertFunction(r3); | 
| 5233 | 5140 | 
| 5234   // For Ignition we need to skip all possible handler/stub frames until | 5141   // For Ignition we need to skip all possible handler/stub frames until | 
| 5235   // we reach the JavaScript frame for the function (similar to what the | 5142   // we reach the JavaScript frame for the function (similar to what the | 
| 5236   // runtime fallback implementation does). So make r5 point to that | 5143   // runtime fallback implementation does). So make r4 point to that | 
| 5237   // JavaScript frame. | 5144   // JavaScript frame. | 
| 5238   { | 5145   { | 
| 5239     Label loop, loop_entry; | 5146     Label loop, loop_entry; | 
| 5240     __ mr(r5, fp); | 5147     __ LoadRR(r4, fp); | 
| 5241     __ b(&loop_entry); | 5148     __ b(&loop_entry); | 
| 5242     __ bind(&loop); | 5149     __ bind(&loop); | 
| 5243     __ LoadP(r5, MemOperand(r5, StandardFrameConstants::kCallerFPOffset)); | 5150     __ LoadP(r4, MemOperand(r4, StandardFrameConstants::kCallerFPOffset)); | 
| 5244     __ bind(&loop_entry); | 5151     __ bind(&loop_entry); | 
| 5245     __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kMarkerOffset)); | 5152     __ LoadP(ip, MemOperand(r4, StandardFrameConstants::kMarkerOffset)); | 
| 5246     __ cmp(ip, r4); | 5153     __ CmpP(ip, r3); | 
| 5247     __ bne(&loop); | 5154     __ bne(&loop); | 
| 5248   } | 5155   } | 
| 5249 | 5156 | 
| 5250   // Check if we have an arguments adaptor frame below the function frame. | 5157   // Check if we have an arguments adaptor frame below the function frame. | 
| 5251   Label arguments_adaptor, arguments_done; | 5158   Label arguments_adaptor, arguments_done; | 
| 5252   __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kCallerFPOffset)); | 5159   __ LoadP(r5, MemOperand(r4, StandardFrameConstants::kCallerFPOffset)); | 
| 5253   __ LoadP(ip, MemOperand(r6, StandardFrameConstants::kContextOffset)); | 5160   __ LoadP(ip, MemOperand(r5, StandardFrameConstants::kContextOffset)); | 
| 5254   __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 5161   __ CmpSmiLiteral(ip, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 
| 5255   __ beq(&arguments_adaptor); | 5162   __ beq(&arguments_adaptor); | 
| 5256   { | 5163   { | 
| 5257     __ LoadP(r4, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); | 5164     __ LoadP(r3, FieldMemOperand(r3, JSFunction::kSharedFunctionInfoOffset)); | 
| 5258     __ LoadWordArith( | 5165     __ LoadW(r2, FieldMemOperand( | 
| 5259         r3, | 5166                      r3, SharedFunctionInfo::kFormalParameterCountOffset)); | 
| 5260         FieldMemOperand(r4, SharedFunctionInfo::kFormalParameterCountOffset)); | 5167 #if V8_TARGET_ARCH_S390X | 
| 5261 #if V8_TARGET_ARCH_PPC64 | 5168     __ SmiTag(r2); | 
| 5262     __ SmiTag(r3); |  | 
| 5263 #endif | 5169 #endif | 
| 5264     __ SmiToPtrArrayOffset(r9, r3); | 5170     __ SmiToPtrArrayOffset(r8, r2); | 
| 5265     __ add(r5, r5, r9); | 5171     __ AddP(r4, r4, r8); | 
| 5266   } | 5172   } | 
| 5267   __ b(&arguments_done); | 5173   __ b(&arguments_done); | 
| 5268   __ bind(&arguments_adaptor); | 5174   __ bind(&arguments_adaptor); | 
| 5269   { | 5175   { | 
| 5270     __ LoadP(r3, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 5176     __ LoadP(r2, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| 5271     __ SmiToPtrArrayOffset(r9, r3); | 5177     __ SmiToPtrArrayOffset(r8, r2); | 
| 5272     __ add(r5, r6, r9); | 5178     __ AddP(r4, r5, r8); | 
| 5273   } | 5179   } | 
| 5274   __ bind(&arguments_done); | 5180   __ bind(&arguments_done); | 
| 5275   __ addi(r5, r5, Operand(StandardFrameConstants::kCallerSPOffset)); | 5181   __ AddP(r4, r4, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| 5276 | 5182 | 
| 5277   // ----------- S t a t e ------------- | 5183   // ----------- S t a t e ------------- | 
| 5278   //  -- cp : context | 5184   //  -- cp : context | 
| 5279   //  -- r3 : number of rest parameters (tagged) | 5185   //  -- r2 : number of rest parameters (tagged) | 
| 5280   //  -- r5 : pointer just past the first rest parameter | 5186   //  -- r4 : pointer just past the first rest parameter | 
| 5281   //  -- r9 : size of rest parameters | 5187   //  -- r8 : size of rest parameters | 
| 5282   //  -- lr : return address | 5188   //  -- lr : return address | 
| 5283   // ----------------------------------- | 5189   // ----------------------------------- | 
| 5284 | 5190 | 
| 5285   // Allocate space for the strict arguments object plus the backing store. | 5191   // Allocate space for the strict arguments object plus the backing store. | 
| 5286   Label allocate, done_allocate; | 5192   Label allocate, done_allocate; | 
| 5287   __ mov(r4, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize)); | 5193   __ mov(r3, Operand(JSStrictArgumentsObject::kSize + FixedArray::kHeaderSize)); | 
| 5288   __ add(r4, r4, r9); | 5194   __ AddP(r3, r3, r8); | 
| 5289   __ Allocate(r4, r6, r7, r8, &allocate, TAG_OBJECT); | 5195   __ Allocate(r3, r5, r6, r7, &allocate, TAG_OBJECT); | 
| 5290   __ bind(&done_allocate); | 5196   __ bind(&done_allocate); | 
| 5291 | 5197 | 
| 5292   // Set up the elements array in r6. | 5198   // Set up the elements array in r5. | 
| 5293   __ LoadRoot(r4, Heap::kFixedArrayMapRootIndex); | 5199   __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); | 
| 5294   __ StoreP(r4, FieldMemOperand(r6, FixedArray::kMapOffset), r0); | 5200   __ StoreP(r3, FieldMemOperand(r5, FixedArray::kMapOffset), r0); | 
| 5295   __ StoreP(r3, FieldMemOperand(r6, FixedArray::kLengthOffset), r0); | 5201   __ StoreP(r2, FieldMemOperand(r5, FixedArray::kLengthOffset), r0); | 
| 5296   __ addi(r7, r6, | 5202   __ AddP(r6, r5, | 
| 5297           Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); | 5203           Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize)); | 
| 5298   { | 5204   { | 
| 5299     Label loop, done_loop; | 5205     Label loop, done_loop; | 
| 5300     __ SmiUntag(r0, r3, SetRC); | 5206     __ SmiUntag(r1, r2); | 
| 5301     __ beq(&done_loop, cr0); | 5207     __ LoadAndTestP(r1, r1); | 
| 5302     __ mtctr(r0); | 5208     __ beq(&done_loop); | 
| 5303     __ bind(&loop); | 5209     __ bind(&loop); | 
| 5304     __ LoadPU(ip, MemOperand(r5, -kPointerSize)); | 5210     __ lay(r4, MemOperand(r4, -kPointerSize)); | 
| 5305     __ StorePU(ip, MemOperand(r7, kPointerSize)); | 5211     __ LoadP(ip, MemOperand(r4)); | 
| 5306     __ bdnz(&loop); | 5212     __ la(r6, MemOperand(r6, kPointerSize)); | 
|  | 5213     __ StoreP(ip, MemOperand(r6)); | 
|  | 5214     __ BranchOnCount(r1, &loop); | 
| 5307     __ bind(&done_loop); | 5215     __ bind(&done_loop); | 
| 5308     __ addi(r7, r7, Operand(kPointerSize)); | 5216     __ AddP(r6, r6, Operand(kPointerSize)); | 
| 5309   } | 5217   } | 
| 5310 | 5218 | 
| 5311   // Set up the rest parameter array in r7. | 5219   // Set up the rest parameter array in r6. | 
| 5312   __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r4); | 5220   __ LoadNativeContextSlot(Context::STRICT_ARGUMENTS_MAP_INDEX, r3); | 
| 5313   __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kMapOffset)); | 5221   __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kMapOffset)); | 
| 5314   __ LoadRoot(r4, Heap::kEmptyFixedArrayRootIndex); | 5222   __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex); | 
| 5315   __ StoreP(r4, MemOperand(r7, JSStrictArgumentsObject::kPropertiesOffset)); | 5223   __ StoreP(r3, MemOperand(r6, JSStrictArgumentsObject::kPropertiesOffset)); | 
| 5316   __ StoreP(r6, MemOperand(r7, JSStrictArgumentsObject::kElementsOffset)); | 5224   __ StoreP(r5, MemOperand(r6, JSStrictArgumentsObject::kElementsOffset)); | 
| 5317   __ StoreP(r3, MemOperand(r7, JSStrictArgumentsObject::kLengthOffset)); | 5225   __ StoreP(r2, MemOperand(r6, JSStrictArgumentsObject::kLengthOffset)); | 
| 5318   STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize); | 5226   STATIC_ASSERT(JSStrictArgumentsObject::kSize == 4 * kPointerSize); | 
| 5319   __ addi(r3, r7, Operand(kHeapObjectTag)); | 5227   __ AddP(r2, r6, Operand(kHeapObjectTag)); | 
| 5320   __ Ret(); | 5228   __ Ret(); | 
| 5321 | 5229 | 
| 5322   // Fall back to %AllocateInNewSpace. | 5230   // Fall back to %AllocateInNewSpace. | 
| 5323   __ bind(&allocate); | 5231   __ bind(&allocate); | 
| 5324   { | 5232   { | 
| 5325     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 5233     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
| 5326     __ SmiTag(r4); | 5234     __ SmiTag(r3); | 
| 5327     __ Push(r3, r5, r4); | 5235     __ Push(r2, r4, r3); | 
| 5328     __ CallRuntime(Runtime::kAllocateInNewSpace); | 5236     __ CallRuntime(Runtime::kAllocateInNewSpace); | 
| 5329     __ mr(r6, r3); | 5237     __ LoadRR(r5, r2); | 
| 5330     __ Pop(r3, r5); | 5238     __ Pop(r2, r4); | 
| 5331   } | 5239   } | 
| 5332   __ b(&done_allocate); | 5240   __ b(&done_allocate); | 
| 5333 } | 5241 } | 
| 5334 | 5242 | 
| 5335 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) { | 5243 void LoadGlobalViaContextStub::Generate(MacroAssembler* masm) { | 
| 5336   Register context = cp; | 5244   Register context = cp; | 
| 5337   Register result = r3; | 5245   Register result = r2; | 
| 5338   Register slot = r5; | 5246   Register slot = r4; | 
| 5339 | 5247 | 
| 5340   // Go up the context chain to the script context. | 5248   // Go up the context chain to the script context. | 
| 5341   for (int i = 0; i < depth(); ++i) { | 5249   for (int i = 0; i < depth(); ++i) { | 
| 5342     __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX)); | 5250     __ LoadP(result, ContextMemOperand(context, Context::PREVIOUS_INDEX)); | 
| 5343     context = result; | 5251     context = result; | 
| 5344   } | 5252   } | 
| 5345 | 5253 | 
| 5346   // Load the PropertyCell value at the specified slot. | 5254   // Load the PropertyCell value at the specified slot. | 
| 5347   __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2)); | 5255   __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2)); | 
| 5348   __ add(result, context, r0); | 5256   __ AddP(result, context, r0); | 
| 5349   __ LoadP(result, ContextMemOperand(result)); | 5257   __ LoadP(result, ContextMemOperand(result)); | 
| 5350   __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset)); | 5258   __ LoadP(result, FieldMemOperand(result, PropertyCell::kValueOffset)); | 
| 5351 | 5259 | 
| 5352   // If the result is not the_hole, return. Otherwise, handle in the runtime. | 5260   // If the result is not the_hole, return. Otherwise, handle in the runtime. | 
| 5353   __ CompareRoot(result, Heap::kTheHoleValueRootIndex); | 5261   __ CompareRoot(result, Heap::kTheHoleValueRootIndex); | 
| 5354   __ Ret(ne); | 5262   Label runtime; | 
|  | 5263   __ beq(&runtime); | 
|  | 5264   __ Ret(); | 
|  | 5265   __ bind(&runtime); | 
| 5355 | 5266 | 
| 5356   // Fall back to runtime. | 5267   // Fall back to runtime. | 
| 5357   __ SmiTag(slot); | 5268   __ SmiTag(slot); | 
| 5358   __ Push(slot); | 5269   __ Push(slot); | 
| 5359   __ TailCallRuntime(Runtime::kLoadGlobalViaContext); | 5270   __ TailCallRuntime(Runtime::kLoadGlobalViaContext); | 
| 5360 } | 5271 } | 
| 5361 | 5272 | 
|  | 5273 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) { | 
|  | 5274   Register value = r2; | 
|  | 5275   Register slot = r4; | 
| 5362 | 5276 | 
| 5363 void StoreGlobalViaContextStub::Generate(MacroAssembler* masm) { | 5277   Register cell = r3; | 
| 5364   Register value = r3; | 5278   Register cell_details = r5; | 
| 5365   Register slot = r5; | 5279   Register cell_value = r6; | 
| 5366 | 5280   Register cell_value_map = r7; | 
| 5367   Register cell = r4; | 5281   Register scratch = r8; | 
| 5368   Register cell_details = r6; |  | 
| 5369   Register cell_value = r7; |  | 
| 5370   Register cell_value_map = r8; |  | 
| 5371   Register scratch = r9; |  | 
| 5372 | 5282 | 
| 5373   Register context = cp; | 5283   Register context = cp; | 
| 5374   Register context_temp = cell; | 5284   Register context_temp = cell; | 
| 5375 | 5285 | 
| 5376   Label fast_heapobject_case, fast_smi_case, slow_case; | 5286   Label fast_heapobject_case, fast_smi_case, slow_case; | 
| 5377 | 5287 | 
| 5378   if (FLAG_debug_code) { | 5288   if (FLAG_debug_code) { | 
| 5379     __ CompareRoot(value, Heap::kTheHoleValueRootIndex); | 5289     __ CompareRoot(value, Heap::kTheHoleValueRootIndex); | 
| 5380     __ Check(ne, kUnexpectedValue); | 5290     __ Check(ne, kUnexpectedValue); | 
| 5381   } | 5291   } | 
| 5382 | 5292 | 
| 5383   // Go up the context chain to the script context. | 5293   // Go up the context chain to the script context. | 
| 5384   for (int i = 0; i < depth(); i++) { | 5294   for (int i = 0; i < depth(); i++) { | 
| 5385     __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX)); | 5295     __ LoadP(context_temp, ContextMemOperand(context, Context::PREVIOUS_INDEX)); | 
| 5386     context = context_temp; | 5296     context = context_temp; | 
| 5387   } | 5297   } | 
| 5388 | 5298 | 
| 5389   // Load the PropertyCell at the specified slot. | 5299   // Load the PropertyCell at the specified slot. | 
| 5390   __ ShiftLeftImm(r0, slot, Operand(kPointerSizeLog2)); | 5300   __ ShiftLeftP(r0, slot, Operand(kPointerSizeLog2)); | 
| 5391   __ add(cell, context, r0); | 5301   __ AddP(cell, context, r0); | 
| 5392   __ LoadP(cell, ContextMemOperand(cell)); | 5302   __ LoadP(cell, ContextMemOperand(cell)); | 
| 5393 | 5303 | 
| 5394   // Load PropertyDetails for the cell (actually only the cell_type and kind). | 5304   // Load PropertyDetails for the cell (actually only the cell_type and kind). | 
| 5395   __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset)); | 5305   __ LoadP(cell_details, FieldMemOperand(cell, PropertyCell::kDetailsOffset)); | 
| 5396   __ SmiUntag(cell_details); | 5306   __ SmiUntag(cell_details); | 
| 5397   __ andi(cell_details, cell_details, | 5307   __ AndP(cell_details, cell_details, | 
| 5398           Operand(PropertyDetails::PropertyCellTypeField::kMask | | 5308           Operand(PropertyDetails::PropertyCellTypeField::kMask | | 
| 5399                   PropertyDetails::KindField::kMask | | 5309                   PropertyDetails::KindField::kMask | | 
| 5400                   PropertyDetails::kAttributesReadOnlyMask)); | 5310                   PropertyDetails::kAttributesReadOnlyMask)); | 
| 5401 | 5311 | 
| 5402   // Check if PropertyCell holds mutable data. | 5312   // Check if PropertyCell holds mutable data. | 
| 5403   Label not_mutable_data; | 5313   Label not_mutable_data; | 
| 5404   __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode( | 5314   __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode( | 
| 5405                                     PropertyCellType::kMutable) | | 5315                                     PropertyCellType::kMutable) | | 
| 5406                                 PropertyDetails::KindField::encode(kData))); | 5316                                 PropertyDetails::KindField::encode(kData))); | 
| 5407   __ bne(&not_mutable_data); | 5317   __ bne(&not_mutable_data); | 
| 5408   __ JumpIfSmi(value, &fast_smi_case); | 5318   __ JumpIfSmi(value, &fast_smi_case); | 
| 5409 | 5319 | 
| 5410   __ bind(&fast_heapobject_case); | 5320   __ bind(&fast_heapobject_case); | 
| 5411   __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0); | 5321   __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0); | 
| 5412   // RecordWriteField clobbers the value register, so we copy it before the | 5322   // RecordWriteField clobbers the value register, so we copy it before the | 
| 5413   // call. | 5323   // call. | 
| 5414   __ mr(r6, value); | 5324   __ LoadRR(r5, value); | 
| 5415   __ RecordWriteField(cell, PropertyCell::kValueOffset, r6, scratch, | 5325   __ RecordWriteField(cell, PropertyCell::kValueOffset, r5, scratch, | 
| 5416                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | 5326                       kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET, | 
| 5417                       OMIT_SMI_CHECK); | 5327                       OMIT_SMI_CHECK); | 
| 5418   __ Ret(); | 5328   __ Ret(); | 
| 5419 | 5329 | 
| 5420   __ bind(&not_mutable_data); | 5330   __ bind(&not_mutable_data); | 
| 5421   // Check if PropertyCell value matches the new value (relevant for Constant, | 5331   // Check if PropertyCell value matches the new value (relevant for Constant, | 
| 5422   // ConstantType and Undefined cells). | 5332   // ConstantType and Undefined cells). | 
| 5423   Label not_same_value; | 5333   Label not_same_value; | 
| 5424   __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset)); | 5334   __ LoadP(cell_value, FieldMemOperand(cell, PropertyCell::kValueOffset)); | 
| 5425   __ cmp(cell_value, value); | 5335   __ CmpP(cell_value, value); | 
| 5426   __ bne(&not_same_value); | 5336   __ bne(&not_same_value); | 
| 5427 | 5337 | 
| 5428   // Make sure the PropertyCell is not marked READ_ONLY. | 5338   // Make sure the PropertyCell is not marked READ_ONLY. | 
| 5429   __ andi(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask)); | 5339   __ AndP(r0, cell_details, Operand(PropertyDetails::kAttributesReadOnlyMask)); | 
| 5430   __ bne(&slow_case, cr0); | 5340   __ bne(&slow_case); | 
| 5431 | 5341 | 
| 5432   if (FLAG_debug_code) { | 5342   if (FLAG_debug_code) { | 
| 5433     Label done; | 5343     Label done; | 
| 5434     // This can only be true for Constant, ConstantType and Undefined cells, | 5344     // This can only be true for Constant, ConstantType and Undefined cells, | 
| 5435     // because we never store the_hole via this stub. | 5345     // because we never store the_hole via this stub. | 
| 5436     __ cmpi(cell_details, | 5346     __ CmpP(cell_details, | 
| 5437             Operand(PropertyDetails::PropertyCellTypeField::encode( | 5347             Operand(PropertyDetails::PropertyCellTypeField::encode( | 
| 5438                         PropertyCellType::kConstant) | | 5348                         PropertyCellType::kConstant) | | 
| 5439                     PropertyDetails::KindField::encode(kData))); | 5349                     PropertyDetails::KindField::encode(kData))); | 
| 5440     __ beq(&done); | 5350     __ beq(&done); | 
| 5441     __ cmpi(cell_details, | 5351     __ CmpP(cell_details, | 
| 5442             Operand(PropertyDetails::PropertyCellTypeField::encode( | 5352             Operand(PropertyDetails::PropertyCellTypeField::encode( | 
| 5443                         PropertyCellType::kConstantType) | | 5353                         PropertyCellType::kConstantType) | | 
| 5444                     PropertyDetails::KindField::encode(kData))); | 5354                     PropertyDetails::KindField::encode(kData))); | 
| 5445     __ beq(&done); | 5355     __ beq(&done); | 
| 5446     __ cmpi(cell_details, | 5356     __ CmpP(cell_details, | 
| 5447             Operand(PropertyDetails::PropertyCellTypeField::encode( | 5357             Operand(PropertyDetails::PropertyCellTypeField::encode( | 
| 5448                         PropertyCellType::kUndefined) | | 5358                         PropertyCellType::kUndefined) | | 
| 5449                     PropertyDetails::KindField::encode(kData))); | 5359                     PropertyDetails::KindField::encode(kData))); | 
| 5450     __ Check(eq, kUnexpectedValue); | 5360     __ Check(eq, kUnexpectedValue); | 
| 5451     __ bind(&done); | 5361     __ bind(&done); | 
| 5452   } | 5362   } | 
| 5453   __ Ret(); | 5363   __ Ret(); | 
| 5454   __ bind(&not_same_value); | 5364   __ bind(&not_same_value); | 
| 5455 | 5365 | 
| 5456   // Check if PropertyCell contains data with constant type (and is not | 5366   // Check if PropertyCell contains data with constant type (and is not | 
| 5457   // READ_ONLY). | 5367   // READ_ONLY). | 
| 5458   __ cmpi(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode( | 5368   __ CmpP(cell_details, Operand(PropertyDetails::PropertyCellTypeField::encode( | 
| 5459                                     PropertyCellType::kConstantType) | | 5369                                     PropertyCellType::kConstantType) | | 
| 5460                                 PropertyDetails::KindField::encode(kData))); | 5370                                 PropertyDetails::KindField::encode(kData))); | 
| 5461   __ bne(&slow_case); | 5371   __ bne(&slow_case); | 
| 5462 | 5372 | 
| 5463   // Now either both old and new values must be smis or both must be heap | 5373   // Now either both old and new values must be smis or both must be heap | 
| 5464   // objects with the same map. | 5374   // objects with the same map. | 
| 5465   Label value_is_heap_object; | 5375   Label value_is_heap_object; | 
| 5466   __ JumpIfNotSmi(value, &value_is_heap_object); | 5376   __ JumpIfNotSmi(value, &value_is_heap_object); | 
| 5467   __ JumpIfNotSmi(cell_value, &slow_case); | 5377   __ JumpIfNotSmi(cell_value, &slow_case); | 
| 5468   // Old and new values are smis, no need for a write barrier here. | 5378   // Old and new values are smis, no need for a write barrier here. | 
| 5469   __ bind(&fast_smi_case); | 5379   __ bind(&fast_smi_case); | 
| 5470   __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0); | 5380   __ StoreP(value, FieldMemOperand(cell, PropertyCell::kValueOffset), r0); | 
| 5471   __ Ret(); | 5381   __ Ret(); | 
| 5472 | 5382 | 
| 5473   __ bind(&value_is_heap_object); | 5383   __ bind(&value_is_heap_object); | 
| 5474   __ JumpIfSmi(cell_value, &slow_case); | 5384   __ JumpIfSmi(cell_value, &slow_case); | 
| 5475 | 5385 | 
| 5476   __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset)); | 5386   __ LoadP(cell_value_map, FieldMemOperand(cell_value, HeapObject::kMapOffset)); | 
| 5477   __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); | 5387   __ LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); | 
| 5478   __ cmp(cell_value_map, scratch); | 5388   __ CmpP(cell_value_map, scratch); | 
| 5479   __ beq(&fast_heapobject_case); | 5389   __ beq(&fast_heapobject_case); | 
| 5480 | 5390 | 
| 5481   // Fall back to runtime. | 5391   // Fall back to runtime. | 
| 5482   __ bind(&slow_case); | 5392   __ bind(&slow_case); | 
| 5483   __ SmiTag(slot); | 5393   __ SmiTag(slot); | 
| 5484   __ Push(slot, value); | 5394   __ Push(slot, value); | 
| 5485   __ TailCallRuntime(is_strict(language_mode()) | 5395   __ TailCallRuntime(is_strict(language_mode()) | 
| 5486                          ? Runtime::kStoreGlobalViaContext_Strict | 5396                          ? Runtime::kStoreGlobalViaContext_Strict | 
| 5487                          : Runtime::kStoreGlobalViaContext_Sloppy); | 5397                          : Runtime::kStoreGlobalViaContext_Sloppy); | 
| 5488 } | 5398 } | 
| 5489 | 5399 | 
| 5490 |  | 
| 5491 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { | 5400 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { | 
| 5492   return ref0.address() - ref1.address(); | 5401   return ref0.address() - ref1.address(); | 
| 5493 } | 5402 } | 
| 5494 | 5403 | 
| 5495 |  | 
| 5496 // Calls an API function.  Allocates HandleScope, extracts returned value | 5404 // Calls an API function.  Allocates HandleScope, extracts returned value | 
| 5497 // from handle and propagates exceptions.  Restores context.  stack_space | 5405 // from handle and propagates exceptions.  Restores context.  stack_space | 
| 5498 // - space to be unwound on exit (includes the call JS arguments space and | 5406 // - space to be unwound on exit (includes the call JS arguments space and | 
| 5499 // the additional space allocated for the fast call). | 5407 // the additional space allocated for the fast call). | 
| 5500 static void CallApiFunctionAndReturn(MacroAssembler* masm, | 5408 static void CallApiFunctionAndReturn(MacroAssembler* masm, | 
| 5501                                      Register function_address, | 5409                                      Register function_address, | 
| 5502                                      ExternalReference thunk_ref, | 5410                                      ExternalReference thunk_ref, | 
| 5503                                      int stack_space, | 5411                                      int stack_space, | 
| 5504                                      MemOperand* stack_space_operand, | 5412                                      MemOperand* stack_space_operand, | 
| 5505                                      MemOperand return_value_operand, | 5413                                      MemOperand return_value_operand, | 
| 5506                                      MemOperand* context_restore_operand) { | 5414                                      MemOperand* context_restore_operand) { | 
| 5507   Isolate* isolate = masm->isolate(); | 5415   Isolate* isolate = masm->isolate(); | 
| 5508   ExternalReference next_address = | 5416   ExternalReference next_address = | 
| 5509       ExternalReference::handle_scope_next_address(isolate); | 5417       ExternalReference::handle_scope_next_address(isolate); | 
| 5510   const int kNextOffset = 0; | 5418   const int kNextOffset = 0; | 
| 5511   const int kLimitOffset = AddressOffset( | 5419   const int kLimitOffset = AddressOffset( | 
| 5512       ExternalReference::handle_scope_limit_address(isolate), next_address); | 5420       ExternalReference::handle_scope_limit_address(isolate), next_address); | 
| 5513   const int kLevelOffset = AddressOffset( | 5421   const int kLevelOffset = AddressOffset( | 
| 5514       ExternalReference::handle_scope_level_address(isolate), next_address); | 5422       ExternalReference::handle_scope_level_address(isolate), next_address); | 
| 5515 | 5423 | 
| 5516   // Additional parameter is the address of the actual callback. | 5424   // Additional parameter is the address of the actual callback. | 
| 5517   DCHECK(function_address.is(r4) || function_address.is(r5)); | 5425   DCHECK(function_address.is(r3) || function_address.is(r4)); | 
| 5518   Register scratch = r6; | 5426   Register scratch = r5; | 
| 5519 | 5427 | 
| 5520   __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate))); | 5428   __ mov(scratch, Operand(ExternalReference::is_profiling_address(isolate))); | 
| 5521   __ lbz(scratch, MemOperand(scratch, 0)); | 5429   __ LoadlB(scratch, MemOperand(scratch, 0)); | 
| 5522   __ cmpi(scratch, Operand::Zero()); | 5430   __ CmpP(scratch, Operand::Zero()); | 
| 5523 | 5431 | 
| 5524   if (CpuFeatures::IsSupported(ISELECT)) { | 5432   Label profiler_disabled; | 
| 5525     __ mov(scratch, Operand(thunk_ref)); | 5433   Label end_profiler_check; | 
| 5526     __ isel(eq, scratch, function_address, scratch); | 5434   __ beq(&profiler_disabled, Label::kNear); | 
| 5527   } else { | 5435   __ mov(scratch, Operand(thunk_ref)); | 
| 5528     Label profiler_disabled; | 5436   __ b(&end_profiler_check, Label::kNear); | 
| 5529     Label end_profiler_check; | 5437   __ bind(&profiler_disabled); | 
| 5530     __ beq(&profiler_disabled); | 5438   __ LoadRR(scratch, function_address); | 
| 5531     __ mov(scratch, Operand(thunk_ref)); | 5439   __ bind(&end_profiler_check); | 
| 5532     __ b(&end_profiler_check); |  | 
| 5533     __ bind(&profiler_disabled); |  | 
| 5534     __ mr(scratch, function_address); |  | 
| 5535     __ bind(&end_profiler_check); |  | 
| 5536   } |  | 
| 5537 | 5440 | 
| 5538   // Allocate HandleScope in callee-save registers. | 5441   // Allocate HandleScope in callee-save registers. | 
| 5539   // r17 - next_address | 5442   // r9 - next_address | 
| 5540   // r14 - next_address->kNextOffset | 5443   // r6 - next_address->kNextOffset | 
| 5541   // r15 - next_address->kLimitOffset | 5444   // r7 - next_address->kLimitOffset | 
| 5542   // r16 - next_address->kLevelOffset | 5445   // r8 - next_address->kLevelOffset | 
| 5543   __ mov(r17, Operand(next_address)); | 5446   __ mov(r9, Operand(next_address)); | 
| 5544   __ LoadP(r14, MemOperand(r17, kNextOffset)); | 5447   __ LoadP(r6, MemOperand(r9, kNextOffset)); | 
| 5545   __ LoadP(r15, MemOperand(r17, kLimitOffset)); | 5448   __ LoadP(r7, MemOperand(r9, kLimitOffset)); | 
| 5546   __ lwz(r16, MemOperand(r17, kLevelOffset)); | 5449   __ LoadlW(r8, MemOperand(r9, kLevelOffset)); | 
| 5547   __ addi(r16, r16, Operand(1)); | 5450   __ AddP(r8, Operand(1)); | 
| 5548   __ stw(r16, MemOperand(r17, kLevelOffset)); | 5451   __ StoreW(r8, MemOperand(r9, kLevelOffset)); | 
| 5549 | 5452 | 
| 5550   if (FLAG_log_timer_events) { | 5453   if (FLAG_log_timer_events) { | 
| 5551     FrameScope frame(masm, StackFrame::MANUAL); | 5454     FrameScope frame(masm, StackFrame::MANUAL); | 
| 5552     __ PushSafepointRegisters(); | 5455     __ PushSafepointRegisters(); | 
| 5553     __ PrepareCallCFunction(1, r3); | 5456     __ PrepareCallCFunction(1, r2); | 
| 5554     __ mov(r3, Operand(ExternalReference::isolate_address(isolate))); | 5457     __ mov(r2, Operand(ExternalReference::isolate_address(isolate))); | 
| 5555     __ CallCFunction(ExternalReference::log_enter_external_function(isolate), | 5458     __ CallCFunction(ExternalReference::log_enter_external_function(isolate), | 
| 5556                      1); | 5459                      1); | 
| 5557     __ PopSafepointRegisters(); | 5460     __ PopSafepointRegisters(); | 
| 5558   } | 5461   } | 
| 5559 | 5462 | 
| 5560   // Native call returns to the DirectCEntry stub which redirects to the | 5463   // Native call returns to the DirectCEntry stub which redirects to the | 
| 5561   // return address pushed on stack (could have moved after GC). | 5464   // return address pushed on stack (could have moved after GC). | 
| 5562   // DirectCEntry stub itself is generated early and never moves. | 5465   // DirectCEntry stub itself is generated early and never moves. | 
| 5563   DirectCEntryStub stub(isolate); | 5466   DirectCEntryStub stub(isolate); | 
| 5564   stub.GenerateCall(masm, scratch); | 5467   stub.GenerateCall(masm, scratch); | 
| 5565 | 5468 | 
| 5566   if (FLAG_log_timer_events) { | 5469   if (FLAG_log_timer_events) { | 
| 5567     FrameScope frame(masm, StackFrame::MANUAL); | 5470     FrameScope frame(masm, StackFrame::MANUAL); | 
| 5568     __ PushSafepointRegisters(); | 5471     __ PushSafepointRegisters(); | 
| 5569     __ PrepareCallCFunction(1, r3); | 5472     __ PrepareCallCFunction(1, r2); | 
| 5570     __ mov(r3, Operand(ExternalReference::isolate_address(isolate))); | 5473     __ mov(r2, Operand(ExternalReference::isolate_address(isolate))); | 
| 5571     __ CallCFunction(ExternalReference::log_leave_external_function(isolate), | 5474     __ CallCFunction(ExternalReference::log_leave_external_function(isolate), | 
| 5572                      1); | 5475                      1); | 
| 5573     __ PopSafepointRegisters(); | 5476     __ PopSafepointRegisters(); | 
| 5574   } | 5477   } | 
| 5575 | 5478 | 
| 5576   Label promote_scheduled_exception; | 5479   Label promote_scheduled_exception; | 
| 5577   Label delete_allocated_handles; | 5480   Label delete_allocated_handles; | 
| 5578   Label leave_exit_frame; | 5481   Label leave_exit_frame; | 
| 5579   Label return_value_loaded; | 5482   Label return_value_loaded; | 
| 5580 | 5483 | 
| 5581   // load value from ReturnValue | 5484   // load value from ReturnValue | 
| 5582   __ LoadP(r3, return_value_operand); | 5485   __ LoadP(r2, return_value_operand); | 
| 5583   __ bind(&return_value_loaded); | 5486   __ bind(&return_value_loaded); | 
| 5584   // No more valid handles (the result handle was the last one). Restore | 5487   // No more valid handles (the result handle was the last one). Restore | 
| 5585   // previous handle scope. | 5488   // previous handle scope. | 
| 5586   __ StoreP(r14, MemOperand(r17, kNextOffset)); | 5489   __ StoreP(r6, MemOperand(r9, kNextOffset)); | 
| 5587   if (__ emit_debug_code()) { | 5490   if (__ emit_debug_code()) { | 
| 5588     __ lwz(r4, MemOperand(r17, kLevelOffset)); | 5491     __ LoadlW(r3, MemOperand(r9, kLevelOffset)); | 
| 5589     __ cmp(r4, r16); | 5492     __ CmpP(r3, r8); | 
| 5590     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall); | 5493     __ Check(eq, kUnexpectedLevelAfterReturnFromApiCall); | 
| 5591   } | 5494   } | 
| 5592   __ subi(r16, r16, Operand(1)); | 5495   __ SubP(r8, Operand(1)); | 
| 5593   __ stw(r16, MemOperand(r17, kLevelOffset)); | 5496   __ StoreW(r8, MemOperand(r9, kLevelOffset)); | 
| 5594   __ LoadP(r0, MemOperand(r17, kLimitOffset)); | 5497   __ CmpP(r7, MemOperand(r9, kLimitOffset)); | 
| 5595   __ cmp(r15, r0); | 5498   __ bne(&delete_allocated_handles, Label::kNear); | 
| 5596   __ bne(&delete_allocated_handles); |  | 
| 5597 | 5499 | 
| 5598   // Leave the API exit frame. | 5500   // Leave the API exit frame. | 
| 5599   __ bind(&leave_exit_frame); | 5501   __ bind(&leave_exit_frame); | 
| 5600   bool restore_context = context_restore_operand != NULL; | 5502   bool restore_context = context_restore_operand != NULL; | 
| 5601   if (restore_context) { | 5503   if (restore_context) { | 
| 5602     __ LoadP(cp, *context_restore_operand); | 5504     __ LoadP(cp, *context_restore_operand); | 
| 5603   } | 5505   } | 
| 5604   // LeaveExitFrame expects unwind space to be in a register. | 5506   // LeaveExitFrame expects unwind space to be in a register. | 
| 5605   if (stack_space_operand != NULL) { | 5507   if (stack_space_operand != NULL) { | 
| 5606     __ lwz(r14, *stack_space_operand); | 5508     __ l(r6, *stack_space_operand); | 
| 5607   } else { | 5509   } else { | 
| 5608     __ mov(r14, Operand(stack_space)); | 5510     __ mov(r6, Operand(stack_space)); | 
| 5609   } | 5511   } | 
| 5610   __ LeaveExitFrame(false, r14, !restore_context, stack_space_operand != NULL); | 5512   __ LeaveExitFrame(false, r6, !restore_context, stack_space_operand != NULL); | 
| 5611 | 5513 | 
| 5612   // Check if the function scheduled an exception. | 5514   // Check if the function scheduled an exception. | 
| 5613   __ LoadRoot(r14, Heap::kTheHoleValueRootIndex); | 5515   __ mov(r7, Operand(ExternalReference::scheduled_exception_address(isolate))); | 
| 5614   __ mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate))); | 5516   __ LoadP(r7, MemOperand(r7)); | 
| 5615   __ LoadP(r15, MemOperand(r15)); | 5517   __ CompareRoot(r7, Heap::kTheHoleValueRootIndex); | 
| 5616   __ cmp(r14, r15); | 5518   __ bne(&promote_scheduled_exception, Label::kNear); | 
| 5617   __ bne(&promote_scheduled_exception); |  | 
| 5618 | 5519 | 
| 5619   __ blr(); | 5520   __ b(r14); | 
| 5620 | 5521 | 
| 5621   // Re-throw by promoting a scheduled exception. | 5522   // Re-throw by promoting a scheduled exception. | 
| 5622   __ bind(&promote_scheduled_exception); | 5523   __ bind(&promote_scheduled_exception); | 
| 5623   __ TailCallRuntime(Runtime::kPromoteScheduledException); | 5524   __ TailCallRuntime(Runtime::kPromoteScheduledException); | 
| 5624 | 5525 | 
| 5625   // HandleScope limit has changed. Delete allocated extensions. | 5526   // HandleScope limit has changed. Delete allocated extensions. | 
| 5626   __ bind(&delete_allocated_handles); | 5527   __ bind(&delete_allocated_handles); | 
| 5627   __ StoreP(r15, MemOperand(r17, kLimitOffset)); | 5528   __ StoreP(r7, MemOperand(r9, kLimitOffset)); | 
| 5628   __ mr(r14, r3); | 5529   __ LoadRR(r6, r2); | 
| 5629   __ PrepareCallCFunction(1, r15); | 5530   __ PrepareCallCFunction(1, r7); | 
| 5630   __ mov(r3, Operand(ExternalReference::isolate_address(isolate))); | 5531   __ mov(r2, Operand(ExternalReference::isolate_address(isolate))); | 
| 5631   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate), | 5532   __ CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate), | 
| 5632                    1); | 5533                    1); | 
| 5633   __ mr(r3, r14); | 5534   __ LoadRR(r2, r6); | 
| 5634   __ b(&leave_exit_frame); | 5535   __ b(&leave_exit_frame, Label::kNear); | 
| 5635 } | 5536 } | 
| 5636 | 5537 | 
| 5637 static void CallApiFunctionStubHelper(MacroAssembler* masm, | 5538 static void CallApiFunctionStubHelper(MacroAssembler* masm, | 
| 5638                                       const ParameterCount& argc, | 5539                                       const ParameterCount& argc, | 
| 5639                                       bool return_first_arg, | 5540                                       bool return_first_arg, | 
| 5640                                       bool call_data_undefined, bool is_lazy) { | 5541                                       bool call_data_undefined, bool is_lazy) { | 
| 5641   // ----------- S t a t e ------------- | 5542   // ----------- S t a t e ------------- | 
| 5642   //  -- r3                  : callee | 5543   //  -- r2                  : callee | 
| 5643   //  -- r7                  : call_data | 5544   //  -- r6                  : call_data | 
| 5644   //  -- r5                  : holder | 5545   //  -- r4                  : holder | 
| 5645   //  -- r4                  : api_function_address | 5546   //  -- r3                  : api_function_address | 
| 5646   //  -- r6                  : number of arguments if argc is a register | 5547   //  -- r5                  : number of arguments if argc is a register | 
| 5647   //  -- cp                  : context | 5548   //  -- cp                  : context | 
| 5648   //  -- | 5549   //  -- | 
| 5649   //  -- sp[0]               : last argument | 5550   //  -- sp[0]               : last argument | 
| 5650   //  -- ... | 5551   //  -- ... | 
| 5651   //  -- sp[(argc - 1)* 4]   : first argument | 5552   //  -- sp[(argc - 1)* 4]   : first argument | 
| 5652   //  -- sp[argc * 4]        : receiver | 5553   //  -- sp[argc * 4]        : receiver | 
| 5653   // ----------------------------------- | 5554   // ----------------------------------- | 
| 5654 | 5555 | 
| 5655   Register callee = r3; | 5556   Register callee = r2; | 
| 5656   Register call_data = r7; | 5557   Register call_data = r6; | 
| 5657   Register holder = r5; | 5558   Register holder = r4; | 
| 5658   Register api_function_address = r4; | 5559   Register api_function_address = r3; | 
| 5659   Register context = cp; | 5560   Register context = cp; | 
| 5660 | 5561 | 
| 5661   typedef FunctionCallbackArguments FCA; | 5562   typedef FunctionCallbackArguments FCA; | 
| 5662 | 5563 | 
| 5663   STATIC_ASSERT(FCA::kContextSaveIndex == 6); | 5564   STATIC_ASSERT(FCA::kContextSaveIndex == 6); | 
| 5664   STATIC_ASSERT(FCA::kCalleeIndex == 5); | 5565   STATIC_ASSERT(FCA::kCalleeIndex == 5); | 
| 5665   STATIC_ASSERT(FCA::kDataIndex == 4); | 5566   STATIC_ASSERT(FCA::kDataIndex == 4); | 
| 5666   STATIC_ASSERT(FCA::kReturnValueOffset == 3); | 5567   STATIC_ASSERT(FCA::kReturnValueOffset == 3); | 
| 5667   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); | 5568   STATIC_ASSERT(FCA::kReturnValueDefaultValueIndex == 2); | 
| 5668   STATIC_ASSERT(FCA::kIsolateIndex == 1); | 5569   STATIC_ASSERT(FCA::kIsolateIndex == 1); | 
| 5669   STATIC_ASSERT(FCA::kHolderIndex == 0); | 5570   STATIC_ASSERT(FCA::kHolderIndex == 0); | 
| 5670   STATIC_ASSERT(FCA::kArgsLength == 7); | 5571   STATIC_ASSERT(FCA::kArgsLength == 7); | 
| 5671 | 5572 | 
| 5672   DCHECK(argc.is_immediate() || r3.is(argc.reg())); | 5573   DCHECK(argc.is_immediate() || r2.is(argc.reg())); | 
| 5673 | 5574 | 
| 5674   // context save | 5575   // context save | 
| 5675   __ push(context); | 5576   __ push(context); | 
| 5676   if (!is_lazy) { | 5577   if (!is_lazy) { | 
| 5677     // load context from callee | 5578     // load context from callee | 
| 5678     __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset)); | 5579     __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset)); | 
| 5679   } | 5580   } | 
| 5680 | 5581 | 
| 5681   // callee | 5582   // callee | 
| 5682   __ push(callee); | 5583   __ push(callee); | 
| 5683 | 5584 | 
| 5684   // call data | 5585   // call data | 
| 5685   __ push(call_data); | 5586   __ push(call_data); | 
| 5686 | 5587 | 
| 5687   Register scratch = call_data; | 5588   Register scratch = call_data; | 
| 5688   if (!call_data_undefined) { | 5589   if (!call_data_undefined) { | 
| 5689     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 5590     __ LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 
| 5690   } | 5591   } | 
| 5691   // return value | 5592   // return value | 
| 5692   __ push(scratch); | 5593   __ push(scratch); | 
| 5693   // return value default | 5594   // return value default | 
| 5694   __ push(scratch); | 5595   __ push(scratch); | 
| 5695   // isolate | 5596   // isolate | 
| 5696   __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); | 5597   __ mov(scratch, Operand(ExternalReference::isolate_address(masm->isolate()))); | 
| 5697   __ push(scratch); | 5598   __ push(scratch); | 
| 5698   // holder | 5599   // holder | 
| 5699   __ push(holder); | 5600   __ push(holder); | 
| 5700 | 5601 | 
| 5701   // Prepare arguments. | 5602   // Prepare arguments. | 
| 5702   __ mr(scratch, sp); | 5603   __ LoadRR(scratch, sp); | 
| 5703 | 5604 | 
| 5704   // Allocate the v8::Arguments structure in the arguments' space since | 5605   // Allocate the v8::Arguments structure in the arguments' space since | 
| 5705   // it's not controlled by GC. | 5606   // it's not controlled by GC. | 
| 5706   // PPC LINUX ABI: | 5607   // S390 LINUX ABI: | 
| 5707   // | 5608   // | 
| 5708   // Create 5 extra slots on stack: | 5609   // Create 5 extra slots on stack: | 
| 5709   //    [0] space for DirectCEntryStub's LR save | 5610   //    [0] space for DirectCEntryStub's LR save | 
| 5710   //    [1-4] FunctionCallbackInfo | 5611   //    [1-4] FunctionCallbackInfo | 
| 5711   const int kApiStackSpace = 5; | 5612   const int kApiStackSpace = 5; | 
| 5712   const int kFunctionCallbackInfoOffset = | 5613   const int kFunctionCallbackInfoOffset = | 
| 5713       (kStackFrameExtraParamSlot + 1) * kPointerSize; | 5614       (kStackFrameExtraParamSlot + 1) * kPointerSize; | 
| 5714 | 5615 | 
| 5715   FrameScope frame_scope(masm, StackFrame::MANUAL); | 5616   FrameScope frame_scope(masm, StackFrame::MANUAL); | 
| 5716   __ EnterExitFrame(false, kApiStackSpace); | 5617   __ EnterExitFrame(false, kApiStackSpace); | 
| 5717 | 5618 | 
| 5718   DCHECK(!api_function_address.is(r3) && !scratch.is(r3)); | 5619   DCHECK(!api_function_address.is(r2) && !scratch.is(r2)); | 
| 5719   // r3 = FunctionCallbackInfo& | 5620   // r2 = FunctionCallbackInfo& | 
| 5720   // The arguments structure is after the return address. | 5621   // The arguments structure is after the return address. | 
| 5721   __ addi(r3, sp, Operand(kFunctionCallbackInfoOffset)); | 5622   __ AddP(r2, sp, Operand(kFunctionCallbackInfoOffset)); | 
| 5722   // FunctionCallbackInfo::implicit_args_ | 5623   // FunctionCallbackInfo::implicit_args_ | 
| 5723   __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize)); | 5624   __ StoreP(scratch, MemOperand(r2, 0 * kPointerSize)); | 
| 5724   if (argc.is_immediate()) { | 5625   if (argc.is_immediate()) { | 
| 5725     // FunctionCallbackInfo::values_ | 5626     // FunctionCallbackInfo::values_ | 
| 5726     __ addi(ip, scratch, | 5627     __ AddP(ip, scratch, | 
| 5727             Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize)); | 5628             Operand((FCA::kArgsLength - 1 + argc.immediate()) * kPointerSize)); | 
| 5728     __ StoreP(ip, MemOperand(r3, 1 * kPointerSize)); | 5629     __ StoreP(ip, MemOperand(r2, 1 * kPointerSize)); | 
| 5729     // FunctionCallbackInfo::length_ = argc | 5630     // FunctionCallbackInfo::length_ = argc | 
| 5730     __ li(ip, Operand(argc.immediate())); | 5631     __ LoadImmP(ip, Operand(argc.immediate())); | 
| 5731     __ stw(ip, MemOperand(r3, 2 * kPointerSize)); | 5632     __ StoreW(ip, MemOperand(r2, 2 * kPointerSize)); | 
| 5732     // FunctionCallbackInfo::is_construct_call_ = 0 | 5633     // FunctionCallbackInfo::is_construct_call_ = 0 | 
| 5733     __ li(ip, Operand::Zero()); | 5634     __ LoadImmP(ip, Operand::Zero()); | 
| 5734     __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize)); | 5635     __ StoreW(ip, MemOperand(r2, 2 * kPointerSize + kIntSize)); | 
| 5735   } else { | 5636   } else { | 
| 5736     __ ShiftLeftImm(ip, argc.reg(), Operand(kPointerSizeLog2)); | 5637     __ ShiftLeftP(ip, argc.reg(), Operand(kPointerSizeLog2)); | 
| 5737     __ addi(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize)); | 5638     __ AddP(ip, ip, Operand((FCA::kArgsLength - 1) * kPointerSize)); | 
| 5738     // FunctionCallbackInfo::values_ | 5639     // FunctionCallbackInfo::values_ | 
| 5739     __ add(r0, scratch, ip); | 5640     __ AddP(r0, scratch, ip); | 
| 5740     __ StoreP(r0, MemOperand(r3, 1 * kPointerSize)); | 5641     __ StoreP(r0, MemOperand(r2, 1 * kPointerSize)); | 
| 5741     // FunctionCallbackInfo::length_ = argc | 5642     // FunctionCallbackInfo::length_ = argc | 
| 5742     __ stw(argc.reg(), MemOperand(r3, 2 * kPointerSize)); | 5643     __ StoreW(argc.reg(), MemOperand(r2, 2 * kPointerSize)); | 
| 5743     // FunctionCallbackInfo::is_construct_call_ | 5644     // FunctionCallbackInfo::is_construct_call_ | 
| 5744     __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize)); | 5645     __ StoreW(ip, MemOperand(r2, 2 * kPointerSize + kIntSize)); | 
| 5745   } | 5646   } | 
| 5746 | 5647 | 
| 5747   ExternalReference thunk_ref = | 5648   ExternalReference thunk_ref = | 
| 5748       ExternalReference::invoke_function_callback(masm->isolate()); | 5649       ExternalReference::invoke_function_callback(masm->isolate()); | 
| 5749 | 5650 | 
| 5750   AllowExternalCallThatCantCauseGC scope(masm); | 5651   AllowExternalCallThatCantCauseGC scope(masm); | 
| 5751   MemOperand context_restore_operand( | 5652   MemOperand context_restore_operand( | 
| 5752       fp, (2 + FCA::kContextSaveIndex) * kPointerSize); | 5653       fp, (2 + FCA::kContextSaveIndex) * kPointerSize); | 
| 5753   // Store callbacks return the first JS argument. | 5654   // Store callbacks return the first JS argument. | 
| 5754   int return_value_offset = 0; | 5655   int return_value_offset = 0; | 
| 5755   if (return_first_arg) { | 5656   if (return_first_arg) { | 
| 5756     return_value_offset = 2 + FCA::kArgsLength; | 5657     return_value_offset = 2 + FCA::kArgsLength; | 
| 5757   } else { | 5658   } else { | 
| 5758     return_value_offset = 2 + FCA::kReturnValueOffset; | 5659     return_value_offset = 2 + FCA::kReturnValueOffset; | 
| 5759   } | 5660   } | 
| 5760   MemOperand return_value_operand(fp, return_value_offset * kPointerSize); | 5661   MemOperand return_value_operand(fp, return_value_offset * kPointerSize); | 
| 5761   int stack_space = 0; | 5662   int stack_space = 0; | 
| 5762   MemOperand is_construct_call_operand = | 5663   MemOperand is_construct_call_operand = | 
| 5763       MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize); | 5664       MemOperand(sp, kFunctionCallbackInfoOffset + 2 * kPointerSize + kIntSize); | 
| 5764   MemOperand* stack_space_operand = &is_construct_call_operand; | 5665   MemOperand* stack_space_operand = &is_construct_call_operand; | 
| 5765   if (argc.is_immediate()) { | 5666   if (argc.is_immediate()) { | 
| 5766     stack_space = argc.immediate() + FCA::kArgsLength + 1; | 5667     stack_space = argc.immediate() + FCA::kArgsLength + 1; | 
| 5767     stack_space_operand = NULL; | 5668     stack_space_operand = NULL; | 
| 5768   } | 5669   } | 
| 5769   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space, | 5670   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, stack_space, | 
| 5770                            stack_space_operand, return_value_operand, | 5671                            stack_space_operand, return_value_operand, | 
| 5771                            &context_restore_operand); | 5672                            &context_restore_operand); | 
| 5772 } | 5673 } | 
| 5773 | 5674 | 
| 5774 |  | 
| 5775 void CallApiFunctionStub::Generate(MacroAssembler* masm) { | 5675 void CallApiFunctionStub::Generate(MacroAssembler* masm) { | 
| 5776   bool call_data_undefined = this->call_data_undefined(); | 5676   bool call_data_undefined = this->call_data_undefined(); | 
| 5777   CallApiFunctionStubHelper(masm, ParameterCount(r6), false, | 5677   CallApiFunctionStubHelper(masm, ParameterCount(r6), false, | 
| 5778                             call_data_undefined, false); | 5678                             call_data_undefined, false); | 
| 5779 } | 5679 } | 
| 5780 | 5680 | 
| 5781 |  | 
| 5782 void CallApiAccessorStub::Generate(MacroAssembler* masm) { | 5681 void CallApiAccessorStub::Generate(MacroAssembler* masm) { | 
| 5783   bool is_store = this->is_store(); | 5682   bool is_store = this->is_store(); | 
| 5784   int argc = this->argc(); | 5683   int argc = this->argc(); | 
| 5785   bool call_data_undefined = this->call_data_undefined(); | 5684   bool call_data_undefined = this->call_data_undefined(); | 
| 5786   bool is_lazy = this->is_lazy(); | 5685   bool is_lazy = this->is_lazy(); | 
| 5787   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store, | 5686   CallApiFunctionStubHelper(masm, ParameterCount(argc), is_store, | 
| 5788                             call_data_undefined, is_lazy); | 5687                             call_data_undefined, is_lazy); | 
| 5789 } | 5688 } | 
| 5790 | 5689 | 
| 5791 |  | 
| 5792 void CallApiGetterStub::Generate(MacroAssembler* masm) { | 5690 void CallApiGetterStub::Generate(MacroAssembler* masm) { | 
| 5793   // ----------- S t a t e ------------- | 5691   // ----------- S t a t e ------------- | 
| 5794   //  -- sp[0]                        : name | 5692   //  -- sp[0]                        : name | 
| 5795   //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_ | 5693   //  -- sp[4 .. (4 + kArgsLength*4)] : v8::PropertyCallbackInfo::args_ | 
| 5796   //  -- ... | 5694   //  -- ... | 
| 5797   //  -- r5                           : api_function_address | 5695   //  -- r4                           : api_function_address | 
| 5798   // ----------------------------------- | 5696   // ----------------------------------- | 
| 5799 | 5697 | 
| 5800   Register api_function_address = ApiGetterDescriptor::function_address(); | 5698   Register api_function_address = ApiGetterDescriptor::function_address(); | 
| 5801   int arg0Slot = 0; | 5699   int arg0Slot = 0; | 
| 5802   int accessorInfoSlot = 0; | 5700   int accessorInfoSlot = 0; | 
| 5803   int apiStackSpace = 0; | 5701   int apiStackSpace = 0; | 
| 5804   DCHECK(api_function_address.is(r5)); | 5702   DCHECK(api_function_address.is(r4)); | 
| 5805 | 5703 | 
| 5806   // v8::PropertyCallbackInfo::args_ array and name handle. | 5704   // v8::PropertyCallbackInfo::args_ array and name handle. | 
| 5807   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; | 5705   const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; | 
| 5808 | 5706 | 
| 5809   // Load address of v8::PropertyCallbackInfo::args_ array and name handle. | 5707   // Load address of v8::PropertyCallbackInfo::args_ array and name handle. | 
| 5810   __ mr(r3, sp);                               // r3 = Handle<Name> | 5708   __ LoadRR(r2, sp);                           // r2 = Handle<Name> | 
| 5811   __ addi(r4, r3, Operand(1 * kPointerSize));  // r4 = v8::PCI::args_ | 5709   __ AddP(r3, r2, Operand(1 * kPointerSize));  // r3 = v8::PCI::args_ | 
| 5812 | 5710 | 
| 5813 // If ABI passes Handles (pointer-sized struct) in a register: | 5711   // If ABI passes Handles (pointer-sized struct) in a register: | 
| 5814 // | 5712   // | 
| 5815 // Create 2 extra slots on stack: | 5713   // Create 2 extra slots on stack: | 
| 5816 //    [0] space for DirectCEntryStub's LR save | 5714   //    [0] space for DirectCEntryStub's LR save | 
| 5817 //    [1] AccessorInfo& | 5715   //    [1] AccessorInfo& | 
| 5818 // | 5716   // | 
| 5819 // Otherwise: | 5717   // Otherwise: | 
| 5820 // | 5718   // | 
| 5821 // Create 3 extra slots on stack: | 5719   // Create 3 extra slots on stack: | 
| 5822 //    [0] space for DirectCEntryStub's LR save | 5720   //    [0] space for DirectCEntryStub's LR save | 
| 5823 //    [1] copy of Handle (first arg) | 5721   //    [1] copy of Handle (first arg) | 
| 5824 //    [2] AccessorInfo& | 5722   //    [2] AccessorInfo& | 
| 5825   if (ABI_PASSES_HANDLES_IN_REGS) { | 5723   if (ABI_PASSES_HANDLES_IN_REGS) { | 
| 5826     accessorInfoSlot = kStackFrameExtraParamSlot + 1; | 5724     accessorInfoSlot = kStackFrameExtraParamSlot + 1; | 
| 5827     apiStackSpace = 2; | 5725     apiStackSpace = 2; | 
| 5828   } else { | 5726   } else { | 
| 5829     arg0Slot = kStackFrameExtraParamSlot + 1; | 5727     arg0Slot = kStackFrameExtraParamSlot + 1; | 
| 5830     accessorInfoSlot = arg0Slot + 1; | 5728     accessorInfoSlot = arg0Slot + 1; | 
| 5831     apiStackSpace = 3; | 5729     apiStackSpace = 3; | 
| 5832   } | 5730   } | 
| 5833 | 5731 | 
| 5834   FrameScope frame_scope(masm, StackFrame::MANUAL); | 5732   FrameScope frame_scope(masm, StackFrame::MANUAL); | 
| 5835   __ EnterExitFrame(false, apiStackSpace); | 5733   __ EnterExitFrame(false, apiStackSpace); | 
| 5836 | 5734 | 
| 5837   if (!ABI_PASSES_HANDLES_IN_REGS) { | 5735   if (!ABI_PASSES_HANDLES_IN_REGS) { | 
| 5838     // pass 1st arg by reference | 5736     // pass 1st arg by reference | 
| 5839     __ StoreP(r3, MemOperand(sp, arg0Slot * kPointerSize)); | 5737     __ StoreP(r2, MemOperand(sp, arg0Slot * kPointerSize)); | 
| 5840     __ addi(r3, sp, Operand(arg0Slot * kPointerSize)); | 5738     __ AddP(r2, sp, Operand(arg0Slot * kPointerSize)); | 
| 5841   } | 5739   } | 
| 5842 | 5740 | 
| 5843   // Create v8::PropertyCallbackInfo object on the stack and initialize | 5741   // Create v8::PropertyCallbackInfo object on the stack and initialize | 
| 5844   // its args_ field. | 5742   // its args_ field. | 
| 5845   __ StoreP(r4, MemOperand(sp, accessorInfoSlot * kPointerSize)); | 5743   __ StoreP(r3, MemOperand(sp, accessorInfoSlot * kPointerSize)); | 
| 5846   __ addi(r4, sp, Operand(accessorInfoSlot * kPointerSize)); | 5744   __ AddP(r3, sp, Operand(accessorInfoSlot * kPointerSize)); | 
| 5847   // r4 = v8::PropertyCallbackInfo& | 5745   // r3 = v8::PropertyCallbackInfo& | 
| 5848 | 5746 | 
| 5849   ExternalReference thunk_ref = | 5747   ExternalReference thunk_ref = | 
| 5850       ExternalReference::invoke_accessor_getter_callback(isolate()); | 5748       ExternalReference::invoke_accessor_getter_callback(isolate()); | 
| 5851 | 5749 | 
| 5852   // +3 is to skip prolog, return address and name handle. | 5750   // +3 is to skip prolog, return address and name handle. | 
| 5853   MemOperand return_value_operand( | 5751   MemOperand return_value_operand( | 
| 5854       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); | 5752       fp, (PropertyCallbackArguments::kReturnValueOffset + 3) * kPointerSize); | 
| 5855   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, | 5753   CallApiFunctionAndReturn(masm, api_function_address, thunk_ref, | 
| 5856                            kStackUnwindSpace, NULL, return_value_operand, NULL); | 5754                            kStackUnwindSpace, NULL, return_value_operand, NULL); | 
| 5857 } | 5755 } | 
| 5858 | 5756 | 
|  | 5757 #undef __ | 
| 5859 | 5758 | 
| 5860 #undef __ |  | 
| 5861 }  // namespace internal | 5759 }  // namespace internal | 
| 5862 }  // namespace v8 | 5760 }  // namespace v8 | 
| 5863 | 5761 | 
| 5864 #endif  // V8_TARGET_ARCH_PPC | 5762 #endif  // V8_TARGET_ARCH_S390 | 
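A note on the masked compares in StoreGlobalViaContextStub above: the `AndP`/`CmpP` sequence works because PropertyDetails packs the cell type, the kind, and the read-only attribute into disjoint bit fields of one word, so masking the untagged details and comparing against a fully-encoded constant tests all three properties at once. The stand-alone C++ sketch below models that idea; the field positions and widths are assumptions for illustration, not V8's actual PropertyDetails layout.

```cpp
#include <cassert>
#include <cstdint>

// Minimal model of a packed bit-field word, in the spirit of the
// PropertyDetails word the stub inspects. Shifts and sizes are assumed.
template <typename T, int kShift, int kSize>
struct BitField {
  static constexpr uint32_t kMask = ((1u << kSize) - 1u) << kShift;
  static constexpr uint32_t encode(T value) {
    return static_cast<uint32_t>(value) << kShift;
  }
  static constexpr T decode(uint32_t word) {
    return static_cast<T>((word & kMask) >> kShift);
  }
};

enum class CellType : uint32_t { kMutable, kConstant, kConstantType, kUndefined };
enum class Kind : uint32_t { kData, kAccessor };

using CellTypeField = BitField<CellType, 0, 2>;
using KindField = BitField<Kind, 2, 1>;
constexpr uint32_t kReadOnlyMask = 1u << 3;  // assumed bit position

int main() {
  const uint32_t kRelevantMask =
      CellTypeField::kMask | KindField::kMask | kReadOnlyMask;
  // A mutable data cell survives the stub's fast-path compare...
  uint32_t mutable_cell = CellTypeField::encode(CellType::kMutable) |
                          KindField::encode(Kind::kData);
  assert((mutable_cell & kRelevantMask) ==
         (CellTypeField::encode(CellType::kMutable) |
          KindField::encode(Kind::kData)));
  // ...while a read-only constant cell does not.
  uint32_t readonly_cell = CellTypeField::encode(CellType::kConstant) |
                           KindField::encode(Kind::kData) | kReadOnlyMask;
  assert((readonly_cell & kRelevantMask) !=
         (CellTypeField::encode(CellType::kMutable) |
          KindField::encode(Kind::kData)));
  return 0;
}
```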
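CallApiFunctionAndReturn keeps three pieces of handle-scope state (next, limit, level) in callee-saved registers across the native call, restores them afterwards, and takes a slow path if the limit moved. Below is a toy C++ model of that protocol; all names, including the DeleteExtensions stand-in, are illustrative assumptions rather than V8's API.

```cpp
#include <cstdio>

// Toy model of the isolate's handle-scope data: an allocation cursor,
// a block limit, and a nesting level.
struct HandleScopeData {
  void** next = nullptr;
  void** limit = nullptr;
  int level = 0;
};

// Stand-in for the runtime's delete_handle_scope_extensions: release
// blocks allocated past the saved limit, then restore the limit.
static void DeleteExtensions(HandleScopeData* data, void** saved_limit) {
  data->limit = saved_limit;
}

template <typename ApiCall>
void CallWithHandleScope(HandleScopeData* data, ApiCall call) {
  // Save next/limit and bump the level, as the stub does before the call.
  void** saved_next = data->next;
  void** saved_limit = data->limit;
  data->level++;

  call(data);  // the native callback may allocate handles

  // Restore 'next' and drop the level after the call returns.
  data->next = saved_next;
  data->level--;
  // If the callback grew the scope past the saved limit, extensions were
  // allocated and must be deleted (the delete_allocated_handles path).
  if (data->limit != saved_limit) DeleteExtensions(data, saved_limit);
}

int main() {
  HandleScopeData data;
  void* block[8];
  data.next = block;
  data.limit = block + 8;
  CallWithHandleScope(&data, [](HandleScopeData* d) {
    d->next += 4;  // simulate handle allocation within the current block
  });
  std::printf("level=%d\n", data.level);  // back to 0
  return 0;
}
```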
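The STATIC_ASSERTs in CallApiFunctionStubHelper pin the order of the seven implicit slots the stub pushes (holder at index 0 up through the context save at index 6), which is what lets it compute values_ as a fixed offset from implicit_args_. A minimal sketch of that pointer arithmetic, using a model struct rather than V8's real FunctionCallbackInfo:

```cpp
#include <cassert>

// Slot indices mirroring the STATIC_ASSERTs in the diff.
constexpr int kHolderIndex = 0;
constexpr int kIsolateIndex = 1;
constexpr int kReturnValueDefaultValueIndex = 2;
constexpr int kReturnValueOffset = 3;
constexpr int kDataIndex = 4;
constexpr int kCalleeIndex = 5;
constexpr int kContextSaveIndex = 6;
constexpr int kArgsLength = 7;

// Model of the three fields the stub writes, not V8's actual class.
struct CallbackInfoModel {
  void** implicit_args;  // points at the 7 implicit slots
  void** values;         // points at the first JS argument above them
  int length;            // argc
};

int main() {
  // Fake "stack": the implicit slots sit below the argc JS arguments, so
  // the first JS argument is kArgsLength - 1 + argc slots above them.
  constexpr int argc = 3;
  void* stack[kArgsLength + argc + 1] = {};  // +1 for the receiver

  CallbackInfoModel info;
  info.implicit_args = &stack[0];
  info.values = &stack[kArgsLength - 1 + argc];  // the stub's AddP offset
  info.length = argc;

  assert(info.values - info.implicit_args == kArgsLength - 1 + argc);
  return 0;
}
```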