Index: src/ppc/code-stubs-ppc.cc
diff --git a/src/arm/code-stubs-arm.cc b/src/ppc/code-stubs-ppc.cc
similarity index 59%
copy from src/arm/code-stubs-arm.cc
copy to src/ppc/code-stubs-ppc.cc
index a728d58fbfb58490dbd371f4829df1223dc6ed7e..03bfa156542e2d6bbb2adca35cfd6f297cff455d 100644
--- a/src/arm/code-stubs-arm.cc
+++ b/src/ppc/code-stubs-ppc.cc
@@ -1,23 +1,28 @@
 // Copyright 2012 the V8 project authors. All rights reserved.
+//
+// Copyright IBM Corp. 2012, 2013. All rights reserved.
+//
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.
 
 #include "src/v8.h"
 
-#if V8_TARGET_ARCH_ARM
+#if V8_TARGET_ARCH_PPC
 
 #include "src/bootstrapper.h"
 #include "src/code-stubs.h"
 #include "src/regexp-macro-assembler.h"
 #include "src/stub-cache.h"
 
+#include "src/ppc/regexp-macro-assembler-ppc.h"
+
 namespace v8 {
 namespace internal {
 
 
 void FastNewClosureStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r2 };
+  Register registers[] = { cp, r5 };
   descriptor->Initialize(
       MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNewClosureFromStubFailure)->entry);
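// [Editor's annotation, not part of the patch] The register renames that run
// through this whole port follow one fixed rule: a JS value in ARM rN moves
// to PPC r(N+3), e.g. r0 -> r3, r1 -> r4, r2 -> r5, r4 -> r7. On the PPC
// ABI, r3 is the first argument and return-value register, while r0 reads as
// a literal zero in many addressing modes and r1/r2 hold the stack pointer
// and TOC, so the low registers cannot carry JS values. A minimal
// illustration of the convention:
//
//   ARM:  Register registers[] = { cp, r2 };
//   PPC:  Register registers[] = { cp, r5 };  // same slot, shifted by 3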
@@ -26,21 +31,21 @@ void FastNewClosureStub::InitializeInterfaceDescriptor(
 
 void FastNewContextStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r1 };
+  Register registers[] = { cp, r4 };
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
 void ToNumberStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r0 };
+  Register registers[] = { cp, r3 };
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
 void NumberToStringStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r0 };
+  Register registers[] = { cp, r3 };
   descriptor->Initialize(
       MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kNumberToStringRT)->entry);
@@ -49,7 +54,7 @@ void NumberToStringStub::InitializeInterfaceDescriptor(
 
 void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r3, r2, r1 };
+  Register registers[] = { cp, r6, r5, r4 };
   Representation representations[] = {
     Representation::Tagged(),
    Representation::Tagged(),
@@ -64,7 +69,7 @@ void FastCloneShallowArrayStub::InitializeInterfaceDescriptor(
 
 void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r3, r2, r1, r0 };
+  Register registers[] = { cp, r6, r5, r4, r3 };
   descriptor->Initialize(
       MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kCreateObjectLiteral)->entry);
@@ -73,36 +78,26 @@ void FastCloneShallowObjectStub::InitializeInterfaceDescriptor(
 
 void CreateAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r2, r3 };
+  Register registers[] = { cp, r5, r6 };
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
 }
 
 
 void CallFunctionStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  // r1  function    the function to call
-  Register registers[] = {cp, r1};
-  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+  UNIMPLEMENTED();  // turbofan
 }
 
 
 void CallConstructStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  // r0 : number of arguments
-  // r1 : the function to call
-  // r2 : feedback vector
-  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback
-  //      vector (Smi)
-  // TODO(turbofan): So far we don't gather type feedback and hence skip the
-  // slot parameter, but ArrayConstructStub needs the vector to be undefined.
-  Register registers[] = {cp, r0, r1, r2};
-  descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers);
+  UNIMPLEMENTED();  // turbofan
 }
 
 
 void RegExpConstructResultStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r2, r1, r0 };
+  Register registers[] = { cp, r5, r4, r3 };
   descriptor->Initialize(
       MajorKey(), ARRAY_SIZE(registers), registers,
       Runtime::FunctionForId(Runtime::kRegExpConstructResult)->entry);
@@ -111,7 +106,7 @@ void RegExpConstructResultStub::InitializeInterfaceDescriptor(
 
 void TransitionElementsKindStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r0, r1 };
+  Register registers[] = { cp, r3, r4 };
   Address entry =
       Runtime::FunctionForId(Runtime::kTransitionElementsKind)->entry;
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
@@ -121,7 +116,7 @@ void TransitionElementsKindStub::InitializeInterfaceDescriptor(
 
 void CompareNilICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r0 };
+  Register registers[] = { cp, r3 };
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(CompareNilIC_Miss));
   descriptor->SetMissHandler(
@@ -137,26 +132,26 @@ static void InitializeArrayConstructorDescriptor(
     int constant_stack_parameter_count) {
   // register state
   // cp -- context
-  // r0 -- number of arguments
-  // r1 -- function
-  // r2 -- allocation site with elements kind
+  // r3 -- number of arguments
+  // r4 -- function
+  // r5 -- allocation site with elements kind
   Address deopt_handler = Runtime::FunctionForId(
       Runtime::kArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    Register registers[] = { cp, r1, r2 };
+    Register registers[] = { cp, r4, r5 };
     descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
                            deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
-    Register registers[] = { cp, r1, r2, r0 };
+    Register registers[] = { cp, r4, r5, r3 };
     Representation representations[] = {
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r3,
                            deopt_handler, representations,
                            constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
@@ -169,24 +164,24 @@ static void InitializeInternalArrayConstructorDescriptor(
     int constant_stack_parameter_count) {
   // register state
   // cp -- context
-  // r0 -- number of arguments
-  // r1 -- constructor function
+  // r3 -- number of arguments
+  // r4 -- constructor function
   Address deopt_handler = Runtime::FunctionForId(
       Runtime::kInternalArrayConstructor)->entry;
 
   if (constant_stack_parameter_count == 0) {
-    Register registers[] = { cp, r1 };
+    Register registers[] = { cp, r4 };
     descriptor->Initialize(major, ARRAY_SIZE(registers), registers,
                            deopt_handler, NULL, constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE);
   } else {
     // stack param count needs (constructor pointer, and single argument)
-    Register registers[] = { cp, r1, r0 };
+    Register registers[] = { cp, r4, r3 };
     Representation representations[] = {
         Representation::Tagged(),
         Representation::Tagged(),
         Representation::Integer32() };
-    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r0,
+    descriptor->Initialize(major, ARRAY_SIZE(registers), registers, r3,
                            deopt_handler, representations,
                            constant_stack_parameter_count,
                            JS_FUNCTION_STUB_MODE, PASS_ARGUMENTS);
@@ -214,7 +209,7 @@ void ArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
 
 void ToBooleanStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r0 };
+  Register registers[] = { cp, r3 };
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(ToBooleanIC_Miss));
   descriptor->SetMissHandler(
@@ -242,7 +237,7 @@ void InternalArrayNArgumentsConstructorStub::InitializeInterfaceDescriptor(
 
 void BinaryOpICStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r1, r0 };
+  Register registers[] = { cp, r4, r3 };
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_Miss));
   descriptor->SetMissHandler(
@@ -252,7 +247,7 @@ void BinaryOpICStub::InitializeInterfaceDescriptor(
 
 void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r2, r1, r0 };
+  Register registers[] = { cp, r5, r4, r3 };
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          FUNCTION_ADDR(BinaryOpIC_MissWithAllocationSite));
 }
@@ -260,26 +255,20 @@ void BinaryOpWithAllocationSiteStub::InitializeInterfaceDescriptor(
 
 void StringAddStub::InitializeInterfaceDescriptor(
     CodeStubInterfaceDescriptor* descriptor) {
-  Register registers[] = { cp, r1, r0 };
+  Register registers[] = { cp, r4, r3 };
   descriptor->Initialize(MajorKey(), ARRAY_SIZE(registers), registers,
                          Runtime::FunctionForId(Runtime::kStringAdd)->entry);
 }
 
 
 void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
-  static PlatformInterfaceDescriptor default_descriptor =
-      PlatformInterfaceDescriptor(CAN_INLINE_TARGET_ADDRESS);
-
-  static PlatformInterfaceDescriptor noInlineDescriptor =
-      PlatformInterfaceDescriptor(NEVER_INLINE_TARGET_ADDRESS);
-
   {
     CallInterfaceDescriptor* descriptor =
         isolate->call_descriptor(Isolate::ArgumentAdaptorCall);
     Register registers[] = { cp,  // context
-                             r1,  // JSFunction
-                             r0,  // actual number of arguments
-                             r2,  // expected number of arguments
+                             r4,  // JSFunction
+                             r3,  // actual number of arguments
+                             r5,  // expected number of arguments
     };
     Representation representations[] = {
         Representation::Tagged(),     // context
@@ -287,56 +276,52 @@ void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
         Representation::Integer32(),  // actual number of arguments
         Representation::Integer32(),  // expected number of arguments
     };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           representations, &default_descriptor);
+    descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
   }
   {
     CallInterfaceDescriptor* descriptor =
         isolate->call_descriptor(Isolate::KeyedCall);
     Register registers[] = { cp,  // context
-                             r2,  // key
+                             r5,  // key
     };
     Representation representations[] = {
         Representation::Tagged(),     // context
         Representation::Tagged(),     // key
     };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           representations, &noInlineDescriptor);
+    descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
   }
   {
     CallInterfaceDescriptor* descriptor =
         isolate->call_descriptor(Isolate::NamedCall);
     Register registers[] = { cp,  // context
-                             r2,  // name
+                             r5,  // name
     };
     Representation representations[] = {
         Representation::Tagged(),     // context
         Representation::Tagged(),     // name
     };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           representations, &noInlineDescriptor);
+    descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
   }
   {
     CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::CallHandler);
     Register registers[] = { cp,  // context
-                             r0,  // receiver
+                             r3,  // receiver
    };
     Representation representations[] = {
         Representation::Tagged(),  // context
         Representation::Tagged(),  // receiver
     };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           representations, &default_descriptor);
+    descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
  }
  {
    CallInterfaceDescriptor* descriptor =
        isolate->call_descriptor(Isolate::ApiFunctionCall);
    Register registers[] = { cp,  // context
-                             r0,  // callee
-                             r4,  // call_data
-                             r2,  // holder
-                             r1,  // api_function_address
+                             r3,  // callee
+                             r7,  // call_data
+                             r5,  // holder
+                             r4,  // api_function_address
    };
    Representation representations[] = {
        Representation::Tagged(),    // context
@@ -345,8 +330,7 @@ void CallDescriptors::InitializeForIsolate(Isolate* isolate) {
        Representation::Tagged(),    // holder
        Representation::External(),  // api_function_address
    };
-    descriptor->Initialize(ARRAY_SIZE(registers), registers,
-                           representations, &default_descriptor);
+    descriptor->Initialize(ARRAY_SIZE(registers), registers, representations);
  }
 }
 
@@ -378,7 +362,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
     // Call the runtime system in a fresh internal frame.
     FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL);
     DCHECK(param_count == 0 ||
-           r0.is(descriptor->GetEnvironmentParameterRegister(
+           r3.is(descriptor->GetEnvironmentParameterRegister(
                param_count - 1)));
     // Push arguments
     for (int i = 0; i < param_count; ++i) {
@@ -392,6 +376,7 @@ void HydrogenCodeStub::GenerateLightweightMiss(MacroAssembler* masm) {
 }
 
 
+#if 0  // roohack unused?
 // Takes a Smi and converts to an IEEE 64 bit floating point value in two
 // registers.  The format is 1 sign bit, 11 exponent bits (biased 1023) and
 // 52 fraction bits (20 in the first word, 32 in the second).  Zeros is a
@@ -485,84 +470,92 @@ void ConvertToDoubleStub::Generate(MacroAssembler* masm) {
          Operand(source_, LSR, 32 - HeapNumber::kMantissaBitsInTopWord));
   __ Ret();
 }
+#endif  // roohack
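// [Editor's annotation, not part of the patch] A hedged C++ sketch of the
// IEEE 754 layout the disabled stub's comment describes: 1 sign bit, 11
// exponent bits biased by 1023, 52 fraction bits split 20/32 across two
// words. The helper name is illustrative, not V8 API.
#include <cstdint>
#include <cstring>
static void DoubleWordsFromInt(int32_t value, uint32_t* hi, uint32_t* lo) {
  double d = static_cast<double>(value);  // what the stub assembled by hand
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));   // sign<<63 | (exp+1023)<<52 | frac
  *hi = static_cast<uint32_t>(bits >> 32);  // sign, exponent, top 20 bits
  *lo = static_cast<uint32_t>(bits);        // low 32 fraction bits
}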
 
 
 void DoubleToIStub::Generate(MacroAssembler* masm) {
-  Label out_of_range, only_low, negate, done;
+  Label out_of_range, only_low, negate, done, fastpath_done;
   Register input_reg = source();
   Register result_reg = destination();
   DCHECK(is_truncating());
 
   int double_offset = offset();
-  // Account for saved regs if input is sp.
-  if (input_reg.is(sp)) double_offset += 3 * kPointerSize;
 
+  // Immediate values for this stub fit in instructions, so it's safe to use ip.
   Register scratch = GetRegisterThatIsNotOneOf(input_reg, result_reg);
   Register scratch_low =
       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch);
   Register scratch_high =
       GetRegisterThatIsNotOneOf(input_reg, result_reg, scratch, scratch_low);
-  LowDwVfpRegister double_scratch = kScratchDoubleReg;
+  DoubleRegister double_scratch = kScratchDoubleReg;
 
-  __ Push(scratch_high, scratch_low, scratch);
+  __ push(scratch);
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += kPointerSize;
 
   if (!skip_fastpath()) {
     // Load double input.
-    __ vldr(double_scratch, MemOperand(input_reg, double_offset));
-    __ vmov(scratch_low, scratch_high, double_scratch);
+    __ lfd(double_scratch, MemOperand(input_reg, double_offset));
 
     // Do fast-path convert from double to int.
-    __ vcvt_s32_f64(double_scratch.low(), double_scratch);
-    __ vmov(result_reg, double_scratch.low());
+    __ ConvertDoubleToInt64(double_scratch,
+#if !V8_TARGET_ARCH_PPC64
+                            scratch,
+#endif
+                            result_reg, d0);
 
-    // If result is not saturated (0x7fffffff or 0x80000000), we are done.
-    __ sub(scratch, result_reg, Operand(1));
-    __ cmp(scratch, Operand(0x7ffffffe));
-    __ b(lt, &done);
-  } else {
-    // We've already done MacroAssembler::TryFastTruncatedDoubleToILoad, so we
-    // know exponent > 31, so we can skip the vcvt_s32_f64 which will saturate.
-    if (double_offset == 0) {
-      __ ldm(ia, input_reg, scratch_low.bit() | scratch_high.bit());
-    } else {
-      __ ldr(scratch_low, MemOperand(input_reg, double_offset));
-      __ ldr(scratch_high, MemOperand(input_reg, double_offset + kIntSize));
-    }
+    // Test for overflow
+#if V8_TARGET_ARCH_PPC64
+    __ TestIfInt32(result_reg, scratch, r0);
+#else
+    __ TestIfInt32(scratch, result_reg, r0);
+#endif
+    __ beq(&fastpath_done);
   }
 
-  __ Ubfx(scratch, scratch_high,
-          HeapNumber::kExponentShift, HeapNumber::kExponentBits);
+  __ Push(scratch_high, scratch_low);
+  // Account for saved regs if input is sp.
+  if (input_reg.is(sp)) double_offset += 2 * kPointerSize;
+
+  __ lwz(scratch_high, MemOperand(input_reg, double_offset +
+                                  Register::kExponentOffset));
+  __ lwz(scratch_low, MemOperand(input_reg, double_offset +
+                                 Register::kMantissaOffset));
+
+  __ ExtractBitMask(scratch, scratch_high, HeapNumber::kExponentMask);
   // Load scratch with exponent - 1. This is faster than loading
-  // with exponent because Bias + 1 = 1024 which is an *ARM* immediate value.
+  // with exponent because Bias + 1 = 1024 which is a *PPC* immediate value.
   STATIC_ASSERT(HeapNumber::kExponentBias + 1 == 1024);
-  __ sub(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
+  __ subi(scratch, scratch, Operand(HeapNumber::kExponentBias + 1));
   // If exponent is greater than or equal to 84, the 32 less significant
   // bits are 0s (2^84 = 1, 52 significant bits, 32 uncoded bits),
   // the result is 0.
   // Compare exponent with 84 (compare exponent - 1 with 83).
-  __ cmp(scratch, Operand(83));
-  __ b(ge, &out_of_range);
+  __ cmpi(scratch, Operand(83));
+  __ bge(&out_of_range);
 
   // If we reach this code, 31 <= exponent <= 83.
   // So, we don't have to handle cases where 0 <= exponent <= 20 for
   // which we would need to shift right the high part of the mantissa.
   // Scratch contains exponent - 1.
   // Load scratch with 52 - exponent (load with 51 - (exponent - 1)).
-  __ rsb(scratch, scratch, Operand(51), SetCC);
-  __ b(ls, &only_low);
+  __ subfic(scratch, scratch, Operand(51));
+  __ cmpi(scratch, Operand::Zero());
+  __ ble(&only_low);
   // 21 <= exponent <= 51, shift scratch_low and scratch_high
   // to generate the result.
-  __ mov(scratch_low, Operand(scratch_low, LSR, scratch));
+  __ srw(scratch_low, scratch_low, scratch);
   // Scratch contains: 52 - exponent.
   // We need: exponent - 20.
   // So we use: 32 - scratch = 32 - 52 + exponent = exponent - 20.
-  __ rsb(scratch, scratch, Operand(32));
-  __ Ubfx(result_reg, scratch_high,
-          0, HeapNumber::kMantissaBitsInTopWord);
+  __ subfic(scratch, scratch, Operand(32));
+  __ ExtractBitMask(result_reg, scratch_high, HeapNumber::kMantissaMask);
   // Set the implicit 1 before the mantissa part in scratch_high.
-  __ orr(result_reg, result_reg,
-         Operand(1 << HeapNumber::kMantissaBitsInTopWord));
-  __ orr(result_reg, scratch_low, Operand(result_reg, LSL, scratch));
+  STATIC_ASSERT(HeapNumber::kMantissaBitsInTopWord >= 16);
+  __ oris(result_reg, result_reg,
+          Operand(1 << ((HeapNumber::kMantissaBitsInTopWord) - 16)));
+  __ slw(r0, result_reg, scratch);
+  __ orx(result_reg, scratch_low, r0);
   __ b(&negate);
 
   __ bind(&out_of_range);
@@ -572,8 +565,8 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   __ bind(&only_low);
   // 52 <= exponent <= 83, shift only scratch_low.
   // On entry, scratch contains: 52 - exponent.
-  __ rsb(scratch, scratch, Operand::Zero());
-  __ mov(result_reg, Operand(scratch_low, LSL, scratch));
+  __ neg(scratch, scratch);
+  __ slw(result_reg, scratch_low, scratch);
 
   __ bind(&negate);
   // If input was positive, scratch_high ASR 31 equals 0 and
@@ -582,20 +575,29 @@ void DoubleToIStub::Generate(MacroAssembler* masm) {
   // If the input was negative, we have to negate the result.
   // Input_high ASR 31 equals 0xffffffff and scratch_high LSR 31 equals 1.
   // New result = (result eor 0xffffffff) + 1 = 0 - result.
-  __ eor(result_reg, result_reg, Operand(scratch_high, ASR, 31));
-  __ add(result_reg, result_reg, Operand(scratch_high, LSR, 31));
+  __ srawi(r0, scratch_high, 31);
+#if V8_TARGET_ARCH_PPC64
+  __ srdi(r0, r0, Operand(32));
+#endif
+  __ xor_(result_reg, result_reg, r0);
+  __ srwi(r0, scratch_high, Operand(31));
+  __ add(result_reg, result_reg, r0);
 
   __ bind(&done);
+  __ Pop(scratch_high, scratch_low);
+
+  __ bind(&fastpath_done);
+  __ pop(scratch);
 
-  __ Pop(scratch_high, scratch_low, scratch);
   __ Ret();
 }
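// [Editor's annotation, not part of the patch] A hedged C++ model of the
// slow path above: truncate a double to int32 from its raw bit halves using
// only the shifts the stub performs. Helper name is illustrative, and the
// stated exponent ranges follow the comments in the stub.
#include <cstdint>
static int32_t TruncateDoubleBits(uint32_t high, uint32_t low) {
  int32_t exponent = static_cast<int32_t>((high >> 20) & 0x7ff) - 1023;
  if (exponent >= 84) return 0;  // all 32 result bits were shifted out
  // exponent <= 31 is handled by the hardware-convert fast path; here
  // 31 < exponent <= 83, so assemble the result from the mantissa bits.
  uint32_t mantissa_high = (high & 0xfffff) | (1u << 20);  // implicit 1
  uint32_t result;
  if (exponent > 51) {
    result = low << (exponent - 52);  // only the low word contributes
  } else {  // 31 < exponent <= 51: combine both words
    result = (mantissa_high << (exponent - 20)) | (low >> (52 - exponent));
  }
  // Negate via the sign bit, mirroring the srawi/xor/add sequence.
  uint32_t sign = high >> 31;
  return static_cast<int32_t>((result ^ (0u - sign)) + sign);
}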
 
 
+#if 0  // roohack unused?
 void WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(
     Isolate* isolate) {
-  WriteInt32ToHeapNumberStub stub1(isolate, r1, r0, r2);
-  WriteInt32ToHeapNumberStub stub2(isolate, r2, r0, r3);
+  WriteInt32ToHeapNumberStub stub1(isolate, r4, r3, r5);
+  WriteInt32ToHeapNumberStub stub2(isolate, r5, r3, r6);
   stub1.GetCode();
   stub2.GetCode();
 }
@@ -645,7 +647,7 @@ void WriteInt32ToHeapNumberStub::Generate(MacroAssembler* masm) {
   __ str(ip, FieldMemOperand(the_heap_number_, HeapNumber::kMantissaOffset));
   __ Ret();
 }
-
+#endif  // roohack
 
 // Handle the case where the lhs and rhs are the same object.
 // Equality is almost reflexive (everything but NaN), so this is a test
@@ -655,38 +657,38 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
                                           Condition cond) {
   Label not_identical;
   Label heap_number, return_equal;
-  __ cmp(r0, r1);
-  __ b(ne, &not_identical);
+  __ cmp(r3, r4);
+  __ bne(&not_identical);
 
   // Test for NaN. Sadly, we can't just compare to Factory::nan_value(),
   // so we do the second best thing - test it ourselves.
   // They are both equal and they are not both Smis so both of them are not
   // Smis.  If it's not a heap number, then return equal.
   if (cond == lt || cond == gt) {
-    __ CompareObjectType(r0, r4, r4, FIRST_SPEC_OBJECT_TYPE);
-    __ b(ge, slow);
+    __ CompareObjectType(r3, r7, r7, FIRST_SPEC_OBJECT_TYPE);
+    __ bge(slow);
   } else {
-    __ CompareObjectType(r0, r4, r4, HEAP_NUMBER_TYPE);
-    __ b(eq, &heap_number);
+    __ CompareObjectType(r3, r7, r7, HEAP_NUMBER_TYPE);
+    __ beq(&heap_number);
     // Comparing JS objects with <=, >= is complicated.
     if (cond != eq) {
-      __ cmp(r4, Operand(FIRST_SPEC_OBJECT_TYPE));
-      __ b(ge, slow);
+      __ cmpi(r7, Operand(FIRST_SPEC_OBJECT_TYPE));
+      __ bge(slow);
      // Normally here we fall through to return_equal, but undefined is
      // special: (undefined == undefined) == true, but
      // (undefined <= undefined) == false!  See ECMAScript 11.8.5.
      if (cond == le || cond == ge) {
-        __ cmp(r4, Operand(ODDBALL_TYPE));
-        __ b(ne, &return_equal);
-        __ LoadRoot(r2, Heap::kUndefinedValueRootIndex);
-        __ cmp(r0, r2);
-        __ b(ne, &return_equal);
+        __ cmpi(r7, Operand(ODDBALL_TYPE));
+        __ bne(&return_equal);
+        __ LoadRoot(r5, Heap::kUndefinedValueRootIndex);
+        __ cmp(r3, r5);
+        __ bne(&return_equal);
        if (cond == le) {
          // undefined <= undefined should fail.
-          __ mov(r0, Operand(GREATER));
+          __ li(r3, Operand(GREATER));
        } else  {
          // undefined >= undefined should fail.
-          __ mov(r0, Operand(LESS));
+          __ li(r3, Operand(LESS));
        }
        __ Ret();
      }
@@ -695,11 +697,11 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
 
   __ bind(&return_equal);
   if (cond == lt) {
-    __ mov(r0, Operand(GREATER));  // Things aren't less than themselves.
+    __ li(r3, Operand(GREATER));  // Things aren't less than themselves.
   } else if (cond == gt) {
-    __ mov(r0, Operand(LESS));     // Things aren't greater than themselves.
+    __ li(r3, Operand(LESS));     // Things aren't greater than themselves.
   } else {
-    __ mov(r0, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
+    __ li(r3, Operand(EQUAL));    // Things are <=, >=, ==, === themselves.
   }
   __ Ret();
 
@@ -714,29 +716,33 @@ static void EmitIdenticalObjectComparison(MacroAssembler* masm,
    // The representation of NaN values has all exponent bits (52..62) set,
    // and not all mantissa bits (0..51) clear.
    // Read top bits of double representation (second word of value).
-    __ ldr(r2, FieldMemOperand(r0, HeapNumber::kExponentOffset));
+    __ lwz(r5, FieldMemOperand(r3, HeapNumber::kExponentOffset));
    // Test that exponent bits are all set.
-    __ Sbfx(r3, r2, HeapNumber::kExponentShift, HeapNumber::kExponentBits);
-    // NaNs have all-one exponents so they sign extend to -1.
-    __ cmp(r3, Operand(-1));
-    __ b(ne, &return_equal);
+    STATIC_ASSERT(HeapNumber::kExponentMask == 0x7ff00000u);
+    __ ExtractBitMask(r6, r5, HeapNumber::kExponentMask);
+    __ cmpli(r6, Operand(0x7ff));
+    __ bne(&return_equal);
 
    // Shift out flag and all exponent bits, retaining only mantissa.
-    __ mov(r2, Operand(r2, LSL, HeapNumber::kNonMantissaBitsInTopWord));
+    __ slwi(r5, r5, Operand(HeapNumber::kNonMantissaBitsInTopWord));
    // Or with all low-bits of mantissa.
-    __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset));
-    __ orr(r0, r3, Operand(r2), SetCC);
-    // For equal we already have the right value in r0:  Return zero (equal)
+    __ lwz(r6, FieldMemOperand(r3, HeapNumber::kMantissaOffset));
+    __ orx(r3, r6, r5);
+    __ cmpi(r3, Operand::Zero());
+    // For equal we already have the right value in r3:  Return zero (equal)
    // if all bits in mantissa are zero (it's an Infinity) and non-zero if
    // not (it's a NaN).  For <= and >= we need to load r0 with the failing
    // value if it's a NaN.
    if (cond != eq) {
+      Label not_equal;
+      __ bne(&not_equal);
      // All-zero means Infinity means equal.
-      __ Ret(eq);
+      __ Ret();
+      __ bind(&not_equal);
      if (cond == le) {
-        __ mov(r0, Operand(GREATER));  // NaN <= NaN should fail.
+        __ li(r3, Operand(GREATER));  // NaN <= NaN should fail.
      } else {
-        __ mov(r0, Operand(LESS));     // NaN >= NaN should fail.
+        __ li(r3, Operand(LESS));     // NaN >= NaN should fail.
      }
    }
    __ Ret();
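// [Editor's annotation, not part of the patch] A hedged, self-contained
// model of the NaN test above: a double is NaN iff all 11 exponent bits are
// set (0x7ff) and the mantissa is non-zero (an all-zero mantissa is an
// Infinity, which compares equal to itself). Helper name is illustrative.
#include <cstdint>
#include <cstring>
static bool IsNaNBits(double d) {
  uint64_t bits;
  std::memcpy(&bits, &d, sizeof(bits));
  uint32_t exponent = static_cast<uint32_t>(bits >> 52) & 0x7ff;
  uint64_t mantissa = bits & ((uint64_t{1} << 52) - 1);
  return exponent == 0x7ff && mantissa != 0;
}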
@@ -754,59 +760,65 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
                                     Label* lhs_not_nan,
                                     Label* slow,
                                     bool strict) {
-  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
-         (lhs.is(r1) && rhs.is(r0)));
+  DCHECK((lhs.is(r3) && rhs.is(r4)) ||
+         (lhs.is(r4) && rhs.is(r3)));
 
   Label rhs_is_smi;
   __ JumpIfSmi(rhs, &rhs_is_smi);
 
   // Lhs is a Smi.  Check whether the rhs is a heap number.
-  __ CompareObjectType(rhs, r4, r4, HEAP_NUMBER_TYPE);
+  __ CompareObjectType(rhs, r6, r7, HEAP_NUMBER_TYPE);
   if (strict) {
     // If rhs is not a number and lhs is a Smi then strict equality cannot
     // succeed.  Return non-equal
-    // If rhs is r0 then there is already a non zero value in it.
-    if (!rhs.is(r0)) {
-      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+    // If rhs is r3 then there is already a non zero value in it.
+    Label skip;
+    __ beq(&skip);
+    if (!rhs.is(r3)) {
+      __ mov(r3, Operand(NOT_EQUAL));
     }
-    __ Ret(ne);
+    __ Ret();
+    __ bind(&skip);
   } else {
     // Smi compared non-strictly with a non-Smi non-heap-number.  Call
     // the runtime.
-    __ b(ne, slow);
+    __ bne(slow);
   }
 
   // Lhs is a smi, rhs is a number.
   // Convert lhs to a double in d7.
   __ SmiToDouble(d7, lhs);
-  // Load the double from rhs, tagged HeapNumber r0, to d6.
-  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
+  // Load the double from rhs, tagged HeapNumber r3, to d6.
+  __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
 
   // We now have both loaded as doubles but we can skip the lhs nan check
   // since it's a smi.
-  __ jmp(lhs_not_nan);
+  __ b(lhs_not_nan);
 
   __ bind(&rhs_is_smi);
   // Rhs is a smi.  Check whether the non-smi lhs is a heap number.
-  __ CompareObjectType(lhs, r4, r4, HEAP_NUMBER_TYPE);
+  __ CompareObjectType(lhs, r7, r7, HEAP_NUMBER_TYPE);
   if (strict) {
     // If lhs is not a number and rhs is a smi then strict equality cannot
     // succeed.  Return non-equal.
-    // If lhs is r0 then there is already a non zero value in it.
-    if (!lhs.is(r0)) {
-      __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);
+    // If lhs is r3 then there is already a non zero value in it.
+    Label skip;
+    __ beq(&skip);
+    if (!lhs.is(r3)) {
+      __ mov(r3, Operand(NOT_EQUAL));
     }
-    __ Ret(ne);
+    __ Ret();
+    __ bind(&skip);
   } else {
     // Smi compared non-strictly with a non-smi non-heap-number.  Call
     // the runtime.
-    __ b(ne, slow);
+    __ bne(slow);
   }
 
   // Rhs is a smi, lhs is a heap number.
-  // Load the double from lhs, tagged HeapNumber r1, to d7.
-  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
-  // Convert rhs to a double in d6              .
+  // Load the double from lhs, tagged HeapNumber r4, to d7.
+  __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+  // Convert rhs to a double in d6.
   __ SmiToDouble(d6, rhs);
   // Fall through to both_loaded_as_doubles.
 }
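// [Editor's annotation, not part of the patch] ARM's predicated instructions
// (mov ..., LeaveCC, ne / Ret(ne)) have no PPC equivalent, so the port
// rewrites each one as an explicit branch around the conditional code. The
// recurring shape, sketched from the hunk above:
//
//   ARM:  __ mov(r0, Operand(NOT_EQUAL), LeaveCC, ne);  // runs only if ne
//         __ Ret(ne);                                   // returns only if ne
//   PPC:  Label skip;
//         __ beq(&skip);                  // invert the condition, skip over
//         __ mov(r3, Operand(NOT_EQUAL));
//         __ Ret();
//         __ bind(&skip);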
@@ -816,42 +828,42 @@ static void EmitSmiNonsmiComparison(MacroAssembler* masm,
 static void EmitStrictTwoHeapObjectCompare(MacroAssembler* masm,
                                            Register lhs,
                                            Register rhs) {
-    DCHECK((lhs.is(r0) && rhs.is(r1)) ||
-           (lhs.is(r1) && rhs.is(r0)));
+    DCHECK((lhs.is(r3) && rhs.is(r4)) ||
+           (lhs.is(r4) && rhs.is(r3)));
 
     // If either operand is a JS object or an oddball value, then they are
     // not equal since their pointers are different.
     // There is no test for undetectability in strict equality.
     STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);
     Label first_non_object;
-    // Get the type of the first operand into r2 and compare it with
+    // Get the type of the first operand into r5 and compare it with
     // FIRST_SPEC_OBJECT_TYPE.
-    __ CompareObjectType(rhs, r2, r2, FIRST_SPEC_OBJECT_TYPE);
-    __ b(lt, &first_non_object);
+    __ CompareObjectType(rhs, r5, r5, FIRST_SPEC_OBJECT_TYPE);
+    __ blt(&first_non_object);
 
-    // Return non-zero (r0 is not zero)
+    // Return non-zero (r3 is not zero)
     Label return_not_equal;
     __ bind(&return_not_equal);
     __ Ret();
 
     __ bind(&first_non_object);
     // Check for oddballs: true, false, null, undefined.
-    __ cmp(r2, Operand(ODDBALL_TYPE));
-    __ b(eq, &return_not_equal);
+    __ cmpi(r5, Operand(ODDBALL_TYPE));
+    __ beq(&return_not_equal);
 
-    __ CompareObjectType(lhs, r3, r3, FIRST_SPEC_OBJECT_TYPE);
-    __ b(ge, &return_not_equal);
+    __ CompareObjectType(lhs, r6, r6, FIRST_SPEC_OBJECT_TYPE);
+    __ bge(&return_not_equal);
 
     // Check for oddballs: true, false, null, undefined.
-    __ cmp(r3, Operand(ODDBALL_TYPE));
-    __ b(eq, &return_not_equal);
+    __ cmpi(r6, Operand(ODDBALL_TYPE));
+    __ beq(&return_not_equal);
 
     // Now that we have the types we might as well check for
     // internalized-internalized.
     STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
-    __ orr(r2, r2, Operand(r3));
-    __ tst(r2, Operand(kIsNotStringMask | kIsNotInternalizedMask));
-    __ b(eq, &return_not_equal);
+    __ orx(r5, r5, r6);
+    __ andi(r0, r5, Operand(kIsNotStringMask | kIsNotInternalizedMask));
+    __ beq(&return_not_equal, cr0);
 }
 
 
@@ -862,20 +874,21 @@ static void EmitCheckForTwoHeapNumbers(MacroAssembler* masm,
                                        Label* both_loaded_as_doubles,
                                        Label* not_heap_numbers,
                                        Label* slow) {
-  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
-         (lhs.is(r1) && rhs.is(r0)));
+  DCHECK((lhs.is(r3) && rhs.is(r4)) ||
+         (lhs.is(r4) && rhs.is(r3)));
 
-  __ CompareObjectType(rhs, r3, r2, HEAP_NUMBER_TYPE);
-  __ b(ne, not_heap_numbers);
-  __ ldr(r2, FieldMemOperand(lhs, HeapObject::kMapOffset));
-  __ cmp(r2, r3);
-  __ b(ne, slow);  // First was a heap number, second wasn't.  Go slow case.
+  __ CompareObjectType(rhs, r6, r5, HEAP_NUMBER_TYPE);
+  __ bne(not_heap_numbers);
+  __ LoadP(r5, FieldMemOperand(lhs, HeapObject::kMapOffset));
+  __ cmp(r5, r6);
+  __ bne(slow);  // First was a heap number, second wasn't.  Go slow case.
 
   // Both are heap numbers.  Load them up then jump to the code we have
   // for that.
-  __ vldr(d6, rhs, HeapNumber::kValueOffset - kHeapObjectTag);
-  __ vldr(d7, lhs, HeapNumber::kValueOffset - kHeapObjectTag);
-  __ jmp(both_loaded_as_doubles);
+  __ lfd(d6, FieldMemOperand(rhs, HeapNumber::kValueOffset));
+  __ lfd(d7, FieldMemOperand(lhs, HeapNumber::kValueOffset));
+
+  __ b(both_loaded_as_doubles);
 }
 
 
@@ -885,40 +898,40 @@ static void EmitCheckForInternalizedStringsOrObjects(MacroAssembler* masm,
                                                      Register rhs,
                                                      Label* possible_strings,
                                                      Label* not_both_strings) {
-  DCHECK((lhs.is(r0) && rhs.is(r1)) ||
-         (lhs.is(r1) && rhs.is(r0)));
+  DCHECK((lhs.is(r3) && rhs.is(r4)) ||
+         (lhs.is(r4) && rhs.is(r3)));
 
-  // r2 is object type of rhs.
+  // r5 is object type of rhs.
   Label object_test;
   STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
-  __ tst(r2, Operand(kIsNotStringMask));
-  __ b(ne, &object_test);
-  __ tst(r2, Operand(kIsNotInternalizedMask));
-  __ b(ne, possible_strings);
-  __ CompareObjectType(lhs, r3, r3, FIRST_NONSTRING_TYPE);
-  __ b(ge, not_both_strings);
-  __ tst(r3, Operand(kIsNotInternalizedMask));
-  __ b(ne, possible_strings);
+  __ andi(r0, r5, Operand(kIsNotStringMask));
+  __ bne(&object_test, cr0);
+  __ andi(r0, r5, Operand(kIsNotInternalizedMask));
+  __ bne(possible_strings, cr0);
+  __ CompareObjectType(lhs, r6, r6, FIRST_NONSTRING_TYPE);
+  __ bge(not_both_strings);
+  __ andi(r0, r6, Operand(kIsNotInternalizedMask));
+  __ bne(possible_strings, cr0);
 
   // Both are internalized.  We already checked they weren't the same pointer
   // so they are not equal.
-  __ mov(r0, Operand(NOT_EQUAL));
+  __ li(r3, Operand(NOT_EQUAL));
   __ Ret();
 
   __ bind(&object_test);
-  __ cmp(r2, Operand(FIRST_SPEC_OBJECT_TYPE));
-  __ b(lt, not_both_strings);
-  __ CompareObjectType(lhs, r2, r3, FIRST_SPEC_OBJECT_TYPE);
-  __ b(lt, not_both_strings);
+  __ cmpi(r5, Operand(FIRST_SPEC_OBJECT_TYPE));
+  __ blt(not_both_strings);
+  __ CompareObjectType(lhs, r5, r6, FIRST_SPEC_OBJECT_TYPE);
+  __ blt(not_both_strings);
   // If both objects are undetectable, they are equal. Otherwise, they
   // are not equal, since they are different objects and an object is not
   // equal to undefined.
-  __ ldr(r3, FieldMemOperand(rhs, HeapObject::kMapOffset));
-  __ ldrb(r2, FieldMemOperand(r2, Map::kBitFieldOffset));
-  __ ldrb(r3, FieldMemOperand(r3, Map::kBitFieldOffset));
-  __ and_(r0, r2, Operand(r3));
-  __ and_(r0, r0, Operand(1 << Map::kIsUndetectable));
-  __ eor(r0, r0, Operand(1 << Map::kIsUndetectable));
+  __ LoadP(r6, FieldMemOperand(rhs, HeapObject::kMapOffset));
+  __ lbz(r5, FieldMemOperand(r5, Map::kBitFieldOffset));
+  __ lbz(r6, FieldMemOperand(r6, Map::kBitFieldOffset));
+  __ and_(r3, r5, r6);
+  __ andi(r3, r3, Operand(1 << Map::kIsUndetectable));
+  __ xori(r3, r3, Operand(1 << Map::kIsUndetectable));
   __ Ret();
 }
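// [Editor's annotation, not part of the patch] A hedged C++ model of the
// and/andi/xori tail above: the result is 0 (meaning "equal") only when both
// maps carry the undetectable bit. kIsUndetectableBit stands in for V8's
// Map::kIsUndetectable; its value here is illustrative only.
#include <cstdint>
static const int kIsUndetectableBit = 4;  // hypothetical bit position
static int32_t ObjectEqualityResult(uint8_t lhs_map_bits,
                                    uint8_t rhs_map_bits) {
  int32_t r = lhs_map_bits & rhs_map_bits;  // and_
  r &= 1 << kIsUndetectableBit;             // andi
  return r ^ (1 << kIsUndetectableBit);     // xori: 0 iff both bits set
}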
 
@@ -942,26 +955,27 @@ static void ICCompareStub_CheckInputType(MacroAssembler* masm,
 }
 
 
-// On entry r1 and r2 are the values to be compared.
-// On exit r0 is 0, positive or negative to indicate the result of
+// On entry r4 and r5 are the values to be compared.
+// On exit r3 is 0, positive or negative to indicate the result of
 // the comparison.
 void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
-  Register lhs = r1;
-  Register rhs = r0;
+  Register lhs = r4;
+  Register rhs = r3;
   Condition cc = GetCondition();
 
   Label miss;
-  ICCompareStub_CheckInputType(masm, lhs, r2, left_, &miss);
-  ICCompareStub_CheckInputType(masm, rhs, r3, right_, &miss);
+  ICCompareStub_CheckInputType(masm, lhs, r5, left_, &miss);
+  ICCompareStub_CheckInputType(masm, rhs, r6, right_, &miss);
 
   Label slow;  // Call builtin.
   Label not_smis, both_loaded_as_doubles, lhs_not_nan;
 
   Label not_two_smis, smi_done;
-  __ orr(r2, r1, r0);
-  __ JumpIfNotSmi(r2, &not_two_smis);
-  __ mov(r1, Operand(r1, ASR, 1));
-  __ sub(r0, r1, Operand(r0, ASR, 1));
+  __ orx(r5, r4, r3);
+  __ JumpIfNotSmi(r5, &not_two_smis);
+  __ SmiUntag(r4);
+  __ SmiUntag(r3);
+  __ sub(r3, r4, r3);
   __ Ret();
   __ bind(&not_two_smis);
 
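// [Editor's annotation, not part of the patch] A hedged C++ model of the
// two-smi fast path above: untag both values and subtract; the sign of the
// difference is the comparison result. Helper name is illustrative.
#include <cstdint>
static intptr_t CompareSmis(intptr_t lhs_tagged, intptr_t rhs_tagged) {
  // Smis are stored shifted left by the tag size (1 bit assumed here) with
  // tag 0, so untagging is an arithmetic shift right; the untagged values
  // have headroom, so the subtraction cannot overflow.
  intptr_t lhs = lhs_tagged >> 1;
  intptr_t rhs = rhs_tagged >> 1;
  return lhs - rhs;  // < 0: less, 0: equal, > 0: greater
}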
@@ -976,41 +990,45 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   // be strictly equal if the other is a HeapNumber.
   STATIC_ASSERT(kSmiTag == 0);
   DCHECK_EQ(0, Smi::FromInt(0));
-  __ and_(r2, lhs, Operand(rhs));
-  __ JumpIfNotSmi(r2, &not_smis);
+  __ and_(r5, lhs, rhs);
+  __ JumpIfNotSmi(r5, &not_smis);
   // One operand is a smi.  EmitSmiNonsmiComparison generates code that can:
   // 1) Return the answer.
   // 2) Go to slow.
   // 3) Fall through to both_loaded_as_doubles.
   // 4) Jump to lhs_not_nan.
   // In cases 3 and 4 we have found out we were dealing with a number-number
-  // comparison.  If VFP3 is supported the double values of the numbers have
-  // been loaded into d7 and d6.  Otherwise, the double values have been loaded
-  // into r0, r1, r2, and r3.
+  // comparison.  The double values of the numbers have been loaded
+  // into d7 and d6.
   EmitSmiNonsmiComparison(masm, lhs, rhs, &lhs_not_nan, &slow, strict());
 
   __ bind(&both_loaded_as_doubles);
-  // The arguments have been converted to doubles and stored in d6 and d7, if
-  // VFP3 is supported, or in r0, r1, r2, and r3.
+  // The arguments have been converted to doubles and stored in d6 and d7
   __ bind(&lhs_not_nan);
   Label no_nan;
-  // ARMv7 VFP3 instructions to implement double precision comparison.
-  __ VFPCompareAndSetFlags(d7, d6);
-  Label nan;
-  __ b(vs, &nan);
-  __ mov(r0, Operand(EQUAL), LeaveCC, eq);
-  __ mov(r0, Operand(LESS), LeaveCC, lt);
-  __ mov(r0, Operand(GREATER), LeaveCC, gt);
+  __ fcmpu(d7, d6);
+
+  Label nan, equal, less_than;
+  __ bunordered(&nan);
+  __ beq(&equal);
+  __ blt(&less_than);
+  __ li(r3, Operand(GREATER));
+  __ Ret();
+  __ bind(&equal);
+  __ li(r3, Operand(EQUAL));
+  __ Ret();
+  __ bind(&less_than);
+  __ li(r3, Operand(LESS));
   __ Ret();
 
   __ bind(&nan);
-  // If one of the sides was a NaN then the v flag is set.  Load r0 with
+  // If one of the sides was a NaN the comparison is unordered.  Load r3 with
   // whatever it takes to make the comparison fail, since comparisons with NaN
   // always fail.
   if (cc == lt || cc == le) {
-    __ mov(r0, Operand(GREATER));
+    __ li(r3, Operand(GREATER));
   } else {
-    __ mov(r0, Operand(LESS));
+    __ li(r3, Operand(LESS));
   }
   __ Ret();
 
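// [Editor's annotation, not part of the patch] The fcmpu sequence above is a
// three-way compare with explicit NaN handling; a hedged C++ equivalent:
static int CompareDoubles(double lhs, double rhs, bool less_like_condition) {
  if (lhs != lhs || rhs != rhs) {          // unordered: at least one NaN
    return less_like_condition ? +1 : -1;  // any NaN comparison must fail
  }
  if (lhs == rhs) return 0;                // EQUAL
  return (lhs < rhs) ? -1 : +1;            // LESS : GREATER
}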
@@ -1026,10 +1044,10 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   Label check_for_internalized_strings;
   Label flat_string_check;
   // Check for heap-number-heap-number comparison.  Can jump to slow case,
-  // or load both doubles into r0, r1, r2, r3 and jump to the code that handles
+  // or load both doubles into r3, r4, r5, r6 and jump to the code that handles
   // that case.  If the inputs are not doubles then jumps to
   // check_for_internalized_strings.
-  // In this case r2 will contain the type of rhs_.  Never falls through.
+  // In this case r5 will contain the type of rhs_.  Never falls through.
   EmitCheckForTwoHeapNumbers(masm,
                              lhs,
                              rhs,
@@ -1043,7 +1061,7 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   if (cc == eq && !strict()) {
     // Returns an answer for two internalized strings or two detectable objects.
     // Otherwise jumps to string case or not both strings case.
-    // Assumes that r2 is the type of rhs_ on entry.
+    // Assumes that r5 is the type of rhs_ on entry.
     EmitCheckForInternalizedStringsOrObjects(
         masm, lhs, rhs, &flat_string_check, &slow);
   }
@@ -1052,25 +1070,23 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
   // case.
   __ bind(&flat_string_check);
 
-  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r2, r3, &slow);
+  __ JumpIfNonSmisNotBothSequentialAsciiStrings(lhs, rhs, r5, r6, &slow);
 
-  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r2,
-                      r3);
+  __ IncrementCounter(isolate()->counters()->string_compare_native(), 1, r5,
+                      r6);
   if (cc == eq) {
     StringCompareStub::GenerateFlatAsciiStringEquals(masm,
                                                      lhs,
                                                      rhs,
-                                                     r2,
-                                                     r3,
-                                                     r4);
+                                                     r5,
+                                                     r6);
   } else {
     StringCompareStub::GenerateCompareFlatAsciiStrings(masm,
                                                        lhs,
                                                        rhs,
-                                                       r2,
-                                                       r3,
-                                                       r4,
-                                                       r5);
+                                                       r5,
+                                                       r6,
+                                                       r7);
   }
   // Never falls through to here.
 
@@ -1090,8 +1106,8 @@ void ICCompareStub::GenerateGeneric(MacroAssembler* masm) {
      DCHECK(cc == gt || cc == ge);  // remaining cases
      ncr = LESS;
    }
-    __ mov(r0, Operand(Smi::FromInt(ncr)));
-    __ push(r0);
+    __ LoadSmiLiteral(r3, Smi::FromInt(ncr));
+    __ push(r3);
  }
 
   // Call the native; it returns -1 (less), 0 (equal), or 1 (greater)
@@ -1107,41 +1123,41 @@ void StoreBufferOverflowStub::Generate(MacroAssembler* masm) {
   // We don't allow a GC during a store buffer overflow so there is no need to
   // store the registers in any particular way, but we do have to store and
   // restore them.
-  __ stm(db_w, sp, kCallerSaved | lr.bit());
-
-  const Register scratch = r1;
-
+  __ mflr(r0);
+  __ MultiPush(kJSCallerSaved | r0.bit());
   if (save_doubles_ == kSaveFPRegs) {
-    __ SaveFPRegs(sp, scratch);
+    __ SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
   }
   const int argument_count = 1;
   const int fp_argument_count = 0;
+  const Register scratch = r4;
 
   AllowExternalCallThatCantCauseGC scope(masm);
   __ PrepareCallCFunction(argument_count, fp_argument_count, scratch);
-  __ mov(r0, Operand(ExternalReference::isolate_address(isolate())));
+  __ mov(r3, Operand(ExternalReference::isolate_address(isolate())));
   __ CallCFunction(
       ExternalReference::store_buffer_overflow_function(isolate()),
      argument_count);
   if (save_doubles_ == kSaveFPRegs) {
-    __ RestoreFPRegs(sp, scratch);
+    __ RestoreFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
   }
-  __ ldm(ia_w, sp, kCallerSaved | pc.bit());  // Also pop pc to get Ret(0).
+  __ MultiPop(kJSCallerSaved | r0.bit());
+  __ mtlr(r0);
+  __ Ret();
 }
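// [Editor's annotation, not part of the patch] On PPC the return address
// lives in the link register, which is not a GPR and cannot be pushed
// directly the way ARM pushes lr; it is first copied out with mflr and
// restored with mtlr. The idiom this port uses throughout:
//
//   __ mflr(r0);                              // LR -> r0
//   __ MultiPush(kJSCallerSaved | r0.bit());  // save GPRs + return address
//   ...                                       // body may clobber LR
//   __ MultiPop(kJSCallerSaved | r0.bit());
//   __ mtlr(r0);                              // r0 -> LR
//   __ Ret();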
 
 
 void MathPowStub::Generate(MacroAssembler* masm) {
-  const Register base = r1;
-  const Register exponent = r2;
-  const Register heapnumbermap = r5;
-  const Register heapnumber = r0;
-  const DwVfpRegister double_base = d0;
-  const DwVfpRegister double_exponent = d1;
-  const DwVfpRegister double_result = d2;
-  const DwVfpRegister double_scratch = d3;
-  const SwVfpRegister single_scratch = s6;
-  const Register scratch = r9;
-  const Register scratch2 = r4;
+  const Register base = r4;
+  const Register exponent = r5;
+  const Register heapnumbermap = r8;
+  const Register heapnumber = r3;
+  const DoubleRegister double_base = d1;
+  const DoubleRegister double_exponent = d2;
+  const DoubleRegister double_result = d3;
+  const DoubleRegister double_scratch = d0;
+  const Register scratch = r11;
+  const Register scratch2 = r10;
 
   Label call_runtime, done, int_exponent;
   if (exponent_type_ == ON_STACK) {
@@ -1149,93 +1165,93 @@ void MathPowStub::Generate(MacroAssembler* masm) {
     // The exponent and base are supplied as arguments on the stack.
     // This can only happen if the stub is called from non-optimized code.
     // Load input parameters from stack to double registers.
-    __ ldr(base, MemOperand(sp, 1 * kPointerSize));
-    __ ldr(exponent, MemOperand(sp, 0 * kPointerSize));
+    __ LoadP(base, MemOperand(sp, 1 * kPointerSize));
+    __ LoadP(exponent, MemOperand(sp, 0 * kPointerSize));
 
     __ LoadRoot(heapnumbermap, Heap::kHeapNumberMapRootIndex);
 
     __ UntagAndJumpIfSmi(scratch, base, &base_is_smi);
-    __ ldr(scratch, FieldMemOperand(base, JSObject::kMapOffset));
+    __ LoadP(scratch, FieldMemOperand(base, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
-    __ b(ne, &call_runtime);
+    __ bne(&call_runtime);
 
-    __ vldr(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
-    __ jmp(&unpack_exponent);
+    __ lfd(double_base, FieldMemOperand(base, HeapNumber::kValueOffset));
+    __ b(&unpack_exponent);
 
     __ bind(&base_is_smi);
-    __ vmov(single_scratch, scratch);
-    __ vcvt_f64_s32(double_base, single_scratch);
+    __ ConvertIntToDouble(scratch, double_base);
     __ bind(&unpack_exponent);
 
     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
-
-    __ ldr(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
+    __ LoadP(scratch, FieldMemOperand(exponent, JSObject::kMapOffset));
     __ cmp(scratch, heapnumbermap);
-    __ b(ne, &call_runtime);
-    __ vldr(double_exponent,
-            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+    __ bne(&call_runtime);
+
+    __ lfd(double_exponent,
+           FieldMemOperand(exponent, HeapNumber::kValueOffset));
   } else if (exponent_type_ == TAGGED) {
     // Base is already in double_base.
     __ UntagAndJumpIfSmi(scratch, exponent, &int_exponent);
 
-    __ vldr(double_exponent,
-            FieldMemOperand(exponent, HeapNumber::kValueOffset));
+    __ lfd(double_exponent,
+           FieldMemOperand(exponent, HeapNumber::kValueOffset));
   }
 
   if (exponent_type_ != INTEGER) {
-    Label int_exponent_convert;
     // Detect integer exponents stored as double.
-    __ vcvt_u32_f64(single_scratch, double_exponent);
-    // We do not check for NaN or Infinity here because comparing numbers on
-    // ARM correctly distinguishes NaNs.  We end up calling the built-in.
-    __ vcvt_f64_u32(double_scratch, single_scratch);
-    __ VFPCompareAndSetFlags(double_scratch, double_exponent);
-    __ b(eq, &int_exponent_convert);
+    __ TryDoubleToInt32Exact(scratch, double_exponent,
+                             scratch2, double_scratch);
+    __ beq(&int_exponent);
 
     if (exponent_type_ == ON_STACK) {
      // Detect square root case.  Crankshaft detects constant +/-0.5 at
      // compile time and uses DoMathPowHalf instead.  We then skip this check
      // for non-constant cases of +/-0.5 as these hardly occur.
-      Label not_plus_half;
+      Label not_plus_half, not_minus_inf1, not_minus_inf2;
 
      // Test for 0.5.
-      __ vmov(double_scratch, 0.5, scratch);
-      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
-      __ b(ne, &not_plus_half);
+      __ LoadDoubleLiteral(double_scratch, 0.5, scratch);
+      __ fcmpu(double_exponent, double_scratch);
+      __ bne(&not_plus_half);
 
      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, 0.5) == Infinity (ECMA spec, 15.8.2.13).
-      __ vmov(double_scratch, -V8_INFINITY, scratch);
-      __ VFPCompareAndSetFlags(double_base, double_scratch);
-      __ vneg(double_result, double_scratch, eq);
-      __ b(eq, &done);
+      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+      __ fcmpu(double_base, double_scratch);
+      __ bne(&not_minus_inf1);
+      __ fneg(double_result, double_scratch);
+      __ b(&done);
+      __ bind(&not_minus_inf1);
 
      // Add +0 to convert -0 to +0.
-      __ vadd(double_scratch, double_base, kDoubleRegZero);
-      __ vsqrt(double_result, double_scratch);
-      __ jmp(&done);
+      __ fadd(double_scratch, double_base, kDoubleRegZero);
+      __ fsqrt(double_result, double_scratch);
+      __ b(&done);
 
      __ bind(&not_plus_half);
-      __ vmov(double_scratch, -0.5, scratch);
-      __ VFPCompareAndSetFlags(double_exponent, double_scratch);
-      __ b(ne, &call_runtime);
+      __ LoadDoubleLiteral(double_scratch, -0.5, scratch);
+      __ fcmpu(double_exponent, double_scratch);
+      __ bne(&call_runtime);
 
      // Calculates square root of base.  Check for the special case of
      // Math.pow(-Infinity, -0.5) == 0 (ECMA spec, 15.8.2.13).
-      __ vmov(double_scratch, -V8_INFINITY, scratch);
-      __ VFPCompareAndSetFlags(double_base, double_scratch);
-      __ vmov(double_result, kDoubleRegZero, eq);
-      __ b(eq, &done);
+      __ LoadDoubleLiteral(double_scratch, -V8_INFINITY, scratch);
+      __ fcmpu(double_base, double_scratch);
+      __ bne(&not_minus_inf2);
+      __ fmr(double_result, kDoubleRegZero);
+      __ b(&done);
+      __ bind(&not_minus_inf2);
 
      // Add +0 to convert -0 to +0.
-      __ vadd(double_scratch, double_base, kDoubleRegZero);
-      __ vmov(double_result, 1.0, scratch);
-      __ vsqrt(double_scratch, double_scratch);
-      __ vdiv(double_result, double_result, double_scratch);
-      __ jmp(&done);
+      __ fadd(double_scratch, double_base, kDoubleRegZero);
+      __ LoadDoubleLiteral(double_result, 1.0, scratch);
+      __ fsqrt(double_scratch, double_scratch);
+      __ fdiv(double_result, double_result, double_scratch);
+      __ b(&done);
    }
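// [Editor's annotation, not part of the patch] A hedged C++ model of the
// exponent == +0.5 special case above (ES5 15.8.2.13): sqrt almost works,
// except for -Infinity and -0. Helper name is illustrative.
#include <cmath>
#include <limits>
static double PowPlusHalf(double base) {
  const double kInf = std::numeric_limits<double>::infinity();
  if (base == -kInf) return kInf;  // sqrt(-inf) would be NaN; spec wants +inf
  return std::sqrt(base + 0.0);    // +0 folds -0 to +0, so sqrt(-0) == +0
}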
|  | 
| -    __ push(lr); | 
| +    __ mflr(r0); | 
| +    __ push(r0); | 
| { | 
| AllowExternalCallThatCantCauseGC scope(masm); | 
| __ PrepareCallCFunction(0, 2, scratch); | 
| @@ -1244,13 +1260,10 @@ void MathPowStub::Generate(MacroAssembler* masm) { | 
| ExternalReference::power_double_double_function(isolate()), | 
| 0, 2); | 
| } | 
| -    __ pop(lr); | 
| +    __ pop(r0); | 
| +    __ mtlr(r0); | 
| __ MovFromFloatResult(double_result); | 
| -    __ jmp(&done); | 
| - | 
| -    __ bind(&int_exponent_convert); | 
| -    __ vcvt_u32_f64(single_scratch, double_exponent); | 
| -    __ vmov(scratch, single_scratch); | 
| +    __ b(&done); | 
| } | 
|  | 
| // Calculate power with integer exponent. | 
| @@ -1258,38 +1271,47 @@ void MathPowStub::Generate(MacroAssembler* masm) { | 
|  | 
| // Get two copies of exponent in the registers scratch and exponent. | 
| if (exponent_type_ == INTEGER) { | 
| -    __ mov(scratch, exponent); | 
| +    __ mr(scratch, exponent); | 
| } else { | 
| // Exponent has previously been stored into scratch as untagged integer. | 
| -    __ mov(exponent, scratch); | 
| +    __ mr(exponent, scratch); | 
| } | 
| -  __ vmov(double_scratch, double_base);  // Back up base. | 
| -  __ vmov(double_result, 1.0, scratch2); | 
| +  __ fmr(double_scratch, double_base);  // Back up base. | 
| +  __ li(scratch2, Operand(1)); | 
| +  __ ConvertIntToDouble(scratch2, double_result); | 
|  | 
| // Get absolute value of exponent. | 
| -  __ cmp(scratch, Operand::Zero()); | 
| -  __ mov(scratch2, Operand::Zero(), LeaveCC, mi); | 
| -  __ sub(scratch, scratch2, scratch, LeaveCC, mi); | 
| +  Label positive_exponent; | 
| +  __ cmpi(scratch, Operand::Zero()); | 
| +  __ bge(&positive_exponent); | 
| +  __ neg(scratch, scratch); | 
| +  __ bind(&positive_exponent); | 
|  | 
| -  Label while_true; | 
| +  Label while_true, no_carry, loop_end; | 
| __ bind(&while_true); | 
| -  __ mov(scratch, Operand(scratch, ASR, 1), SetCC); | 
| -  __ vmul(double_result, double_result, double_scratch, cs); | 
| -  __ vmul(double_scratch, double_scratch, double_scratch, ne); | 
| -  __ b(ne, &while_true); | 
| - | 
| -  __ cmp(exponent, Operand::Zero()); | 
| -  __ b(ge, &done); | 
| -  __ vmov(double_scratch, 1.0, scratch); | 
| -  __ vdiv(double_result, double_scratch, double_result); | 
| +  __ andi(scratch2, scratch, Operand(1)); | 
| +  __ beq(&no_carry, cr0); | 
| +  __ fmul(double_result, double_result, double_scratch); | 
| +  __ bind(&no_carry); | 
| +  __ ShiftRightArithImm(scratch, scratch, 1, SetRC); | 
| +  __ beq(&loop_end, cr0); | 
| +  __ fmul(double_scratch, double_scratch, double_scratch); | 
| +  __ b(&while_true); | 
| +  __ bind(&loop_end); | 
| + | 
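The loop emitted above is binary exponentiation (square-and-multiply): each iteration consumes one bit of the exponent via the `andi.`/`beq` pair, and `ShiftRightArithImm(..., SetRC)` drives the exit test on `cr0`. A scalar sketch (not V8 code; the subnormal-result bailout handled below is not modeled):

```cpp
// Square-and-multiply, mirroring the while_true/no_carry/loop_end labels.
double int_pow(double base, int exponent) {
  int scratch = exponent < 0 ? -exponent : exponent;  // neg, as above.
  double result = 1.0;
  while (true) {
    if (scratch & 1) result *= base;  // andi. picked out the low bit.
    scratch >>= 1;                    // ShiftRightArithImm with SetRC.
    if (scratch == 0) break;          // beq &loop_end on cr0.
    base *= base;                     // fmul double_scratch with itself.
  }
  // For negative exponents the code below takes the reciprocal, then
  // bails out to the C runtime if the result underflowed to zero.
  if (exponent < 0) result = 1.0 / result;
  return result;
}
```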
| +  __ cmpi(exponent, Operand::Zero()); | 
| +  __ bge(&done); | 
| + | 
| +  __ li(scratch2, Operand(1)); | 
| +  __ ConvertIntToDouble(scratch2, double_scratch); | 
| +  __ fdiv(double_result, double_scratch, double_result); | 
| // Test whether result is zero.  Bail out to check for subnormal result. | 
| // Due to subnormals, x^-y == (1/x)^y does not hold in all cases. | 
| -  __ VFPCompareAndSetFlags(double_result, 0.0); | 
| -  __ b(ne, &done); | 
| +  __ fcmpu(double_result, kDoubleRegZero); | 
| +  __ bne(&done); | 
| // double_exponent may not contain the exponent value if the input was a | 
| // smi.  We set it with exponent value before bailing out. | 
| -  __ vmov(single_scratch, exponent); | 
| -  __ vcvt_f64_s32(double_exponent, single_scratch); | 
| +  __ ConvertIntToDouble(exponent, double_exponent); | 
|  | 
| // Returning or bailing out. | 
| Counters* counters = isolate()->counters(); | 
| @@ -1303,13 +1325,14 @@ void MathPowStub::Generate(MacroAssembler* masm) { | 
| __ bind(&done); | 
| __ AllocateHeapNumber( | 
| heapnumber, scratch, scratch2, heapnumbermap, &call_runtime); | 
| -    __ vstr(double_result, | 
| +    __ stfd(double_result, | 
| FieldMemOperand(heapnumber, HeapNumber::kValueOffset)); | 
| -    DCHECK(heapnumber.is(r0)); | 
| +    DCHECK(heapnumber.is(r3)); | 
| __ IncrementCounter(counters->math_pow(), 1, scratch, scratch2); | 
| __ Ret(2); | 
| } else { | 
| -    __ push(lr); | 
| +    __ mflr(r0); | 
| +    __ push(r0); | 
| { | 
| AllowExternalCallThatCantCauseGC scope(masm); | 
| __ PrepareCallCFunction(0, 2, scratch); | 
| @@ -1318,7 +1341,8 @@ void MathPowStub::Generate(MacroAssembler* masm) { | 
| ExternalReference::power_double_double_function(isolate()), | 
| 0, 2); | 
| } | 
| -    __ pop(lr); | 
| +    __ pop(r0); | 
| +    __ mtlr(r0); | 
| __ MovFromFloatResult(double_result); | 
|  | 
| __ bind(&done); | 
| @@ -1335,7 +1359,7 @@ bool CEntryStub::NeedsImmovableCode() { | 
|  | 
| void CodeStub::GenerateStubsAheadOfTime(Isolate* isolate) { | 
| CEntryStub::GenerateAheadOfTime(isolate); | 
| -  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); | 
| +//  WriteInt32ToHeapNumberStub::GenerateFixedRegStubsAheadOfTime(isolate); | 
| StoreBufferOverflowStub::GenerateFixedRegStubsAheadOfTime(isolate); | 
| StubFailureTrampolineStub::GenerateAheadOfTime(isolate); | 
| ArrayConstructorStubBase::GenerateStubsAheadOfTime(isolate); | 
| @@ -1372,52 +1396,74 @@ void CEntryStub::GenerateAheadOfTime(Isolate* isolate) { | 
|  | 
| void CEntryStub::Generate(MacroAssembler* masm) { | 
| // Called from JavaScript; parameters are on stack as if calling JS function. | 
| -  // r0: number of arguments including receiver | 
| -  // r1: pointer to builtin function | 
| +  // r3: number of arguments including receiver | 
| +  // r4: pointer to builtin function | 
| // fp: frame pointer  (restored after C call) | 
| // sp: stack pointer  (restored as callee's sp after C call) | 
| // cp: current context  (C callee-saved) | 
|  | 
| ProfileEntryHookStub::MaybeCallEntryHook(masm); | 
|  | 
| -  __ mov(r5, Operand(r1)); | 
| +  __ mr(r15, r4); | 
|  | 
| -  // Compute the argv pointer in a callee-saved register. | 
| -  __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2)); | 
| -  __ sub(r1, r1, Operand(kPointerSize)); | 
| +  // Compute the argv pointer. | 
| +  __ ShiftLeftImm(r4, r3, Operand(kPointerSizeLog2)); | 
| +  __ add(r4, r4, sp); | 
| +  __ subi(r4, r4, Operand(kPointerSize)); | 
|  | 
| // Enter the exit frame that transitions from JavaScript to C++. | 
| FrameScope scope(masm, StackFrame::MANUAL); | 
| -  __ EnterExitFrame(save_doubles_); | 
|  | 
| -  // Store a copy of argc in callee-saved registers for later. | 
| -  __ mov(r4, Operand(r0)); | 
| +  // Need at least one extra slot for return address location. | 
| +  int arg_stack_space = 1; | 
|  | 
| -  // r0, r4: number of arguments including receiver  (C callee-saved) | 
| -  // r1: pointer to the first argument (C callee-saved) | 
| -  // r5: pointer to builtin function  (C callee-saved) | 
| +  // PPC LINUX ABI: | 
| +#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS | 
| +  // Pass buffer for return value on stack if necessary | 
| +  if (result_size_ > 1) { | 
| +    DCHECK_EQ(2, result_size_); | 
| +    arg_stack_space += 2; | 
| +  } | 
| +#endif | 
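The `result_size_ > 1` case exists for runtime calls that return two tagged pointers. On ABIs without register-pair aggregate returns, a 16-byte struct return is lowered by the compiler into a hidden pointer argument, which is why two slots are reserved here and the argument registers are shuffled up below. A minimal stand-alone illustration (not V8 code; `ObjectPair` is a hypothetical stand-in for the runtime's two-word result type):

```cpp
#include <cstdio>

struct ObjectPair {  // Two pointers: 16 bytes on a 64-bit target.
  void* x;
  void* y;
};

// Under the ABI discussed above this is compiled as if it were
//   void make_pair(ObjectPair* hidden_result, void* a, void* b);
// i.e. the caller passes a buffer address as an implicit first argument.
ObjectPair make_pair(void* a, void* b) {
  ObjectPair result = {a, b};
  return result;  // Stored through the hidden pointer, not in registers.
}

int main() {
  int v = 0;
  ObjectPair p = make_pair(&v, nullptr);
  std::printf("%p %p\n", p.x, p.y);
}
```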
|  | 
| -  // Result returned in r0 or r0+r1 by default. | 
| +  __ EnterExitFrame(save_doubles_, arg_stack_space); | 
|  | 
| -#if V8_HOST_ARCH_ARM | 
| -  int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 
| -  int frame_alignment_mask = frame_alignment - 1; | 
| -  if (FLAG_debug_code) { | 
| -    if (frame_alignment > kPointerSize) { | 
| -      Label alignment_as_expected; | 
| -      DCHECK(IsPowerOf2(frame_alignment)); | 
| -      __ tst(sp, Operand(frame_alignment_mask)); | 
| -      __ b(eq, &alignment_as_expected); | 
| -      // Don't use Check here, as it will call Runtime_Abort re-entering here. | 
| -      __ stop("Unexpected alignment"); | 
| -      __ bind(&alignment_as_expected); | 
| -    } | 
| +  // Store a copy of argc in callee-saved registers for later. | 
| +  __ mr(r14, r3); | 
| + | 
| +  // r3, r14: number of arguments including receiver  (C callee-saved) | 
| +  // r4: pointer to the first argument | 
| +  // r15: pointer to builtin function  (C callee-saved) | 
| + | 
| +  // Result returned in registers or stack, depending on result size and ABI. | 
| + | 
| +  Register isolate_reg = r5; | 
| +#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS | 
| +  if (result_size_ > 1) { | 
| +    // The return value is a 16-byte non-scalar value. | 
| +    // Use frame storage reserved by calling function to pass return | 
| +    // buffer as implicit first argument. | 
| +    __ mr(r5, r4); | 
| +    __ mr(r4, r3); | 
| +    __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize)); | 
| +    isolate_reg = r6; | 
| } | 
| #endif | 
|  | 
| // Call C built-in. | 
| -  // r0 = argc, r1 = argv | 
| -  __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); | 
| +  __ mov(isolate_reg, Operand(ExternalReference::isolate_address(isolate()))); | 
| + | 
| +#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) | 
| +  // Native AIX/PPC64 Linux use a function descriptor. | 
| +  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(r15, kPointerSize)); | 
| +  __ LoadP(ip, MemOperand(r15, 0));  // Instruction address | 
| +  Register target = ip; | 
| +#elif ABI_TOC_ADDRESSABILITY_VIA_IP | 
| +  __ Move(ip, r15); | 
| +  Register target = ip; | 
| +#else | 
| +  Register target = r15; | 
| +#endif | 
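Which of the three cases applies depends on how the target ABI represents a C function pointer. On AIX and big-endian PPC64 ELFv1 Linux it addresses a function descriptor rather than the code itself, roughly of this shape (a sketch; field names are illustrative, the layout is ABI-defined):

```cpp
// What r15 points at under ABI_USES_FUNCTION_DESCRIPTORS.
struct FunctionDescriptor {
  void* entry_point;  // Address of the first instruction; loaded into ip.
  void* toc_base;     // Callee's TOC base; loaded into ABI_TOC_REGISTER.
  void* environment;  // Static chain pointer; unused for plain C calls.
};
```

Hence the two `LoadP`s above: the real entry point goes into `ip`, and the callee's TOC base is installed before the branch.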
|  | 
| // To let the GC traverse the return address of the exit frames, we need to | 
| // know where the return address is. The CEntryStub is unmovable, so | 
| @@ -1426,30 +1472,43 @@ void CEntryStub::Generate(MacroAssembler* masm) { | 
| // Compute the return address in lr to return to after the jump below. Pc is | 
| // already at '+ 8' from the current instruction but return is after three | 
| // instructions so add another 4 to pc to get the return address. | 
| -  { | 
| -    // Prevent literal pool emission before return address. | 
| -    Assembler::BlockConstPoolScope block_const_pool(masm); | 
| -    __ add(lr, pc, Operand(4)); | 
| -    __ str(lr, MemOperand(sp, 0)); | 
| -    __ Call(r5); | 
| +  { Assembler::BlockTrampolinePoolScope block_trampoline_pool(masm); | 
| +    Label here; | 
| +    __ b(&here, SetLK); | 
| +    __ bind(&here); | 
| +    __ mflr(r8); | 
| + | 
| +    // The constant used below depends on the size of the Call() macro sequence. | 
| +    __ addi(r0, r8, Operand(20)); | 
| + | 
| +    __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); | 
| +    __ Call(target); | 
| } | 
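PPC has no architectural way to read the program counter directly, so the block above synthesizes the return address with a link-setting branch to the literal next instruction. Schematically (a sketch; the `20` assumes the `Call(target)` macro expands to two instructions here, e.g. `mtctr`/`bctrl`, which is what the warning about the size of the `Call()` sequence refers to):

```cpp
//   b here, LK=1       ; branch to the next instruction, LR <- &here
// here:                ; +0   mflr r8          r8 = &here
//                      ; +4   addi r0, r8, 20  r0 = &here + 20
//                      ; +8   StoreP r0, [sp + kStackFrameExtraParamSlot * kPointerSize]
//                      ; +12  mtctr ...        \ assumed two-instruction
//                      ; +16  bctrl            / Call(target) expansion
//                      ; +20  <- execution resumes here after the call,
//                      ;       matching the address spilled at +8 so the
//                      ;       GC can find the exit frame's return address
```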
|  | 
| -  __ VFPEnsureFPSCRState(r2); | 
| +  // roohack - do we need to (re)set FPU state? | 
| + | 
| +#if V8_TARGET_ARCH_PPC64 && !ABI_RETURNS_OBJECT_PAIRS_IN_REGS | 
| +  // If the return value is on the stack, load it into registers. | 
| +  if (result_size_ > 1) { | 
| +    __ LoadP(r4, MemOperand(r3, kPointerSize)); | 
| +    __ LoadP(r3, MemOperand(r3)); | 
| +  } | 
| +#endif | 
|  | 
| // Runtime functions should not return 'the hole'.  Allowing it to escape may | 
| // lead to crashes in the IC code later. | 
| if (FLAG_debug_code) { | 
| Label okay; | 
| -    __ CompareRoot(r0, Heap::kTheHoleValueRootIndex); | 
| -    __ b(ne, &okay); | 
| +    __ CompareRoot(r3, Heap::kTheHoleValueRootIndex); | 
| +    __ bne(&okay); | 
| __ stop("The hole escaped"); | 
| __ bind(&okay); | 
| } | 
|  | 
| // Check result for exception sentinel. | 
| Label exception_returned; | 
| -  __ CompareRoot(r0, Heap::kExceptionRootIndex); | 
| -  __ b(eq, &exception_returned); | 
| +  __ CompareRoot(r3, Heap::kExceptionRootIndex); | 
| +  __ beq(&exception_returned); | 
|  | 
| ExternalReference pending_exception_address( | 
| Isolate::kPendingExceptionAddress, isolate()); | 
| @@ -1458,168 +1517,162 @@ void CEntryStub::Generate(MacroAssembler* masm) { | 
| // should have returned the exception sentinel. | 
| if (FLAG_debug_code) { | 
| Label okay; | 
| -    __ mov(r2, Operand(pending_exception_address)); | 
| -    __ ldr(r2, MemOperand(r2)); | 
| -    __ CompareRoot(r2, Heap::kTheHoleValueRootIndex); | 
| +    __ mov(r5, Operand(pending_exception_address)); | 
| +    __ LoadP(r5, MemOperand(r5)); | 
| +    __ CompareRoot(r5, Heap::kTheHoleValueRootIndex); | 
| // Cannot use Check here as it attempts to generate a call into the runtime. | 
| -    __ b(eq, &okay); | 
| +    __ beq(&okay); | 
| __ stop("Unexpected pending exception"); | 
| __ bind(&okay); | 
| } | 
|  | 
| // Exit C frame and return. | 
| -  // r0:r1: result | 
| +  // r3:r4: result | 
| // sp: stack pointer | 
| // fp: frame pointer | 
| -  // Callee-saved register r4 still holds argc. | 
| -  __ LeaveExitFrame(save_doubles_, r4, true); | 
| -  __ mov(pc, lr); | 
| +  // r14: still holds argc (callee-saved). | 
| +  __ LeaveExitFrame(save_doubles_, r14, true); | 
| +  __ blr(); | 
|  | 
| // Handling of exception. | 
| __ bind(&exception_returned); | 
|  | 
| // Retrieve the pending exception. | 
| -  __ mov(r2, Operand(pending_exception_address)); | 
| -  __ ldr(r0, MemOperand(r2)); | 
| +  __ mov(r5, Operand(pending_exception_address)); | 
| +  __ LoadP(r3, MemOperand(r5)); | 
|  | 
| // Clear the pending exception. | 
| -  __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); | 
| -  __ str(r3, MemOperand(r2)); | 
| +  __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); | 
| +  __ StoreP(r6, MemOperand(r5)); | 
|  | 
| // Special handling of termination exceptions which are uncatchable | 
| // by javascript code. | 
| Label throw_termination_exception; | 
| -  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex); | 
| -  __ b(eq, &throw_termination_exception); | 
| +  __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex); | 
| +  __ beq(&throw_termination_exception); | 
|  | 
| // Handle normal exception. | 
| -  __ Throw(r0); | 
| +  __ Throw(r3); | 
|  | 
| __ bind(&throw_termination_exception); | 
| -  __ ThrowUncatchable(r0); | 
| +  __ ThrowUncatchable(r3); | 
| } | 
|  | 
|  | 
| void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 
| -  // r0: code entry | 
| -  // r1: function | 
| -  // r2: receiver | 
| -  // r3: argc | 
| +  // r3: code entry | 
| +  // r4: function | 
| +  // r5: receiver | 
| +  // r6: argc | 
| // [sp+0]: argv | 
|  | 
| Label invoke, handler_entry, exit; | 
|  | 
| +  // Called from C | 
| +#if ABI_USES_FUNCTION_DESCRIPTORS | 
| +  __ function_descriptor(); | 
| +#endif | 
| + | 
| ProfileEntryHookStub::MaybeCallEntryHook(masm); | 
|  | 
| -  // Called from C, so do not pop argc and args on exit (preserve sp) | 
| -  // No need to save register-passed args | 
| -  // Save callee-saved registers (incl. cp and fp), sp, and lr | 
| -  __ stm(db_w, sp, kCalleeSaved | lr.bit()); | 
| +  // PPC LINUX ABI: | 
| +  // Preserve LR in the pre-reserved slot in the caller's frame. | 
| +  __ mflr(r0); | 
| +  __ StoreP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize)); | 
|  | 
| -  // Save callee-saved vfp registers. | 
| -  __ vstm(db_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); | 
| -  // Set up the reserved register for 0.0. | 
| -  __ vmov(kDoubleRegZero, 0.0); | 
| -  __ VFPEnsureFPSCRState(r4); | 
| +  // Save callee saved registers on the stack. | 
| +  __ MultiPush(kCalleeSaved); | 
|  | 
| -  // Get address of argv, see stm above. | 
| -  // r0: code entry | 
| -  // r1: function | 
| -  // r2: receiver | 
| -  // r3: argc | 
| +  // Floating point regs FPR0 - FRP13 are volatile | 
| +  // FPR14-FPR31 are non-volatile, but sub-calls will save them for us | 
|  | 
| -  // Set up argv in r4. | 
| -  int offset_to_argv = (kNumCalleeSaved + 1) * kPointerSize; | 
| -  offset_to_argv += kNumDoubleCalleeSaved * kDoubleSize; | 
| -  __ ldr(r4, MemOperand(sp, offset_to_argv)); | 
| +//  int offset_to_argv = kPointerSize * 22; // matches (22*4) above | 
| +//  __ lwz(r7, MemOperand(sp, offset_to_argv)); | 
|  | 
| // Push a frame with special values setup to mark it as an entry frame. | 
| -  // r0: code entry | 
| -  // r1: function | 
| -  // r2: receiver | 
| -  // r3: argc | 
| -  // r4: argv | 
| +  // r3: code entry | 
| +  // r4: function | 
| +  // r5: receiver | 
| +  // r6: argc | 
| +  // r7: argv | 
| +  __ li(r0, Operand(-1));  // Push a bad frame pointer to fail if it is used. | 
| +  __ push(r0); | 
| +#if V8_OOL_CONSTANT_POOL | 
| +  __ mov(kConstantPoolRegister, | 
| +         Operand(isolate()->factory()->empty_constant_pool_array())); | 
| +  __ push(kConstantPoolRegister); | 
| +#endif | 
| int marker = is_construct ? StackFrame::ENTRY_CONSTRUCT : StackFrame::ENTRY; | 
| -  if (FLAG_enable_ool_constant_pool) { | 
| -    __ mov(r8, Operand(isolate()->factory()->empty_constant_pool_array())); | 
| -  } | 
| -  __ mov(r7, Operand(Smi::FromInt(marker))); | 
| -  __ mov(r6, Operand(Smi::FromInt(marker))); | 
| -  __ mov(r5, | 
| -         Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 
| -  __ ldr(r5, MemOperand(r5)); | 
| -  __ mov(ip, Operand(-1));  // Push a bad frame pointer to fail if it is used. | 
| -  __ stm(db_w, sp, r5.bit() | r6.bit() | r7.bit() | | 
| -                   (FLAG_enable_ool_constant_pool ? r8.bit() : 0) | | 
| -                   ip.bit()); | 
| +  __ LoadSmiLiteral(r0, Smi::FromInt(marker)); | 
| +  __ push(r0); | 
| +  __ push(r0); | 
| +  // Save copies of the top frame descriptor on the stack. | 
| +  __ mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 
| +  __ LoadP(r0, MemOperand(r8)); | 
| +  __ push(r0); | 
|  | 
| // Set up frame pointer for the frame to be pushed. | 
| -  __ add(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 
| +  __ addi(fp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 
|  | 
| // If this is the outermost JS call, set js_entry_sp value. | 
| Label non_outermost_js; | 
| ExternalReference js_entry_sp(Isolate::kJSEntrySPAddress, isolate()); | 
| -  __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 
| -  __ ldr(r6, MemOperand(r5)); | 
| -  __ cmp(r6, Operand::Zero()); | 
| -  __ b(ne, &non_outermost_js); | 
| -  __ str(fp, MemOperand(r5)); | 
| -  __ mov(ip, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); | 
| +  __ mov(r8, Operand(ExternalReference(js_entry_sp))); | 
| +  __ LoadP(r9, MemOperand(r8)); | 
| +  __ cmpi(r9, Operand::Zero()); | 
| +  __ bne(&non_outermost_js); | 
| +  __ StoreP(fp, MemOperand(r8)); | 
| +  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME)); | 
| Label cont; | 
| __ b(&cont); | 
| __ bind(&non_outermost_js); | 
| -  __ mov(ip, Operand(Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME))); | 
| +  __ LoadSmiLiteral(ip, Smi::FromInt(StackFrame::INNER_JSENTRY_FRAME)); | 
| __ bind(&cont); | 
| -  __ push(ip); | 
| +  __ push(ip);  // frame-type | 
|  | 
| // Jump to a faked try block that does the invoke, with a faked catch | 
| // block that sets the pending exception. | 
| -  __ jmp(&invoke); | 
| +  __ b(&invoke); | 
| + | 
| +  __ bind(&handler_entry); | 
| +  handler_offset_ = handler_entry.pos(); | 
| +  // Caught exception: Store result (exception) in the pending exception | 
| +  // field in the JSEnv and return a failure sentinel.  Coming in here the | 
| +  // fp will be invalid because the PushTryHandler below sets it to 0 to | 
| +  // signal the existence of the JSEntry frame. | 
| +  __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 
| +                                       isolate()))); | 
|  | 
| -  // Block literal pool emission whilst taking the position of the handler | 
| -  // entry. This avoids making the assumption that literal pools are always | 
| -  // emitted after an instruction is emitted, rather than before. | 
| -  { | 
| -    Assembler::BlockConstPoolScope block_const_pool(masm); | 
| -    __ bind(&handler_entry); | 
| -    handler_offset_ = handler_entry.pos(); | 
| -    // Caught exception: Store result (exception) in the pending exception | 
| -    // field in the JSEnv and return a failure sentinel.  Coming in here the | 
| -    // fp will be invalid because the PushTryHandler below sets it to 0 to | 
| -    // signal the existence of the JSEntry frame. | 
| -    __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 
| -                                         isolate()))); | 
| -  } | 
| -  __ str(r0, MemOperand(ip)); | 
| -  __ LoadRoot(r0, Heap::kExceptionRootIndex); | 
| +  __ StoreP(r3, MemOperand(ip)); | 
| +  __ LoadRoot(r3, Heap::kExceptionRootIndex); | 
| __ b(&exit); | 
|  | 
| // Invoke: Link this frame into the handler chain.  There's only one | 
| // handler block in this code object, so its index is 0. | 
| __ bind(&invoke); | 
| -  // Must preserve r0-r4, r5-r6 are available. | 
| +  // Must preserve r0-r4; r5-r7 are available. (needs update for PPC) | 
| __ PushTryHandler(StackHandler::JS_ENTRY, 0); | 
| // If an exception not caught by another handler occurs, this handler | 
| -  // returns control to the code after the bl(&invoke) above, which | 
| +  // returns control to the code after the b(&invoke) above, which | 
| // restores all kCalleeSaved registers (including cp and fp) to their | 
| // saved values before returning a failure to C. | 
|  | 
| // Clear any pending exceptions. | 
| -  __ mov(r5, Operand(isolate()->factory()->the_hole_value())); | 
| +  __ mov(r8, Operand(isolate()->factory()->the_hole_value())); | 
| __ mov(ip, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 
| isolate()))); | 
| -  __ str(r5, MemOperand(ip)); | 
| +  __ StoreP(r8, MemOperand(ip)); | 
|  | 
| // Invoke the function by calling through JS entry trampoline builtin. | 
| // Notice that we cannot store a reference to the trampoline code directly in | 
| // this stub, because runtime stubs are not traversed when doing GC. | 
|  | 
| // Expected registers by Builtins::JSEntryTrampoline | 
| -  // r0: code entry | 
| -  // r1: function | 
| -  // r2: receiver | 
| -  // r3: argc | 
| -  // r4: argv | 
| +  // r3: code entry | 
| +  // r4: function | 
| +  // r5: receiver | 
| +  // r6: argc | 
| +  // r7: argv | 
| if (is_construct) { | 
| ExternalReference construct_entry(Builtins::kJSConstructEntryTrampoline, | 
| isolate()); | 
| @@ -1628,73 +1681,89 @@ void JSEntryStub::GenerateBody(MacroAssembler* masm, bool is_construct) { | 
| ExternalReference entry(Builtins::kJSEntryTrampoline, isolate()); | 
| __ mov(ip, Operand(entry)); | 
| } | 
| -  __ ldr(ip, MemOperand(ip));  // deref address | 
| -  __ add(ip, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| +  __ LoadP(ip, MemOperand(ip));  // deref address | 
|  | 
| // Branch and link to JSEntryTrampoline. | 
| -  __ Call(ip); | 
| +  // The address points to the start of the code object; skip the header. | 
| +  __ addi(r0, ip, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| +  __ mtlr(r0); | 
| +  __ bclr(BA, SetLK);  // make the call | 
|  | 
| // Unlink this frame from the handler chain. | 
| __ PopTryHandler(); | 
|  | 
| -  __ bind(&exit);  // r0 holds result | 
| +  __ bind(&exit);  // r3 holds result | 
| // Check if the current stack frame is marked as the outermost JS frame. | 
| Label non_outermost_js_2; | 
| -  __ pop(r5); | 
| -  __ cmp(r5, Operand(Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME))); | 
| -  __ b(ne, &non_outermost_js_2); | 
| -  __ mov(r6, Operand::Zero()); | 
| -  __ mov(r5, Operand(ExternalReference(js_entry_sp))); | 
| -  __ str(r6, MemOperand(r5)); | 
| +  __ pop(r8); | 
| +  __ CmpSmiLiteral(r8, Smi::FromInt(StackFrame::OUTERMOST_JSENTRY_FRAME), r0); | 
| +  __ bne(&non_outermost_js_2); | 
| +  __ mov(r9, Operand::Zero()); | 
| +  __ mov(r8, Operand(ExternalReference(js_entry_sp))); | 
| +  __ StoreP(r9, MemOperand(r8)); | 
| __ bind(&non_outermost_js_2); | 
|  | 
| // Restore the top frame descriptors from the stack. | 
| -  __ pop(r3); | 
| +  __ pop(r6); | 
| __ mov(ip, | 
| Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 
| -  __ str(r3, MemOperand(ip)); | 
| +  __ StoreP(r6, MemOperand(ip)); | 
|  | 
| // Reset the stack to the callee saved registers. | 
| -  __ add(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 
| +  __ addi(sp, sp, Operand(-EntryFrameConstants::kCallerFPOffset)); | 
|  | 
| // Restore callee-saved registers and return. | 
| #ifdef DEBUG | 
| if (FLAG_debug_code) { | 
| -    __ mov(lr, Operand(pc)); | 
| +    Label here; | 
| +    __ b(&here, SetLK); | 
| +    __ bind(&here); | 
| } | 
| #endif | 
|  | 
| -  // Restore callee-saved vfp registers. | 
| -  __ vldm(ia_w, sp, kFirstCalleeSavedDoubleReg, kLastCalleeSavedDoubleReg); | 
| +  __ MultiPop(kCalleeSaved); | 
|  | 
| -  __ ldm(ia_w, sp, kCalleeSaved | pc.bit()); | 
| +  __ LoadP(r0, MemOperand(sp, kStackFrameLRSlot * kPointerSize)); | 
| +  __ mtctr(r0); | 
| +  __ bctr(); | 
| } | 
|  | 
|  | 
| -// Uses registers r0 to r4. | 
| +// Uses registers r3 to r7. | 
| // Expected input (depending on whether args are in registers or on the stack): | 
| -// * object: r0 or at sp + 1 * kPointerSize. | 
| -// * function: r1 or at sp. | 
| +// * object: r3 or at sp + 1 * kPointerSize. | 
| +// * function: r4 or at sp. | 
| // | 
| // An inlined call site may have been generated before calling this stub. | 
| -// In this case the offset to the inline sites to patch are passed in r5 and r6. | 
| +// In this case the offset to the inline site to patch is passed in r8. | 
| // (See LCodeGen::DoInstanceOfKnownGlobal) | 
| void InstanceofStub::Generate(MacroAssembler* masm) { | 
| // Call site inlining and patching implies arguments in registers. | 
| DCHECK(HasArgsInRegisters() || !HasCallSiteInlineCheck()); | 
|  | 
| // Fixed register usage throughout the stub: | 
| -  const Register object = r0;  // Object (lhs). | 
| -  Register map = r3;  // Map of the object. | 
| -  const Register function = r1;  // Function (rhs). | 
| -  const Register prototype = r4;  // Prototype of the function. | 
| -  const Register scratch = r2; | 
| +  const Register object = r3;  // Object (lhs). | 
| +  Register map = r6;  // Map of the object. | 
| +  const Register function = r4;  // Function (rhs). | 
| +  const Register prototype = r7;  // Prototype of the function. | 
| +  const Register inline_site = r9; | 
| +  const Register scratch = r5; | 
| +  Register scratch3 = no_reg; | 
| + | 
| +  // delta = mov + unaligned LoadP + cmp + bne | 
| +#if V8_TARGET_ARCH_PPC64 | 
| +  const int32_t kDeltaToLoadBoolResult = | 
| +      (Assembler::kMovInstructions + 4) * Assembler::kInstrSize; | 
| +#else | 
| +  const int32_t kDeltaToLoadBoolResult = | 
| +      (Assembler::kMovInstructions + 3) * Assembler::kInstrSize; | 
| +#endif | 
|  | 
| Label slow, loop, is_instance, is_not_instance, not_js_object; | 
|  | 
| if (!HasArgsInRegisters()) { | 
| -    __ ldr(object, MemOperand(sp, 1 * kPointerSize)); | 
| -    __ ldr(function, MemOperand(sp, 0)); | 
| +    __ LoadP(object, MemOperand(sp, 1 * kPointerSize)); | 
| +    __ LoadP(function, MemOperand(sp, 0)); | 
| } | 
|  | 
| // Check that the left hand is a JS object and load map. | 
| @@ -1706,10 +1775,10 @@ void InstanceofStub::Generate(MacroAssembler* masm) { | 
| if (!HasCallSiteInlineCheck() && !ReturnTrueFalseObject()) { | 
| Label miss; | 
| __ CompareRoot(function, Heap::kInstanceofCacheFunctionRootIndex); | 
| -    __ b(ne, &miss); | 
| +    __ bne(&miss); | 
| __ CompareRoot(map, Heap::kInstanceofCacheMapRootIndex); | 
| -    __ b(ne, &miss); | 
| -    __ LoadRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 
| +    __ bne(&miss); | 
| +    __ LoadRoot(r3, Heap::kInstanceofCacheAnswerRootIndex); | 
| __ Ret(HasArgsInRegisters() ? 0 : 2); | 
|  | 
| __ bind(&miss); | 
| @@ -1731,81 +1800,72 @@ void InstanceofStub::Generate(MacroAssembler* masm) { | 
| DCHECK(HasArgsInRegisters()); | 
| // Patch the (relocated) inlined map check. | 
|  | 
| -    // The map_load_offset was stored in r5 | 
| +    // The offset was stored in r8 | 
| //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). | 
| -    const Register map_load_offset = r5; | 
| -    __ sub(r9, lr, map_load_offset); | 
| -    // Get the map location in r5 and patch it. | 
| -    __ GetRelocatedValueLocation(r9, map_load_offset, scratch); | 
| -    __ ldr(map_load_offset, MemOperand(map_load_offset)); | 
| -    __ str(map, FieldMemOperand(map_load_offset, Cell::kValueOffset)); | 
| +    const Register offset = r8; | 
| +    __ mflr(inline_site); | 
| +    __ sub(inline_site, inline_site, offset); | 
| +    // Get the map location in r8 and patch it. | 
| +    __ GetRelocatedValue(inline_site, offset, scratch); | 
| +    __ StoreP(map, FieldMemOperand(offset, Cell::kValueOffset), r0); | 
| } | 
|  | 
| -  // Register mapping: r3 is object map and r4 is function prototype. | 
| -  // Get prototype of object into r2. | 
| -  __ ldr(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 
| +  // Register mapping: r6 is object map and r7 is function prototype. | 
| +  // Get prototype of object into r5. | 
| +  __ LoadP(scratch, FieldMemOperand(map, Map::kPrototypeOffset)); | 
|  | 
| // We don't need map any more. Use it as a scratch register. | 
| -  Register scratch2 = map; | 
| +  scratch3 = map; | 
| map = no_reg; | 
|  | 
| // Loop through the prototype chain looking for the function prototype. | 
| -  __ LoadRoot(scratch2, Heap::kNullValueRootIndex); | 
| +  __ LoadRoot(scratch3, Heap::kNullValueRootIndex); | 
| __ bind(&loop); | 
| -  __ cmp(scratch, Operand(prototype)); | 
| -  __ b(eq, &is_instance); | 
| -  __ cmp(scratch, scratch2); | 
| -  __ b(eq, &is_not_instance); | 
| -  __ ldr(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 
| -  __ ldr(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 
| -  __ jmp(&loop); | 
| +  __ cmp(scratch, prototype); | 
| +  __ beq(&is_instance); | 
| +  __ cmp(scratch, scratch3); | 
| +  __ beq(&is_not_instance); | 
| +  __ LoadP(scratch, FieldMemOperand(scratch, HeapObject::kMapOffset)); | 
| +  __ LoadP(scratch, FieldMemOperand(scratch, Map::kPrototypeOffset)); | 
| +  __ b(&loop); | 
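In scalar terms the loop just emitted walks the prototype chain until it hits either the function's prototype or null. A sketch (not V8 code; `Obj::prototype` stands in for the `HeapObject` map load followed by the `Map::kPrototypeOffset` load):

```cpp
struct Obj {
  const Obj* prototype;  // Models LoadP(map) + LoadP(map->prototype).
};

bool IsInPrototypeChain(const Obj* object_proto, const Obj* fn_prototype,
                        const Obj* null_value) {
  const Obj* current = object_proto;  // scratch on entry to &loop.
  while (true) {
    if (current == fn_prototype) return true;  // beq &is_instance.
    if (current == null_value) return false;   // beq &is_not_instance.
    current = current->prototype;              // The two LoadP's.
  }
}
```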
| Factory* factory = isolate()->factory(); | 
|  | 
| __ bind(&is_instance); | 
| if (!HasCallSiteInlineCheck()) { | 
| -    __ mov(r0, Operand(Smi::FromInt(0))); | 
| -    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 
| +    __ LoadSmiLiteral(r3, Smi::FromInt(0)); | 
| +    __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex); | 
| if (ReturnTrueFalseObject()) { | 
| -      __ Move(r0, factory->true_value()); | 
| +      __ Move(r3, factory->true_value()); | 
| } | 
| } else { | 
| // Patch the call site to return true. | 
| -    __ LoadRoot(r0, Heap::kTrueValueRootIndex); | 
| -    // The bool_load_offset was stored in r6 | 
| -    //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). | 
| -    const Register bool_load_offset = r6; | 
| -    __ sub(r9, lr, bool_load_offset); | 
| +    __ LoadRoot(r3, Heap::kTrueValueRootIndex); | 
| +    __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); | 
| // Get the boolean result location in scratch and patch it. | 
| -    __ GetRelocatedValueLocation(r9, scratch, scratch2); | 
| -    __ str(r0, MemOperand(scratch)); | 
| +    __ SetRelocatedValue(inline_site, scratch, r3); | 
|  | 
| if (!ReturnTrueFalseObject()) { | 
| -      __ mov(r0, Operand(Smi::FromInt(0))); | 
| +      __ LoadSmiLiteral(r3, Smi::FromInt(0)); | 
| } | 
| } | 
| __ Ret(HasArgsInRegisters() ? 0 : 2); | 
|  | 
| __ bind(&is_not_instance); | 
| if (!HasCallSiteInlineCheck()) { | 
| -    __ mov(r0, Operand(Smi::FromInt(1))); | 
| -    __ StoreRoot(r0, Heap::kInstanceofCacheAnswerRootIndex); | 
| +    __ LoadSmiLiteral(r3, Smi::FromInt(1)); | 
| +    __ StoreRoot(r3, Heap::kInstanceofCacheAnswerRootIndex); | 
| if (ReturnTrueFalseObject()) { | 
| -      __ Move(r0, factory->false_value()); | 
| +      __ Move(r3, factory->false_value()); | 
| } | 
| } else { | 
| // Patch the call site to return false. | 
| -    __ LoadRoot(r0, Heap::kFalseValueRootIndex); | 
| -    // The bool_load_offset was stored in r6 | 
| -    //   (See LCodeGen::DoDeferredLInstanceOfKnownGlobal). | 
| -    const Register bool_load_offset = r6; | 
| -    __ sub(r9, lr, bool_load_offset); | 
| -    ; | 
| +    __ LoadRoot(r3, Heap::kFalseValueRootIndex); | 
| +    __ addi(inline_site, inline_site, Operand(kDeltaToLoadBoolResult)); | 
| // Get the boolean result location in scratch and patch it. | 
| -    __ GetRelocatedValueLocation(r9, scratch, scratch2); | 
| -    __ str(r0, MemOperand(scratch)); | 
| +    __ SetRelocatedValue(inline_site, scratch, r3); | 
|  | 
| if (!ReturnTrueFalseObject()) { | 
| -      __ mov(r0, Operand(Smi::FromInt(1))); | 
| +      __ LoadSmiLiteral(r3, Smi::FromInt(1)); | 
| } | 
| } | 
| __ Ret(HasArgsInRegisters() ? 0 : 2); | 
| @@ -1815,16 +1875,16 @@ void InstanceofStub::Generate(MacroAssembler* masm) { | 
| // Before null, smi and string value checks, check that the rhs is a function | 
| // as for a non-function rhs an exception needs to be thrown. | 
| __ JumpIfSmi(function, &slow); | 
| -  __ CompareObjectType(function, scratch2, scratch, JS_FUNCTION_TYPE); | 
| -  __ b(ne, &slow); | 
| +  __ CompareObjectType(function, scratch3, scratch, JS_FUNCTION_TYPE); | 
| +  __ bne(&slow); | 
|  | 
| // Null is not instance of anything. | 
| -  __ cmp(scratch, Operand(isolate()->factory()->null_value())); | 
| -  __ b(ne, &object_not_null); | 
| +  __ Cmpi(scratch, Operand(isolate()->factory()->null_value()), r0); | 
| +  __ bne(&object_not_null); | 
| if (ReturnTrueFalseObject()) { | 
| -    __ Move(r0, factory->false_value()); | 
| +    __ Move(r3, factory->false_value()); | 
| } else { | 
| -    __ mov(r0, Operand(Smi::FromInt(1))); | 
| +    __ LoadSmiLiteral(r3, Smi::FromInt(1)); | 
| } | 
| __ Ret(HasArgsInRegisters() ? 0 : 2); | 
|  | 
| @@ -1832,9 +1892,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { | 
| // Smi values are not instances of anything. | 
| __ JumpIfNotSmi(object, &object_not_null_or_smi); | 
| if (ReturnTrueFalseObject()) { | 
| -    __ Move(r0, factory->false_value()); | 
| +    __ Move(r3, factory->false_value()); | 
| } else { | 
| -    __ mov(r0, Operand(Smi::FromInt(1))); | 
| +    __ LoadSmiLiteral(r3, Smi::FromInt(1)); | 
| } | 
| __ Ret(HasArgsInRegisters() ? 0 : 2); | 
|  | 
| @@ -1842,9 +1902,9 @@ void InstanceofStub::Generate(MacroAssembler* masm) { | 
| // String values are not instances of anything. | 
| __ IsObjectJSStringType(object, scratch, &slow); | 
| if (ReturnTrueFalseObject()) { | 
| -    __ Move(r0, factory->false_value()); | 
| +    __ Move(r3, factory->false_value()); | 
| } else { | 
| -    __ mov(r0, Operand(Smi::FromInt(1))); | 
| +    __ LoadSmiLiteral(r3, Smi::FromInt(1)); | 
| } | 
| __ Ret(HasArgsInRegisters() ? 0 : 2); | 
|  | 
| @@ -1852,18 +1912,26 @@ void InstanceofStub::Generate(MacroAssembler* masm) { | 
| __ bind(&slow); | 
| if (!ReturnTrueFalseObject()) { | 
| if (HasArgsInRegisters()) { | 
| -      __ Push(r0, r1); | 
| +      __ Push(r3, r4); | 
| } | 
| __ InvokeBuiltin(Builtins::INSTANCE_OF, JUMP_FUNCTION); | 
| } else { | 
| { | 
| FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
| -      __ Push(r0, r1); | 
| +      __ Push(r3, r4); | 
| __ InvokeBuiltin(Builtins::INSTANCE_OF, CALL_FUNCTION); | 
| } | 
| -    __ cmp(r0, Operand::Zero()); | 
| -    __ LoadRoot(r0, Heap::kTrueValueRootIndex, eq); | 
| -    __ LoadRoot(r0, Heap::kFalseValueRootIndex, ne); | 
| +    Label true_value, done; | 
| +    __ cmpi(r3, Operand::Zero()); | 
| +    __ beq(&true_value); | 
| + | 
| +    __ LoadRoot(r3, Heap::kFalseValueRootIndex); | 
| +    __ b(&done); | 
| + | 
| +    __ bind(&true_value); | 
| +    __ LoadRoot(r3, Heap::kTrueValueRootIndex); | 
| + | 
| +    __ bind(&done); | 
| __ Ret(HasArgsInRegisters() ? 0 : 2); | 
| } | 
| } | 
| @@ -1873,18 +1941,18 @@ void FunctionPrototypeStub::Generate(MacroAssembler* masm) { | 
| Label miss; | 
| Register receiver = LoadIC::ReceiverRegister(); | 
|  | 
| -  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r3, | 
| -                                                          r4, &miss); | 
| +  NamedLoadHandlerCompiler::GenerateLoadFunctionPrototype(masm, receiver, r6, | 
| +                                                          r7, &miss); | 
| __ bind(&miss); | 
| PropertyAccessCompiler::TailCallBuiltin( | 
| masm, PropertyAccessCompiler::MissBuiltin(Code::LOAD_IC)); | 
| } | 
|  | 
|  | 
| -Register InstanceofStub::left() { return r0; } | 
| +Register InstanceofStub::left() { return r3; } | 
|  | 
|  | 
| -Register InstanceofStub::right() { return r1; } | 
| +Register InstanceofStub::right() { return r4; } | 
|  | 
|  | 
| void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 
| @@ -1895,67 +1963,72 @@ void ArgumentsAccessStub::GenerateReadElement(MacroAssembler* masm) { | 
|  | 
| // Check that the key is a smi. | 
| Label slow; | 
| -  __ JumpIfNotSmi(r1, &slow); | 
| +  __ JumpIfNotSmi(r4, &slow); | 
|  | 
| // Check if the calling frame is an arguments adaptor frame. | 
| Label adaptor; | 
| -  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| -  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | 
| -  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 
| -  __ b(eq, &adaptor); | 
| +  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| +  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset)); | 
| +  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu); | 
| +  __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 
| +  __ beq(&adaptor); | 
|  | 
| // Check index against formal parameters count limit passed in | 
| -  // through register r0. Use unsigned comparison to get negative | 
| +  // through register r3. Use unsigned comparison to get negative | 
| // check for free. | 
| -  __ cmp(r1, r0); | 
| -  __ b(hs, &slow); | 
| +  __ cmpl(r4, r3); | 
| +  __ bge(&slow); | 
|  | 
| // Read the argument from the stack and return it. | 
| -  __ sub(r3, r0, r1); | 
| -  __ add(r3, fp, Operand::PointerOffsetFromSmiKey(r3)); | 
| -  __ ldr(r0, MemOperand(r3, kDisplacement)); | 
| -  __ Jump(lr); | 
| +  __ sub(r6, r3, r4); | 
| +  __ SmiToPtrArrayOffset(r6, r6); | 
| +  __ add(r6, fp, r6); | 
| +  __ LoadP(r3, MemOperand(r6, kDisplacement)); | 
| +  __ blr(); | 
|  | 
| // Arguments adaptor case: Check index against actual arguments | 
| // limit found in the arguments adaptor frame. Use unsigned | 
| // comparison to get negative check for free. | 
| __ bind(&adaptor); | 
| -  __ ldr(r0, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| -  __ cmp(r1, r0); | 
| -  __ b(cs, &slow); | 
| +  __ LoadP(r3, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| +  __ cmpl(r4, r3); | 
| +  __ bge(&slow); | 
|  | 
| // Read the argument from the adaptor frame and return it. | 
| -  __ sub(r3, r0, r1); | 
| -  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r3)); | 
| -  __ ldr(r0, MemOperand(r3, kDisplacement)); | 
| -  __ Jump(lr); | 
| +  __ sub(r6, r3, r4); | 
| +  __ SmiToPtrArrayOffset(r6, r6); | 
| +  __ add(r6, r5, r6); | 
| +  __ LoadP(r3, MemOperand(r6, kDisplacement)); | 
| +  __ blr(); | 
|  | 
| // Slow-case: Handle non-smi or out-of-bounds access to arguments | 
| // by calling the runtime system. | 
| __ bind(&slow); | 
| -  __ push(r1); | 
| +  __ push(r4); | 
| __ TailCallRuntime(Runtime::kGetArgumentsProperty, 1, 1); | 
| } | 
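Both fast paths above turn the smi-tagged index difference into a byte offset with `SmiToPtrArrayOffset`, where the ARM original folded this into `Operand::PointerOffsetFromSmiKey`. A sketch of the arithmetic involved, assuming V8's usual smi encodings (value << 1 on 32-bit targets, value << 32 on 64-bit targets):

```cpp
#include <cstdint>

// offset = smi_value * kPointerSize, computed directly from the tagged form.
intptr_t SmiToPtrArrayOffsetModel(intptr_t tagged_smi) {
#if INTPTR_MAX == INT64_MAX
  const int kSmiShift = 32, kPointerSizeLog2 = 3;
  return tagged_smi >> (kSmiShift - kPointerSizeLog2);  // Arithmetic >> 29.
#else
  const int kSmiShift = 1, kPointerSizeLog2 = 2;
  return tagged_smi << (kPointerSizeLog2 - kSmiShift);  // << 1.
#endif
}
```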
|  | 
|  | 
| void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { | 
| // sp[0] : number of parameters | 
| -  // sp[4] : receiver displacement | 
| -  // sp[8] : function | 
| +  // sp[1] : receiver displacement | 
| +  // sp[2] : function | 
|  | 
| // Check if the calling frame is an arguments adaptor frame. | 
| Label runtime; | 
| -  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| -  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); | 
| -  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 
| -  __ b(ne, &runtime); | 
| +  __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| +  __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset)); | 
| +  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu); | 
| +  __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 
| +  __ bne(&runtime); | 
|  | 
| // Patch the arguments.length and the parameters pointer in the current frame. | 
| -  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| -  __ str(r2, MemOperand(sp, 0 * kPointerSize)); | 
| -  __ add(r3, r3, Operand(r2, LSL, 1)); | 
| -  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| -  __ str(r3, MemOperand(sp, 1 * kPointerSize)); | 
| +  __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| +  __ StoreP(r5, MemOperand(sp, 0 * kPointerSize)); | 
| +  __ SmiToPtrArrayOffset(r5, r5); | 
| +  __ add(r6, r6, r5); | 
| +  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| +  __ StoreP(r6, MemOperand(sp, 1 * kPointerSize)); | 
|  | 
| __ bind(&runtime); | 
| __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); | 
| @@ -1965,39 +2038,44 @@ void ArgumentsAccessStub::GenerateNewSloppySlow(MacroAssembler* masm) { | 
| void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { | 
| // Stack layout: | 
| //  sp[0] : number of parameters (tagged) | 
| -  //  sp[4] : address of receiver argument | 
| -  //  sp[8] : function | 
| +  //  sp[1] : address of receiver argument | 
| +  //  sp[2] : function | 
| // Registers used over whole function: | 
| -  //  r6 : allocated object (tagged) | 
| -  //  r9 : mapped parameter count (tagged) | 
| +  //  r9 : allocated object (tagged) | 
| +  //  r11 : mapped parameter count (tagged) | 
|  | 
| -  __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); | 
| -  // r1 = parameter count (tagged) | 
| +  __ LoadP(r4, MemOperand(sp, 0 * kPointerSize)); | 
| +  // r4 = parameter count (tagged) | 
|  | 
| // Check if the calling frame is an arguments adaptor frame. | 
| Label runtime; | 
| Label adaptor_frame, try_allocate; | 
| -  __ ldr(r3, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| -  __ ldr(r2, MemOperand(r3, StandardFrameConstants::kContextOffset)); | 
| -  __ cmp(r2, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 
| -  __ b(eq, &adaptor_frame); | 
| +  __ LoadP(r6, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| +  __ LoadP(r5, MemOperand(r6, StandardFrameConstants::kContextOffset)); | 
| +  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu); | 
| +  __ CmpSmiLiteral(r5, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 
| +  __ beq(&adaptor_frame); | 
|  | 
| // No adaptor, parameter count = argument count. | 
| -  __ mov(r2, r1); | 
| +  __ mr(r5, r4); | 
| __ b(&try_allocate); | 
|  | 
| // We have an adaptor frame. Patch the parameters pointer. | 
| __ bind(&adaptor_frame); | 
| -  __ ldr(r2, MemOperand(r3, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| -  __ add(r3, r3, Operand(r2, LSL, 1)); | 
| -  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| -  __ str(r3, MemOperand(sp, 1 * kPointerSize)); | 
| - | 
| -  // r1 = parameter count (tagged) | 
| -  // r2 = argument count (tagged) | 
| -  // Compute the mapped parameter count = min(r1, r2) in r1. | 
| -  __ cmp(r1, Operand(r2)); | 
| -  __ mov(r1, Operand(r2), LeaveCC, gt); | 
| +  __ LoadP(r5, MemOperand(r6, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| +  __ SmiToPtrArrayOffset(r7, r5); | 
| +  __ add(r6, r6, r7); | 
| +  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| +  __ StoreP(r6, MemOperand(sp, 1 * kPointerSize)); | 
| + | 
| +  // r4 = parameter count (tagged) | 
| +  // r5 = argument count (tagged) | 
| +  // Compute the mapped parameter count = min(r4, r5) in r4. | 
| +  Label skip; | 
| +  __ cmp(r4, r5); | 
| +  __ blt(&skip); | 
| +  __ mr(r4, r5); | 
| +  __ bind(&skip); | 
|  | 
| __ bind(&try_allocate); | 
|  | 
| @@ -2006,85 +2084,104 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { | 
| const int kParameterMapHeaderSize = | 
| FixedArray::kHeaderSize + 2 * kPointerSize; | 
| // If there are no mapped parameters, we do not need the parameter_map. | 
| -  __ cmp(r1, Operand(Smi::FromInt(0))); | 
| -  __ mov(r9, Operand::Zero(), LeaveCC, eq); | 
| -  __ mov(r9, Operand(r1, LSL, 1), LeaveCC, ne); | 
| -  __ add(r9, r9, Operand(kParameterMapHeaderSize), LeaveCC, ne); | 
| +  Label skip2, skip3; | 
| +  __ CmpSmiLiteral(r4, Smi::FromInt(0), r0); | 
| +  __ bne(&skip2); | 
| +  __ li(r11, Operand::Zero()); | 
| +  __ b(&skip3); | 
| +  __ bind(&skip2); | 
| +  __ SmiToPtrArrayOffset(r11, r4); | 
| +  __ addi(r11, r11, Operand(kParameterMapHeaderSize)); | 
| +  __ bind(&skip3); | 
|  | 
| // 2. Backing store. | 
| -  __ add(r9, r9, Operand(r2, LSL, 1)); | 
| -  __ add(r9, r9, Operand(FixedArray::kHeaderSize)); | 
| +  __ SmiToPtrArrayOffset(r7, r5); | 
| +  __ add(r11, r11, r7); | 
| +  __ addi(r11, r11, Operand(FixedArray::kHeaderSize)); | 
|  | 
| // 3. Arguments object. | 
| -  __ add(r9, r9, Operand(Heap::kSloppyArgumentsObjectSize)); | 
| +  __ addi(r11, r11, Operand(Heap::kSloppyArgumentsObjectSize)); | 
|  | 
| // Do the allocation of all three objects in one go. | 
| -  __ Allocate(r9, r0, r3, r4, &runtime, TAG_OBJECT); | 
| +  __ Allocate(r11, r3, r6, r7, &runtime, TAG_OBJECT); | 
|  | 
| -  // r0 = address of new object(s) (tagged) | 
| -  // r2 = argument count (smi-tagged) | 
| +  // r3 = address of new object(s) (tagged) | 
| +  // r5 = argument count (smi-tagged) | 
| -  // Get the arguments boilerplate from the current native context into r4. | 
| +  // Get the arguments boilerplate from the current native context into r7. | 
| const int kNormalOffset = | 
| Context::SlotOffset(Context::SLOPPY_ARGUMENTS_MAP_INDEX); | 
| const int kAliasedOffset = | 
| Context::SlotOffset(Context::ALIASED_ARGUMENTS_MAP_INDEX); | 
|  | 
| -  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 
| -  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); | 
| -  __ cmp(r1, Operand::Zero()); | 
| -  __ ldr(r4, MemOperand(r4, kNormalOffset), eq); | 
| -  __ ldr(r4, MemOperand(r4, kAliasedOffset), ne); | 
| - | 
| -  // r0 = address of new object (tagged) | 
| -  // r1 = mapped parameter count (tagged) | 
| -  // r2 = argument count (smi-tagged) | 
| -  // r4 = address of arguments map (tagged) | 
| -  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset)); | 
| -  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex); | 
| -  __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset)); | 
| -  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); | 
| +  __ LoadP(r7, MemOperand(cp, | 
| +             Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 
| +  __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset)); | 
| +  Label skip4, skip5; | 
| +  __ cmpi(r4, Operand::Zero()); | 
| +  __ bne(&skip4); | 
| +  __ LoadP(r7, MemOperand(r7, kNormalOffset)); | 
| +  __ b(&skip5); | 
| +  __ bind(&skip4); | 
| +  __ LoadP(r7, MemOperand(r7, kAliasedOffset)); | 
| +  __ bind(&skip5); | 
| + | 
| +  // r3 = address of new object (tagged) | 
| +  // r4 = mapped parameter count (tagged) | 
| +  // r5 = argument count (smi-tagged) | 
| +  // r7 = address of arguments map (tagged) | 
| +  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0); | 
| +  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); | 
| +  __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0); | 
| +  __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0); | 
|  | 
| // Set up the callee in-object property. | 
| STATIC_ASSERT(Heap::kArgumentsCalleeIndex == 1); | 
| -  __ ldr(r3, MemOperand(sp, 2 * kPointerSize)); | 
| -  __ AssertNotSmi(r3); | 
| +  __ LoadP(r6, MemOperand(sp, 2 * kPointerSize)); | 
| +  __ AssertNotSmi(r6); | 
| const int kCalleeOffset = JSObject::kHeaderSize + | 
| Heap::kArgumentsCalleeIndex * kPointerSize; | 
| -  __ str(r3, FieldMemOperand(r0, kCalleeOffset)); | 
| +  __ StoreP(r6, FieldMemOperand(r3, kCalleeOffset), r0); | 
|  | 
| // Use the length (smi tagged) and set that as an in-object property too. | 
| -  __ AssertSmi(r2); | 
| +  __ AssertSmi(r5); | 
| STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 
| const int kLengthOffset = JSObject::kHeaderSize + | 
| Heap::kArgumentsLengthIndex * kPointerSize; | 
| -  __ str(r2, FieldMemOperand(r0, kLengthOffset)); | 
| +  __ StoreP(r5, FieldMemOperand(r3, kLengthOffset), r0); | 
|  | 
| // Set up the elements pointer in the allocated arguments object. | 
| -  // If we allocated a parameter map, r4 will point there, otherwise | 
| +  // If we allocated a parameter map, r7 will point there, otherwise | 
| // it will point to the backing store. | 
| -  __ add(r4, r0, Operand(Heap::kSloppyArgumentsObjectSize)); | 
| -  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); | 
| +  __ addi(r7, r3, Operand(Heap::kSloppyArgumentsObjectSize)); | 
| +  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0); | 
|  | 
| -  // r0 = address of new object (tagged) | 
| -  // r1 = mapped parameter count (tagged) | 
| -  // r2 = argument count (tagged) | 
| -  // r4 = address of parameter map or backing store (tagged) | 
| +  // r3 = address of new object (tagged) | 
| +  // r4 = mapped parameter count (tagged) | 
| +  // r5 = argument count (tagged) | 
| +  // r7 = address of parameter map or backing store (tagged) | 
| // Initialize parameter map. If there are no mapped arguments, we're done. | 
| -  Label skip_parameter_map; | 
| -  __ cmp(r1, Operand(Smi::FromInt(0))); | 
| -  // Move backing store address to r3, because it is | 
| +  Label skip_parameter_map, skip6; | 
| +  __ CmpSmiLiteral(r4, Smi::FromInt(0), r0); | 
| +  __ bne(&skip6); | 
| +  // Move backing store address to r6, because it is | 
| // expected there when filling in the unmapped arguments. | 
| -  __ mov(r3, r4, LeaveCC, eq); | 
| -  __ b(eq, &skip_parameter_map); | 
| - | 
| -  __ LoadRoot(r6, Heap::kSloppyArgumentsElementsMapRootIndex); | 
| -  __ str(r6, FieldMemOperand(r4, FixedArray::kMapOffset)); | 
| -  __ add(r6, r1, Operand(Smi::FromInt(2))); | 
| -  __ str(r6, FieldMemOperand(r4, FixedArray::kLengthOffset)); | 
| -  __ str(cp, FieldMemOperand(r4, FixedArray::kHeaderSize + 0 * kPointerSize)); | 
| -  __ add(r6, r4, Operand(r1, LSL, 1)); | 
| -  __ add(r6, r6, Operand(kParameterMapHeaderSize)); | 
| -  __ str(r6, FieldMemOperand(r4, FixedArray::kHeaderSize + 1 * kPointerSize)); | 
| +  __ mr(r6, r7); | 
| +  __ b(&skip_parameter_map); | 
| +  __ bind(&skip6); | 
| + | 
| +  __ LoadRoot(r9, Heap::kSloppyArgumentsElementsMapRootIndex); | 
| +  __ StoreP(r9, FieldMemOperand(r7, FixedArray::kMapOffset), r0); | 
| +  __ AddSmiLiteral(r9, r4, Smi::FromInt(2), r0); | 
| +  __ StoreP(r9, FieldMemOperand(r7, FixedArray::kLengthOffset), r0); | 
| +  __ StoreP(cp, FieldMemOperand(r7, | 
| +                                FixedArray::kHeaderSize + 0 * kPointerSize), | 
| +            r0); | 
| +  __ SmiToPtrArrayOffset(r9, r4); | 
| +  __ add(r9, r7, r9); | 
| +  __ addi(r9, r9, Operand(kParameterMapHeaderSize)); | 
| +  __ StoreP(r9, FieldMemOperand(r7, | 
| +                                FixedArray::kHeaderSize + 1 * kPointerSize), | 
| +            r0); | 
|  | 
| // Copy the parameter slots and the holes in the arguments. | 
| // We need to fill in mapped_parameter_count slots. They index the context, | 
| @@ -2095,74 +2192,71 @@ void ArgumentsAccessStub::GenerateNewSloppyFast(MacroAssembler* masm) { | 
| //       MIN_CONTEXT_SLOTS+parameter_count-mapped_parameter_count | 
| // We loop from right to left. | 
| Label parameters_loop, parameters_test; | 
| -  __ mov(r6, r1); | 
| -  __ ldr(r9, MemOperand(sp, 0 * kPointerSize)); | 
| -  __ add(r9, r9, Operand(Smi::FromInt(Context::MIN_CONTEXT_SLOTS))); | 
| -  __ sub(r9, r9, Operand(r1)); | 
| -  __ LoadRoot(r5, Heap::kTheHoleValueRootIndex); | 
| -  __ add(r3, r4, Operand(r6, LSL, 1)); | 
| -  __ add(r3, r3, Operand(kParameterMapHeaderSize)); | 
| - | 
| -  // r6 = loop variable (tagged) | 
| -  // r1 = mapping index (tagged) | 
| -  // r3 = address of backing store (tagged) | 
| -  // r4 = address of parameter map (tagged), which is also the address of new | 
| -  //      object + Heap::kSloppyArgumentsObjectSize (tagged) | 
| -  // r0 = temporary scratch (a.o., for address calculation) | 
| -  // r5 = the hole value | 
| -  __ jmp(&parameters_test); | 
| +  __ mr(r9, r4); | 
| +  __ LoadP(r11, MemOperand(sp, 0 * kPointerSize)); | 
| +  __ AddSmiLiteral(r11, r11, Smi::FromInt(Context::MIN_CONTEXT_SLOTS), r0); | 
| +  __ sub(r11, r11, r4); | 
| +  __ LoadRoot(r10, Heap::kTheHoleValueRootIndex); | 
| +  __ SmiToPtrArrayOffset(r6, r9); | 
| +  __ add(r6, r7, r6); | 
| +  __ addi(r6, r6, Operand(kParameterMapHeaderSize)); | 
| + | 
| +  // r9 = loop variable (tagged) | 
| +  // r4 = mapping index (tagged) | 
| +  // r6 = address of backing store (tagged) | 
| +  // r7 = address of parameter map (tagged) | 
| +  // r8 = temporary scratch (a.o., for address calculation) | 
| +  // r10 = the hole value | 
| +  __ b(&parameters_test); | 
|  | 
| __ bind(&parameters_loop); | 
| -  __ sub(r6, r6, Operand(Smi::FromInt(1))); | 
| -  __ mov(r0, Operand(r6, LSL, 1)); | 
| -  __ add(r0, r0, Operand(kParameterMapHeaderSize - kHeapObjectTag)); | 
| -  __ str(r9, MemOperand(r4, r0)); | 
| -  __ sub(r0, r0, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); | 
| -  __ str(r5, MemOperand(r3, r0)); | 
| -  __ add(r9, r9, Operand(Smi::FromInt(1))); | 
| +  __ SubSmiLiteral(r9, r9, Smi::FromInt(1), r0); | 
| +  __ SmiToPtrArrayOffset(r8, r9); | 
| +  __ addi(r8, r8, Operand(kParameterMapHeaderSize - kHeapObjectTag)); | 
| +  __ StorePX(r11, MemOperand(r8, r7)); | 
| +  __ subi(r8, r8, Operand(kParameterMapHeaderSize - FixedArray::kHeaderSize)); | 
| +  __ StorePX(r10, MemOperand(r8, r6)); | 
| +  __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0); | 
| __ bind(&parameters_test); | 
| -  __ cmp(r6, Operand(Smi::FromInt(0))); | 
| -  __ b(ne, &parameters_loop); | 
| - | 
| -  // Restore r0 = new object (tagged) | 
| -  __ sub(r0, r4, Operand(Heap::kSloppyArgumentsObjectSize)); | 
| +  __ CmpSmiLiteral(r9, Smi::FromInt(0), r0); | 
| +  __ bne(&parameters_loop); | 
|  | 
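Stripped of smi tagging, the parameters loop just emitted fills the map and the backing store from the last mapped parameter down to index 0. A scalar sketch (not V8 code; plain ints stand in for smis and the hole value):

```cpp
// parameter_map[i] receives a context slot index; backing_store[i] the hole.
void FillParameterMap(int mapped_count, int param_count, int min_context_slots,
                      int* parameter_map, int* backing_store, int hole) {
  // r11 starts at MIN_CONTEXT_SLOTS + parameter_count - mapped_count.
  int context_index = min_context_slots + param_count - mapped_count;
  for (int i = mapped_count - 1; i >= 0; --i) {  // r9 counts down to 0.
    parameter_map[i] = context_index;  // StorePX into the parameter map.
    backing_store[i] = hole;           // StorePX the hole marker.
    ++context_index;                   // AddSmiLiteral(r11, r11, 1).
  }
}
```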
| __ bind(&skip_parameter_map); | 
| -  // r0 = address of new object (tagged) | 
| -  // r2 = argument count (tagged) | 
| -  // r3 = address of backing store (tagged) | 
| -  // r5 = scratch | 
| +  // r5 = argument count (tagged) | 
| +  // r6 = address of backing store (tagged) | 
| +  // r8 = scratch | 
| // Copy arguments header and remaining slots (if there are any). | 
| -  __ LoadRoot(r5, Heap::kFixedArrayMapRootIndex); | 
| -  __ str(r5, FieldMemOperand(r3, FixedArray::kMapOffset)); | 
| -  __ str(r2, FieldMemOperand(r3, FixedArray::kLengthOffset)); | 
| +  __ LoadRoot(r8, Heap::kFixedArrayMapRootIndex); | 
| +  __ StoreP(r8, FieldMemOperand(r6, FixedArray::kMapOffset), r0); | 
| +  __ StoreP(r5, FieldMemOperand(r6, FixedArray::kLengthOffset), r0); | 
|  | 
| Label arguments_loop, arguments_test; | 
| -  __ mov(r9, r1); | 
| -  __ ldr(r4, MemOperand(sp, 1 * kPointerSize)); | 
| -  __ sub(r4, r4, Operand(r9, LSL, 1)); | 
| -  __ jmp(&arguments_test); | 
| +  __ mr(r11, r4); | 
| +  __ LoadP(r7, MemOperand(sp, 1 * kPointerSize)); | 
| +  __ SmiToPtrArrayOffset(r8, r11); | 
| +  __ sub(r7, r7, r8); | 
| +  __ b(&arguments_test); | 
|  | 
| __ bind(&arguments_loop); | 
| -  __ sub(r4, r4, Operand(kPointerSize)); | 
| -  __ ldr(r6, MemOperand(r4, 0)); | 
| -  __ add(r5, r3, Operand(r9, LSL, 1)); | 
| -  __ str(r6, FieldMemOperand(r5, FixedArray::kHeaderSize)); | 
| -  __ add(r9, r9, Operand(Smi::FromInt(1))); | 
| +  __ subi(r7, r7, Operand(kPointerSize)); | 
| +  __ LoadP(r9, MemOperand(r7, 0)); | 
| +  __ SmiToPtrArrayOffset(r8, r11); | 
| +  __ add(r8, r6, r8); | 
| +  __ StoreP(r9, FieldMemOperand(r8, FixedArray::kHeaderSize), r0); | 
| +  __ AddSmiLiteral(r11, r11, Smi::FromInt(1), r0); | 
|  | 
| __ bind(&arguments_test); | 
| -  __ cmp(r9, Operand(r2)); | 
| -  __ b(lt, &arguments_loop); | 
| +  __ cmp(r11, r5); | 
| +  __ blt(&arguments_loop); | 
|  | 
| // Return and remove the on-stack parameters. | 
| -  __ add(sp, sp, Operand(3 * kPointerSize)); | 
| +  __ addi(sp, sp, Operand(3 * kPointerSize)); | 
| __ Ret(); | 
|  | 
| // Do the runtime call to allocate the arguments object. | 
| -  // r0 = address of new object (tagged) | 
| -  // r2 = argument count (tagged) | 
| +  // r5 = argument count (tagged) | 
| __ bind(&runtime); | 
| -  __ str(r2, MemOperand(sp, 0 * kPointerSize));  // Patch argument count. | 
| +  __ StoreP(r5, MemOperand(sp, 0 * kPointerSize));  // Patch argument count. | 
| __ TailCallRuntime(Runtime::kNewSloppyArguments, 3, 1); | 
| } | 
|  | 
| @@ -2173,89 +2267,96 @@ void ArgumentsAccessStub::GenerateNewStrict(MacroAssembler* masm) { | 
| // sp[8] : function | 
| // Check if the calling frame is an arguments adaptor frame. | 
| Label adaptor_frame, try_allocate, runtime; | 
| -  __ ldr(r2, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| -  __ ldr(r3, MemOperand(r2, StandardFrameConstants::kContextOffset)); | 
| -  __ cmp(r3, Operand(Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR))); | 
| -  __ b(eq, &adaptor_frame); | 
| +  __ LoadP(r5, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 
| +  __ LoadP(r6, MemOperand(r5, StandardFrameConstants::kContextOffset)); | 
| +  STATIC_ASSERT(StackFrame::ARGUMENTS_ADAPTOR < 0x3fffu); | 
| +  __ CmpSmiLiteral(r6, Smi::FromInt(StackFrame::ARGUMENTS_ADAPTOR), r0); | 
| +  __ beq(&adaptor_frame); | 
|  | 
| // Get the length from the frame. | 
| -  __ ldr(r1, MemOperand(sp, 0)); | 
| +  __ LoadP(r4, MemOperand(sp, 0)); | 
| __ b(&try_allocate); | 
|  | 
| // Patch the arguments.length and the parameters pointer. | 
| __ bind(&adaptor_frame); | 
| -  __ ldr(r1, MemOperand(r2, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| -  __ str(r1, MemOperand(sp, 0)); | 
| -  __ add(r3, r2, Operand::PointerOffsetFromSmiKey(r1)); | 
| -  __ add(r3, r3, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| -  __ str(r3, MemOperand(sp, 1 * kPointerSize)); | 
| +  __ LoadP(r4, MemOperand(r5, ArgumentsAdaptorFrameConstants::kLengthOffset)); | 
| +  __ StoreP(r4, MemOperand(sp, 0)); | 
| +  __ SmiToPtrArrayOffset(r6, r4); | 
| +  __ add(r6, r5, r6); | 
| +  __ addi(r6, r6, Operand(StandardFrameConstants::kCallerSPOffset)); | 
| +  __ StoreP(r6, MemOperand(sp, 1 * kPointerSize)); | 
|  | 
| // Try the new space allocation. Start out with computing the size | 
| // of the arguments object and the elements array in words. | 
| Label add_arguments_object; | 
| __ bind(&try_allocate); | 
| -  __ SmiUntag(r1, SetCC); | 
| -  __ b(eq, &add_arguments_object); | 
| -  __ add(r1, r1, Operand(FixedArray::kHeaderSize / kPointerSize)); | 
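| +  // A smi zero and an untagged zero are the same value, so the length can | 
| +  // be tested against zero before it is untagged. | 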
| +  __ cmpi(r4, Operand::Zero()); | 
| +  __ beq(&add_arguments_object); | 
| +  __ SmiUntag(r4); | 
| +  __ addi(r4, r4, Operand(FixedArray::kHeaderSize / kPointerSize)); | 
| __ bind(&add_arguments_object); | 
| -  __ add(r1, r1, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize)); | 
| +  __ addi(r4, r4, Operand(Heap::kStrictArgumentsObjectSize / kPointerSize)); | 
|  | 
| // Do the allocation of both objects in one go. | 
| -  __ Allocate(r1, r0, r2, r3, &runtime, | 
| +  __ Allocate(r4, r3, r5, r6, &runtime, | 
| static_cast<AllocationFlags>(TAG_OBJECT | SIZE_IN_WORDS)); | 
|  | 
| // Get the arguments boilerplate from the current native context. | 
| -  __ ldr(r4, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 
| -  __ ldr(r4, FieldMemOperand(r4, GlobalObject::kNativeContextOffset)); | 
| -  __ ldr(r4, MemOperand( | 
| -                 r4, Context::SlotOffset(Context::STRICT_ARGUMENTS_MAP_INDEX))); | 
| +  __ LoadP(r7, | 
| +           MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 
| +  __ LoadP(r7, FieldMemOperand(r7, GlobalObject::kNativeContextOffset)); | 
| +  __ LoadP(r7, MemOperand(r7, Context::SlotOffset( | 
| +         Context::STRICT_ARGUMENTS_MAP_INDEX))); | 
|  | 
| -  __ str(r4, FieldMemOperand(r0, JSObject::kMapOffset)); | 
| -  __ LoadRoot(r3, Heap::kEmptyFixedArrayRootIndex); | 
| -  __ str(r3, FieldMemOperand(r0, JSObject::kPropertiesOffset)); | 
| -  __ str(r3, FieldMemOperand(r0, JSObject::kElementsOffset)); | 
| +  __ StoreP(r7, FieldMemOperand(r3, JSObject::kMapOffset), r0); | 
| +  __ LoadRoot(r6, Heap::kEmptyFixedArrayRootIndex); | 
| +  __ StoreP(r6, FieldMemOperand(r3, JSObject::kPropertiesOffset), r0); | 
| +  __ StoreP(r6, FieldMemOperand(r3, JSObject::kElementsOffset), r0); | 
|  | 
| // Get the length (smi tagged) and set that as an in-object property too. | 
| STATIC_ASSERT(Heap::kArgumentsLengthIndex == 0); | 
| -  __ ldr(r1, MemOperand(sp, 0 * kPointerSize)); | 
| -  __ AssertSmi(r1); | 
| -  __ str(r1, FieldMemOperand(r0, JSObject::kHeaderSize + | 
| -      Heap::kArgumentsLengthIndex * kPointerSize)); | 
| +  __ LoadP(r4, MemOperand(sp, 0 * kPointerSize)); | 
| +  __ AssertSmi(r4); | 
| +  __ StoreP(r4, FieldMemOperand(r3, JSObject::kHeaderSize + | 
| +                                Heap::kArgumentsLengthIndex * kPointerSize), | 
| +            r0); | 
|  | 
| // If there are no actual arguments, we're done. | 
| Label done; | 
| -  __ cmp(r1, Operand::Zero()); | 
| -  __ b(eq, &done); | 
| +  __ cmpi(r4, Operand::Zero()); | 
| +  __ beq(&done); | 
|  | 
| // Get the parameters pointer from the stack. | 
| -  __ ldr(r2, MemOperand(sp, 1 * kPointerSize)); | 
| +  __ LoadP(r5, MemOperand(sp, 1 * kPointerSize)); | 
|  | 
| // Set up the elements pointer in the allocated arguments object and | 
| // initialize the header in the elements fixed array. | 
| -  __ add(r4, r0, Operand(Heap::kStrictArgumentsObjectSize)); | 
| -  __ str(r4, FieldMemOperand(r0, JSObject::kElementsOffset)); | 
| -  __ LoadRoot(r3, Heap::kFixedArrayMapRootIndex); | 
| -  __ str(r3, FieldMemOperand(r4, FixedArray::kMapOffset)); | 
| -  __ str(r1, FieldMemOperand(r4, FixedArray::kLengthOffset)); | 
| -  __ SmiUntag(r1); | 
| +  __ addi(r7, r3, Operand(Heap::kStrictArgumentsObjectSize)); | 
| +  __ StoreP(r7, FieldMemOperand(r3, JSObject::kElementsOffset), r0); | 
| +  __ LoadRoot(r6, Heap::kFixedArrayMapRootIndex); | 
| +  __ StoreP(r6, FieldMemOperand(r7, FixedArray::kMapOffset), r0); | 
| +  __ StoreP(r4, FieldMemOperand(r7, FixedArray::kLengthOffset), r0); | 
| +  // Untag the length for the loop. | 
| +  __ SmiUntag(r4); | 
|  | 
| // Copy the fixed array slots. | 
| Label loop; | 
| -  // Set up r4 to point to the first array slot. | 
| -  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
| +  // Set up r7 to point to the first array slot. | 
| +  __ addi(r7, r7, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
| __ bind(&loop); | 
| -  // Pre-decrement r2 with kPointerSize on each iteration. | 
| +  // Pre-decrement r5 with kPointerSize on each iteration. | 
| // Pre-decrement in order to skip receiver. | 
| -  __ ldr(r3, MemOperand(r2, kPointerSize, NegPreIndex)); | 
| -  // Post-increment r4 with kPointerSize on each iteration. | 
| -  __ str(r3, MemOperand(r4, kPointerSize, PostIndex)); | 
| -  __ sub(r1, r1, Operand(1)); | 
| -  __ cmp(r1, Operand::Zero()); | 
| -  __ b(ne, &loop); | 
| +  __ LoadPU(r6, MemOperand(r5, -kPointerSize)); | 
| +  // Post-increment r7 with kPointerSize on each iteration. | 
| +  __ StoreP(r6, MemOperand(r7)); | 
| +  __ addi(r7, r7, Operand(kPointerSize)); | 
| +  __ subi(r4, r4, Operand(1)); | 
| +  __ cmpi(r4, Operand::Zero()); | 
| +  __ bne(&loop); | 
|  | 
| // Return and remove the on-stack parameters. | 
| __ bind(&done); | 
| -  __ add(sp, sp, Operand(3 * kPointerSize)); | 
| +  __ addi(sp, sp, Operand(3 * kPointerSize)); | 
| __ Ret(); | 
|  | 
| // Do the runtime call to allocate the arguments object. | 
| @@ -2283,70 +2384,79 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { | 
| const int kSubjectOffset = 2 * kPointerSize; | 
| const int kJSRegExpOffset = 3 * kPointerSize; | 
|  | 
| -  Label runtime; | 
| +  Label runtime, br_over, encoding_type_UC16; | 
| + | 
| // Allocation of registers for this function. These are in callee-saved | 
| // registers and will be preserved by the call to the native RegExp code, as | 
| // this code is called using the normal C calling convention. When calling | 
| // directly from generated code the native RegExp code will not do a GC, so | 
| // the contents of these registers are safe to use after the call. | 
| -  Register subject = r4; | 
| -  Register regexp_data = r5; | 
| -  Register last_match_info_elements = no_reg;  // will be r6; | 
| +  Register subject = r14; | 
| +  Register regexp_data = r15; | 
| +  Register last_match_info_elements = r16; | 
| +  Register code = r17; | 
| + | 
| +  // Ensure register assignments are consistent with the callee-saved masks. | 
| +  DCHECK(subject.bit() & kCalleeSaved); | 
| +  DCHECK(regexp_data.bit() & kCalleeSaved); | 
| +  DCHECK(last_match_info_elements.bit() & kCalleeSaved); | 
| +  DCHECK(code.bit() & kCalleeSaved); | 
|  | 
| // Ensure that a RegExp stack is allocated. | 
| ExternalReference address_of_regexp_stack_memory_address = | 
| ExternalReference::address_of_regexp_stack_memory_address(isolate()); | 
| ExternalReference address_of_regexp_stack_memory_size = | 
| ExternalReference::address_of_regexp_stack_memory_size(isolate()); | 
| -  __ mov(r0, Operand(address_of_regexp_stack_memory_size)); | 
| -  __ ldr(r0, MemOperand(r0, 0)); | 
| -  __ cmp(r0, Operand::Zero()); | 
| -  __ b(eq, &runtime); | 
| +  __ mov(r3, Operand(address_of_regexp_stack_memory_size)); | 
| +  __ LoadP(r3, MemOperand(r3, 0)); | 
| +  __ cmpi(r3, Operand::Zero()); | 
| +  __ beq(&runtime); | 
|  | 
| // Check that the first argument is a JSRegExp object. | 
| -  __ ldr(r0, MemOperand(sp, kJSRegExpOffset)); | 
| -  __ JumpIfSmi(r0, &runtime); | 
| -  __ CompareObjectType(r0, r1, r1, JS_REGEXP_TYPE); | 
| -  __ b(ne, &runtime); | 
| +  __ LoadP(r3, MemOperand(sp, kJSRegExpOffset)); | 
| +  __ JumpIfSmi(r3, &runtime); | 
| +  __ CompareObjectType(r3, r4, r4, JS_REGEXP_TYPE); | 
| +  __ bne(&runtime); | 
|  | 
| // Check that the RegExp has been compiled (data contains a fixed array). | 
| -  __ ldr(regexp_data, FieldMemOperand(r0, JSRegExp::kDataOffset)); | 
| +  __ LoadP(regexp_data, FieldMemOperand(r3, JSRegExp::kDataOffset)); | 
| if (FLAG_debug_code) { | 
| -    __ SmiTst(regexp_data); | 
| -    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected); | 
| -    __ CompareObjectType(regexp_data, r0, r0, FIXED_ARRAY_TYPE); | 
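| +    // TestIfSmi uses a record-form instruction (andi.), which sets cr0; | 
| +    // the explicit cr0 operand directs the check at that condition field. | 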
| +    __ TestIfSmi(regexp_data, r0); | 
| +    __ Check(ne, kUnexpectedTypeForRegExpDataFixedArrayExpected, cr0); | 
| +    __ CompareObjectType(regexp_data, r3, r3, FIXED_ARRAY_TYPE); | 
| __ Check(eq, kUnexpectedTypeForRegExpDataFixedArrayExpected); | 
| } | 
|  | 
| // regexp_data: RegExp data (FixedArray) | 
| // Check the type of the RegExp. Only continue if type is JSRegExp::IRREGEXP. | 
| -  __ ldr(r0, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 
| -  __ cmp(r0, Operand(Smi::FromInt(JSRegExp::IRREGEXP))); | 
| -  __ b(ne, &runtime); | 
| +  __ LoadP(r3, FieldMemOperand(regexp_data, JSRegExp::kDataTagOffset)); | 
| +  STATIC_ASSERT(JSRegExp::IRREGEXP < 0x3fffu); | 
| +  __ CmpSmiLiteral(r3, Smi::FromInt(JSRegExp::IRREGEXP), r0); | 
| +  __ bne(&runtime); | 
|  | 
| // regexp_data: RegExp data (FixedArray) | 
| // Check that the number of captures fit in the static offsets vector buffer. | 
| -  __ ldr(r2, | 
| +  __ LoadP(r5, | 
| FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 
| // Check (number_of_captures + 1) * 2 <= offsets vector size | 
| // Or          number_of_captures * 2 <= offsets vector size - 2 | 
| -  // Multiplying by 2 comes for free since r2 is smi-tagged. | 
| -  STATIC_ASSERT(kSmiTag == 0); | 
| -  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 
| +  // SmiToShortArrayOffset performs both the multiplication by 2 and the | 
| +  // SmiUntag (the untag is a no-op on 32-bit). | 
| +  __ SmiToShortArrayOffset(r5, r5); | 
| STATIC_ASSERT(Isolate::kJSRegexpStaticOffsetsVectorSize >= 2); | 
| -  __ cmp(r2, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); | 
| -  __ b(hi, &runtime); | 
| +  __ cmpli(r5, Operand(Isolate::kJSRegexpStaticOffsetsVectorSize - 2)); | 
| +  __ bgt(&runtime); | 
|  | 
| // Reset offset for possibly sliced string. | 
| -  __ mov(r9, Operand::Zero()); | 
| -  __ ldr(subject, MemOperand(sp, kSubjectOffset)); | 
| +  __ li(r11, Operand::Zero()); | 
| +  __ LoadP(subject, MemOperand(sp, kSubjectOffset)); | 
| __ JumpIfSmi(subject, &runtime); | 
| -  __ mov(r3, subject);  // Make a copy of the original subject string. | 
| -  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| -  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 
| +  __ mr(r6, subject);  // Make a copy of the original subject string. | 
| +  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| +  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 
| // subject: subject string | 
| -  // r3: subject string | 
| -  // r0: subject string instance type | 
| +  // r6: subject string | 
| +  // r3: subject string instance type | 
| // regexp_data: RegExp data (FixedArray) | 
| // Handle subject string according to its encoding and representation: | 
| // (1) Sequential string?  If yes, go to (5). | 
| @@ -2370,276 +2480,299 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { | 
| not_long_external /* 8 */; | 
|  | 
| // (1) Sequential string?  If yes, go to (5). | 
| -  __ and_(r1, | 
| -          r0, | 
| +  STATIC_ASSERT((kIsNotStringMask | | 
| +                  kStringRepresentationMask | | 
| +                  kShortExternalStringMask) == 0x93); | 
| +  __ andi(r4, | 
| +          r3, | 
| Operand(kIsNotStringMask | | 
| kStringRepresentationMask | | 
| -                  kShortExternalStringMask), | 
| -          SetCC); | 
| +                  kShortExternalStringMask)); | 
| STATIC_ASSERT((kStringTag | kSeqStringTag) == 0); | 
| -  __ b(eq, &seq_string);  // Go to (5). | 
| +  __ beq(&seq_string, cr0);  // Go to (5). | 
|  | 
| // (2) Anything but sequential or cons?  If yes, go to (6). | 
| STATIC_ASSERT(kConsStringTag < kExternalStringTag); | 
| STATIC_ASSERT(kSlicedStringTag > kExternalStringTag); | 
| STATIC_ASSERT(kIsNotStringMask > kExternalStringTag); | 
| STATIC_ASSERT(kShortExternalStringTag > kExternalStringTag); | 
| -  __ cmp(r1, Operand(kExternalStringTag)); | 
| -  __ b(ge, ¬_seq_nor_cons);  // Go to (6). | 
| +  STATIC_ASSERT(kExternalStringTag < 0xffffu); | 
| +  __ cmpi(r4, Operand(kExternalStringTag)); | 
| +  __ bge(¬_seq_nor_cons);  // Go to (6). | 
|  | 
| // (3) Cons string.  Check that it's flat. | 
| // Replace subject with first string and reload instance type. | 
| -  __ ldr(r0, FieldMemOperand(subject, ConsString::kSecondOffset)); | 
| -  __ CompareRoot(r0, Heap::kempty_stringRootIndex); | 
| -  __ b(ne, &runtime); | 
| -  __ ldr(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 
| +  __ LoadP(r3, FieldMemOperand(subject, ConsString::kSecondOffset)); | 
| +  __ CompareRoot(r3, Heap::kempty_stringRootIndex); | 
| +  __ bne(&runtime); | 
| +  __ LoadP(subject, FieldMemOperand(subject, ConsString::kFirstOffset)); | 
|  | 
| // (4) Is subject external?  If yes, go to (7). | 
| __ bind(&check_underlying); | 
| -  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| -  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 
| +  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| +  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 
| STATIC_ASSERT(kSeqStringTag == 0); | 
| -  __ tst(r0, Operand(kStringRepresentationMask)); | 
| +  STATIC_ASSERT(kStringRepresentationMask == 3); | 
| +  __ andi(r0, r3, Operand(kStringRepresentationMask)); | 
| // The underlying external string is never a short external string. | 
| STATIC_ASSERT(ExternalString::kMaxShortLength < ConsString::kMinLength); | 
| STATIC_ASSERT(ExternalString::kMaxShortLength < SlicedString::kMinLength); | 
| -  __ b(ne, &external_string);  // Go to (7). | 
| +  __ bne(&external_string, cr0);  // Go to (7). | 
|  | 
| // (5) Sequential string.  Load regexp code according to encoding. | 
| __ bind(&seq_string); | 
| // subject: sequential subject string (or look-alike, external string) | 
| -  // r3: original subject string | 
| -  // Load previous index and check range before r3 is overwritten.  We have to | 
| -  // use r3 instead of subject here because subject might have been only made | 
| +  // r6: original subject string | 
| +  // Load previous index and check range before r6 is overwritten.  We have to | 
| +  // use r6 instead of subject here because subject might only have been made | 
| // to look like a sequential string when it actually is an external string. | 
| -  __ ldr(r1, MemOperand(sp, kPreviousIndexOffset)); | 
| -  __ JumpIfNotSmi(r1, &runtime); | 
| -  __ ldr(r3, FieldMemOperand(r3, String::kLengthOffset)); | 
| -  __ cmp(r3, Operand(r1)); | 
| -  __ b(ls, &runtime); | 
| -  __ SmiUntag(r1); | 
| +  __ LoadP(r4, MemOperand(sp, kPreviousIndexOffset)); | 
| +  __ JumpIfNotSmi(r4, &runtime); | 
| +  __ LoadP(r6, FieldMemOperand(r6, String::kLengthOffset)); | 
| +  __ cmpl(r6, r4); | 
| +  __ ble(&runtime); | 
| +  __ SmiUntag(r4); | 
|  | 
| STATIC_ASSERT(4 == kOneByteStringTag); | 
| STATIC_ASSERT(kTwoByteStringTag == 0); | 
| -  __ and_(r0, r0, Operand(kStringEncodingMask)); | 
| -  __ mov(r3, Operand(r0, ASR, 2), SetCC); | 
| -  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset), ne); | 
| -  __ ldr(r6, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset), eq); | 
| +  STATIC_ASSERT(kStringEncodingMask == 4); | 
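| +  // PPC has no conditionally executed loads, so select the code object by | 
| +  // branching; r6 becomes 1 for one-byte (ASCII), 0 for two-byte strings. | 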
| +  __ ExtractBitMask(r6, r3, kStringEncodingMask, SetRC); | 
| +  __ beq(&encoding_type_UC16, cr0); | 
| +  __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataAsciiCodeOffset)); | 
| +  __ b(&br_over); | 
| +  __ bind(&encoding_type_UC16); | 
| +  __ LoadP(code, FieldMemOperand(regexp_data, JSRegExp::kDataUC16CodeOffset)); | 
| +  __ bind(&br_over); | 
|  | 
| // (E) Carry on.  String handling is done. | 
| -  // r6: irregexp code | 
| +  // code: irregexp code | 
| // Check that the irregexp code has been generated for the actual string | 
| // encoding. If it has, the field contains a code object otherwise it contains | 
| // a smi (code flushing support). | 
| -  __ JumpIfSmi(r6, &runtime); | 
| +  __ JumpIfSmi(code, &runtime); | 
|  | 
| -  // r1: previous index | 
| -  // r3: encoding of subject string (1 if ASCII, 0 if two_byte); | 
| -  // r6: code | 
| +  // r4: previous index | 
| +  // r6: encoding of subject string (1 if ASCII, 0 if two_byte); | 
| +  // code: Address of generated regexp code | 
| // subject: Subject string | 
| // regexp_data: RegExp data (FixedArray) | 
| // All checks done. Now push arguments for native regexp code. | 
| -  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r0, r2); | 
| +  __ IncrementCounter(isolate()->counters()->regexp_entry_native(), 1, r3, r5); | 
|  | 
| // Isolates: note we add an additional parameter here (isolate pointer). | 
| -  const int kRegExpExecuteArguments = 9; | 
| -  const int kParameterRegisters = 4; | 
| +  const int kRegExpExecuteArguments = 10; | 
| +  const int kParameterRegisters = 8; | 
| __ EnterExitFrame(false, kRegExpExecuteArguments - kParameterRegisters); | 
|  | 
| // Stack pointer now points to cell where return address is to be written. | 
| // Arguments are before that on the stack or in registers. | 
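| +  // The PPC ABI passes the first eight arguments in registers r3-r10, so | 
| +  // only the last two RegExp arguments need the stack parameter area. | 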
|  | 
| -  // Argument 9 (sp[20]): Pass current isolate address. | 
| -  __ mov(r0, Operand(ExternalReference::isolate_address(isolate()))); | 
| -  __ str(r0, MemOperand(sp, 5 * kPointerSize)); | 
| - | 
| -  // Argument 8 (sp[16]): Indicate that this is a direct call from JavaScript. | 
| -  __ mov(r0, Operand(1)); | 
| -  __ str(r0, MemOperand(sp, 4 * kPointerSize)); | 
| - | 
| -  // Argument 7 (sp[12]): Start (high end) of backtracking stack memory area. | 
| -  __ mov(r0, Operand(address_of_regexp_stack_memory_address)); | 
| -  __ ldr(r0, MemOperand(r0, 0)); | 
| -  __ mov(r2, Operand(address_of_regexp_stack_memory_size)); | 
| -  __ ldr(r2, MemOperand(r2, 0)); | 
| -  __ add(r0, r0, Operand(r2)); | 
| -  __ str(r0, MemOperand(sp, 3 * kPointerSize)); | 
| - | 
| -  // Argument 6: Set the number of capture registers to zero to force global | 
| -  // regexps to behave as non-global.  This does not affect non-global regexps. | 
| -  __ mov(r0, Operand::Zero()); | 
| -  __ str(r0, MemOperand(sp, 2 * kPointerSize)); | 
| - | 
| -  // Argument 5 (sp[4]): static offsets vector buffer. | 
| -  __ mov(r0, | 
| +  // Argument 10 (in stack parameter area): Pass current isolate address. | 
| +  __ mov(r3, Operand(ExternalReference::isolate_address(isolate()))); | 
| +  __ StoreP(r3, MemOperand(sp, (kStackFrameExtraParamSlot + 1) * kPointerSize)); | 
| + | 
| +  // Argument 9 is a dummy that reserves the space used for | 
| +  // the return address added by the ExitFrame in native calls. | 
| + | 
| +  // Argument 8 (r10): Indicate that this is a direct call from JavaScript. | 
| +  __ li(r10, Operand(1)); | 
| + | 
| +  // Argument 7 (r9): Start (high end) of backtracking stack memory area. | 
| +  __ mov(r3, Operand(address_of_regexp_stack_memory_address)); | 
| +  __ LoadP(r3, MemOperand(r3, 0)); | 
| +  __ mov(r5, Operand(address_of_regexp_stack_memory_size)); | 
| +  __ LoadP(r5, MemOperand(r5, 0)); | 
| +  __ add(r9, r3, r5); | 
| + | 
| +  // Argument 6 (r8): Set the number of capture registers to zero to force | 
| +  // global regexps to behave as non-global.  This does not affect non-global | 
| +  // regexps. | 
| +  __ li(r8, Operand::Zero()); | 
| + | 
| +  // Argument 5 (r7): static offsets vector buffer. | 
| +  __ mov(r7, | 
| Operand(ExternalReference::address_of_static_offsets_vector( | 
| -             isolate()))); | 
| -  __ str(r0, MemOperand(sp, 1 * kPointerSize)); | 
| +                   isolate()))); | 
|  | 
| -  // For arguments 4 and 3 get string length, calculate start of string data and | 
| -  // calculate the shift of the index (0 for ASCII and 1 for two byte). | 
| -  __ add(r7, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); | 
| -  __ eor(r3, r3, Operand(1)); | 
| +  // For arguments 4 (r6) and 3 (r5) get string length, calculate start of | 
| +  // string data and calculate the shift of the index (0 for ASCII and 1 for | 
| +  // two byte). | 
| +  __ addi(r18, subject, Operand(SeqString::kHeaderSize - kHeapObjectTag)); | 
| +  __ xori(r6, r6, Operand(1)); | 
| // Load the length from the original subject string from the previous stack | 
| // frame. Therefore we have to use fp, which points exactly to two pointer | 
| // sizes below the previous sp. (Because creating a new stack frame pushes | 
| // the previous fp onto the stack and moves up sp by 2 * kPointerSize.) | 
| -  __ ldr(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); | 
| +  __ LoadP(subject, MemOperand(fp, kSubjectOffset + 2 * kPointerSize)); | 
| // If slice offset is not 0, load the length from the original sliced string. | 
| -  // Argument 4, r3: End of string data | 
| -  // Argument 3, r2: Start of string data | 
| +  // Argument 4, r6: End of string data | 
| +  // Argument 3, r5: Start of string data | 
| // Prepare start and end index of the input. | 
| -  __ add(r9, r7, Operand(r9, LSL, r3)); | 
| -  __ add(r2, r9, Operand(r1, LSL, r3)); | 
| +  __ ShiftLeft(r11, r11, r6); | 
| +  __ add(r11, r18, r11); | 
| +  __ ShiftLeft(r5, r4, r6); | 
| +  __ add(r5, r11, r5); | 
|  | 
| -  __ ldr(r7, FieldMemOperand(subject, String::kLengthOffset)); | 
| -  __ SmiUntag(r7); | 
| -  __ add(r3, r9, Operand(r7, LSL, r3)); | 
| +  __ LoadP(r18, FieldMemOperand(subject, String::kLengthOffset)); | 
| +  __ SmiUntag(r18); | 
| +  __ ShiftLeft(r6, r18, r6); | 
| +  __ add(r6, r11, r6); | 
|  | 
| -  // Argument 2 (r1): Previous index. | 
| +  // Argument 2 (r4): Previous index. | 
| // Already there | 
|  | 
| -  // Argument 1 (r0): Subject string. | 
| -  __ mov(r0, subject); | 
| +  // Argument 1 (r3): Subject string. | 
| +  __ mr(r3, subject); | 
|  | 
| // Locate the code entry and call it. | 
| -  __ add(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| +  __ addi(code, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| + | 
| +#if ABI_USES_FUNCTION_DESCRIPTORS && defined(USE_SIMULATOR) | 
| +  // Even simulated AIX/PPC64 Linux uses a function descriptor for the | 
| +  // RegExp routine.  Extract the instruction address here since | 
| +  // DirectCEntryStub::GenerateCall will not do it for calls out to | 
| +  // what it thinks is C code compiled for the simulator/host | 
| +  // platform. | 
| +  __ LoadP(code, MemOperand(code, 0));  // Instruction address | 
| +#endif | 
| + | 
| DirectCEntryStub stub(isolate()); | 
| -  stub.GenerateCall(masm, r6); | 
| +  stub.GenerateCall(masm, code); | 
|  | 
| __ LeaveExitFrame(false, no_reg, true); | 
|  | 
| -  last_match_info_elements = r6; | 
| - | 
| -  // r0: result | 
| +  // r3: result | 
| // subject: subject string (callee saved) | 
| // regexp_data: RegExp data (callee saved) | 
| // last_match_info_elements: Last match info elements (callee saved) | 
| // Check the result. | 
| Label success; | 
| -  __ cmp(r0, Operand(1)); | 
| +  __ cmpi(r3, Operand(1)); | 
| // We expect exactly one result since we force the called regexp to behave | 
| // as non-global. | 
| -  __ b(eq, &success); | 
| +  __ beq(&success); | 
| Label failure; | 
| -  __ cmp(r0, Operand(NativeRegExpMacroAssembler::FAILURE)); | 
| -  __ b(eq, &failure); | 
| -  __ cmp(r0, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | 
| +  __ cmpi(r3, Operand(NativeRegExpMacroAssembler::FAILURE)); | 
| +  __ beq(&failure); | 
| +  __ cmpi(r3, Operand(NativeRegExpMacroAssembler::EXCEPTION)); | 
| // If it is not an exception, it can only be a retry; handle that in the | 
| // runtime system. | 
| -  __ b(ne, &runtime); | 
| +  __ bne(&runtime); | 
| // The result must now be an exception. If there is no pending exception | 
| // already, a stack overflow (on the backtrack stack) was detected in the | 
| // RegExp code but the exception has not yet been created. Handle that in | 
| // the runtime system. | 
| // TODO(592): Rerunning the RegExp to get the stack overflow exception. | 
| -  __ mov(r1, Operand(isolate()->factory()->the_hole_value())); | 
| -  __ mov(r2, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 
| +  __ mov(r4, Operand(isolate()->factory()->the_hole_value())); | 
| +  __ mov(r5, Operand(ExternalReference(Isolate::kPendingExceptionAddress, | 
| isolate()))); | 
| -  __ ldr(r0, MemOperand(r2, 0)); | 
| -  __ cmp(r0, r1); | 
| -  __ b(eq, &runtime); | 
| +  __ LoadP(r3, MemOperand(r5, 0)); | 
| +  __ cmp(r3, r4); | 
| +  __ beq(&runtime); | 
|  | 
| -  __ str(r1, MemOperand(r2, 0));  // Clear pending exception. | 
| +  __ StoreP(r4, MemOperand(r5, 0));  // Clear pending exception. | 
|  | 
| // Check if the exception is a termination. If so, throw as uncatchable. | 
| -  __ CompareRoot(r0, Heap::kTerminationExceptionRootIndex); | 
| +  __ CompareRoot(r3, Heap::kTerminationExceptionRootIndex); | 
|  | 
| Label termination_exception; | 
| -  __ b(eq, &termination_exception); | 
| +  __ beq(&termination_exception); | 
|  | 
| -  __ Throw(r0); | 
| +  __ Throw(r3); | 
|  | 
| __ bind(&termination_exception); | 
| -  __ ThrowUncatchable(r0); | 
| +  __ ThrowUncatchable(r3); | 
|  | 
| __ bind(&failure); | 
| // For failure and exception return null. | 
| -  __ mov(r0, Operand(isolate()->factory()->null_value())); | 
| -  __ add(sp, sp, Operand(4 * kPointerSize)); | 
| +  __ mov(r3, Operand(isolate()->factory()->null_value())); | 
| +  __ addi(sp, sp, Operand(4 * kPointerSize)); | 
| __ Ret(); | 
|  | 
| // Process the result from the native regexp code. | 
| __ bind(&success); | 
| -  __ ldr(r1, | 
| -         FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 
| +  __ LoadP(r4, | 
| +           FieldMemOperand(regexp_data, JSRegExp::kIrregexpCaptureCountOffset)); | 
| // Calculate number of capture registers (number_of_captures + 1) * 2. | 
| -  // Multiplying by 2 comes for free since r1 is smi-tagged. | 
| -  STATIC_ASSERT(kSmiTag == 0); | 
| -  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 
| -  __ add(r1, r1, Operand(2));  // r1 was a smi. | 
| - | 
| -  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | 
| -  __ JumpIfSmi(r0, &runtime); | 
| -  __ CompareObjectType(r0, r2, r2, JS_ARRAY_TYPE); | 
| -  __ b(ne, &runtime); | 
| +  // SmiToShortArrayOffset performs both the multiplication by 2 and the | 
| +  // SmiUntag (the untag is a no-op on 32-bit). | 
| +  __ SmiToShortArrayOffset(r4, r4); | 
| +  __ addi(r4, r4, Operand(2)); | 
| + | 
| +  __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset)); | 
| +  __ JumpIfSmi(r3, &runtime); | 
| +  __ CompareObjectType(r3, r5, r5, JS_ARRAY_TYPE); | 
| +  __ bne(&runtime); | 
| // Check that the JSArray is in fast case. | 
| -  __ ldr(last_match_info_elements, | 
| -         FieldMemOperand(r0, JSArray::kElementsOffset)); | 
| -  __ ldr(r0, FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 
| -  __ CompareRoot(r0, Heap::kFixedArrayMapRootIndex); | 
| -  __ b(ne, &runtime); | 
| +  __ LoadP(last_match_info_elements, | 
| +           FieldMemOperand(r3, JSArray::kElementsOffset)); | 
| +  __ LoadP(r3, | 
| +           FieldMemOperand(last_match_info_elements, HeapObject::kMapOffset)); | 
| +  __ CompareRoot(r3, Heap::kFixedArrayMapRootIndex); | 
| +  __ bne(&runtime); | 
| // Check that the last match info has space for the capture registers and the | 
| // additional information. | 
| -  __ ldr(r0, | 
| +  __ LoadP(r3, | 
| FieldMemOperand(last_match_info_elements, FixedArray::kLengthOffset)); | 
| -  __ add(r2, r1, Operand(RegExpImpl::kLastMatchOverhead)); | 
| -  __ cmp(r2, Operand::SmiUntag(r0)); | 
| -  __ b(gt, &runtime); | 
| +  __ addi(r5, r4, Operand(RegExpImpl::kLastMatchOverhead)); | 
| +  __ SmiUntag(r0, r3); | 
| +  __ cmp(r5, r0); | 
| +  __ bgt(&runtime); | 
|  | 
| -  // r1: number of capture registers | 
| -  // r4: subject string | 
| +  // r4: number of capture registers | 
| +  // subject: subject string | 
| // Store the capture count. | 
| -  __ SmiTag(r2, r1); | 
| -  __ str(r2, FieldMemOperand(last_match_info_elements, | 
| -                             RegExpImpl::kLastCaptureCountOffset)); | 
| +  __ SmiTag(r5, r4); | 
| +  __ StoreP(r5, FieldMemOperand(last_match_info_elements, | 
| +                                RegExpImpl::kLastCaptureCountOffset), r0); | 
| // Store last subject and last input. | 
| -  __ str(subject, | 
| -         FieldMemOperand(last_match_info_elements, | 
| -                         RegExpImpl::kLastSubjectOffset)); | 
| -  __ mov(r2, subject); | 
| +  __ StoreP(subject, | 
| +            FieldMemOperand(last_match_info_elements, | 
| +                            RegExpImpl::kLastSubjectOffset), r0); | 
| +  __ mr(r5, subject); | 
| __ RecordWriteField(last_match_info_elements, | 
| RegExpImpl::kLastSubjectOffset, | 
| subject, | 
| -                      r3, | 
| +                      r10, | 
| kLRHasNotBeenSaved, | 
| kDontSaveFPRegs); | 
| -  __ mov(subject, r2); | 
| -  __ str(subject, | 
| -         FieldMemOperand(last_match_info_elements, | 
| -                         RegExpImpl::kLastInputOffset)); | 
| +  __ mr(subject, r5); | 
| +  __ StoreP(subject, | 
| +            FieldMemOperand(last_match_info_elements, | 
| +                            RegExpImpl::kLastInputOffset), r0); | 
| __ RecordWriteField(last_match_info_elements, | 
| RegExpImpl::kLastInputOffset, | 
| subject, | 
| -                      r3, | 
| +                      r10, | 
| kLRHasNotBeenSaved, | 
| kDontSaveFPRegs); | 
|  | 
| // Get the static offsets vector filled by the native regexp code. | 
| ExternalReference address_of_static_offsets_vector = | 
| ExternalReference::address_of_static_offsets_vector(isolate()); | 
| -  __ mov(r2, Operand(address_of_static_offsets_vector)); | 
| +  __ mov(r5, Operand(address_of_static_offsets_vector)); | 
|  | 
| -  // r1: number of capture registers | 
| -  // r2: offsets vector | 
| -  Label next_capture, done; | 
| +  // r4: number of capture registers | 
| +  // r5: offsets vector | 
| +  Label next_capture; | 
| // The capture register counter starts at the number of capture registers | 
| // and counts down until wrapping after zero. | 
| -  __ add(r0, | 
| -         last_match_info_elements, | 
| -         Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag)); | 
| +  __ addi(r3, | 
| +          last_match_info_elements, | 
| +          Operand(RegExpImpl::kFirstCaptureOffset - kHeapObjectTag - | 
| +                  kPointerSize)); | 
| +  __ addi(r5, r5, Operand(-kIntSize));  // bias down for lwzu | 
| +  __ mtctr(r4); | 
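| +  // The count register drives the loop: bdnz decrements it and branches | 
| +  // while it is non-zero, so no explicit decrement-and-compare is needed. | 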
| __ bind(&next_capture); | 
| -  __ sub(r1, r1, Operand(1), SetCC); | 
| -  __ b(mi, &done); | 
| // Read the value from the static offsets vector buffer. | 
| -  __ ldr(r3, MemOperand(r2, kPointerSize, PostIndex)); | 
| +  __ lwzu(r6, MemOperand(r5, kIntSize)); | 
| // Store the smi value in the last match info. | 
| -  __ SmiTag(r3); | 
| -  __ str(r3, MemOperand(r0, kPointerSize, PostIndex)); | 
| -  __ jmp(&next_capture); | 
| -  __ bind(&done); | 
| +  __ SmiTag(r6); | 
| +  __ StorePU(r6, MemOperand(r3, kPointerSize)); | 
| +  __ bdnz(&next_capture); | 
|  | 
| // Return last match info. | 
| -  __ ldr(r0, MemOperand(sp, kLastMatchInfoOffset)); | 
| -  __ add(sp, sp, Operand(4 * kPointerSize)); | 
| +  __ LoadP(r3, MemOperand(sp, kLastMatchInfoOffset)); | 
| +  __ addi(sp, sp, Operand(4 * kPointerSize)); | 
| __ Ret(); | 
|  | 
| // Do the runtime call to execute the regexp. | 
| @@ -2650,39 +2783,40 @@ void RegExpExecStub::Generate(MacroAssembler* masm) { | 
| // (6) Not a long external string?  If yes, go to (8). | 
| __ bind(¬_seq_nor_cons); | 
| // Compare flags are still set. | 
| -  __ b(gt, ¬_long_external);  // Go to (8). | 
| +  __ bgt(¬_long_external);  // Go to (8). | 
|  | 
| // (7) External string.  Make it, offset-wise, look like a sequential string. | 
| __ bind(&external_string); | 
| -  __ ldr(r0, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| -  __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 
| +  __ LoadP(r3, FieldMemOperand(subject, HeapObject::kMapOffset)); | 
| +  __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); | 
| if (FLAG_debug_code) { | 
| // Assert that we do not have a cons or slice (indirect strings) here. | 
| // Sequential strings have already been ruled out. | 
| -    __ tst(r0, Operand(kIsIndirectStringMask)); | 
| -    __ Assert(eq, kExternalStringExpectedButNotFound); | 
| +    STATIC_ASSERT(kIsIndirectStringMask == 1); | 
| +    __ andi(r0, r3, Operand(kIsIndirectStringMask)); | 
| +    __ Assert(eq, kExternalStringExpectedButNotFound, cr0); | 
| } | 
| -  __ ldr(subject, | 
| +  __ LoadP(subject, | 
| FieldMemOperand(subject, ExternalString::kResourceDataOffset)); | 
| // Move the pointer so that offset-wise, it looks like a sequential string. | 
| STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 
| -  __ sub(subject, | 
| +  __ subi(subject, | 
| subject, | 
| Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
| -  __ jmp(&seq_string);    // Go to (5). | 
| +  __ b(&seq_string);  // Go to (5). | 
|  | 
| // (8) Short external string or not a string?  If yes, bail out to runtime. | 
| __ bind(¬_long_external); | 
| STATIC_ASSERT(kNotStringTag != 0 && kShortExternalStringTag != 0); | 
| -  __ tst(r1, Operand(kIsNotStringMask | kShortExternalStringMask)); | 
| -  __ b(ne, &runtime); | 
| +  __ andi(r0, r4, Operand(kIsNotStringMask | kShortExternalStringMask)); | 
| +  __ bne(&runtime, cr0); | 
|  | 
| // (9) Sliced string.  Replace subject with parent.  Go to (4). | 
| -  // Load offset into r9 and replace subject string with parent. | 
| -  __ ldr(r9, FieldMemOperand(subject, SlicedString::kOffsetOffset)); | 
| -  __ SmiUntag(r9); | 
| -  __ ldr(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | 
| -  __ jmp(&check_underlying);  // Go to (4). | 
| +  // Load offset into r11 and replace subject string with parent. | 
| +  __ LoadP(r11, FieldMemOperand(subject, SlicedString::kOffsetOffset)); | 
| +  __ SmiUntag(r11); | 
| +  __ LoadP(subject, FieldMemOperand(subject, SlicedString::kParentOffset)); | 
| +  __ b(&check_underlying);  // Go to (4). | 
| #endif  // V8_INTERPRETED_REGEXP | 
| } | 
|  | 
| @@ -2691,10 +2825,10 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { | 
| // Cache the called function in a feedback vector slot.  Cache states | 
| // are uninitialized, monomorphic (indicated by a JSFunction), and | 
| // megamorphic. | 
| -  // r0 : number of arguments to the construct function | 
| -  // r1 : the function to call | 
| -  // r2 : Feedback vector | 
| -  // r3 : slot in feedback vector (Smi) | 
| +  // r3 : number of arguments to the construct function | 
| +  // r4 : the function to call | 
| +  // r5 : Feedback vector | 
| +  // r6 : slot in feedback vector (Smi) | 
| Label initialize, done, miss, megamorphic, not_array_function; | 
|  | 
| DCHECK_EQ(*TypeFeedbackInfo::MegamorphicSentinel(masm->isolate()), | 
| @@ -2702,13 +2836,14 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { | 
| DCHECK_EQ(*TypeFeedbackInfo::UninitializedSentinel(masm->isolate()), | 
| masm->isolate()->heap()->uninitialized_symbol()); | 
|  | 
| -  // Load the cache state into r4. | 
| -  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); | 
| -  __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize)); | 
| +  // Load the cache state into r7. | 
| +  __ SmiToPtrArrayOffset(r7, r6); | 
| +  __ add(r7, r5, r7); | 
| +  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize)); | 
|  | 
| // A monomorphic cache hit or an already megamorphic state: invoke the | 
| // function without changing the state. | 
| -  __ cmp(r4, r1); | 
| +  __ cmp(r7, r4); | 
| __ b(eq, &done); | 
|  | 
| if (!FLAG_pretenuring_call_new) { | 
| @@ -2716,39 +2851,40 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { | 
| // If we didn't have a matching function, and we didn't find the megamorphic | 
| // sentinel, then the slot contains either some other function or an | 
| // AllocationSite. Do a map check on the object in the slot. | 
| -    __ ldr(r5, FieldMemOperand(r4, 0)); | 
| -    __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); | 
| -    __ b(ne, &miss); | 
| +    __ LoadP(r8, FieldMemOperand(r7, 0)); | 
| +    __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex); | 
| +    __ bne(&miss); | 
|  | 
| // Make sure the function is the Array() function | 
| -    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4); | 
| -    __ cmp(r1, r4); | 
| -    __ b(ne, &megamorphic); | 
| -    __ jmp(&done); | 
| +    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7); | 
| +    __ cmp(r4, r7); | 
| +    __ bne(&megamorphic); | 
| +    __ b(&done); | 
| } | 
|  | 
| __ bind(&miss); | 
|  | 
| // A monomorphic miss (i.e., the cache is not uninitialized) goes | 
| // megamorphic. | 
| -  __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex); | 
| -  __ b(eq, &initialize); | 
| +  __ CompareRoot(r7, Heap::kUninitializedSymbolRootIndex); | 
| +  __ beq(&initialize); | 
| // MegamorphicSentinel is an immortal immovable object (undefined) so no | 
| // write-barrier is needed. | 
| __ bind(&megamorphic); | 
| -  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); | 
| +  __ SmiToPtrArrayOffset(r7, r6); | 
| +  __ add(r7, r5, r7); | 
| __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex); | 
| -  __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize)); | 
| +  __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0); | 
| __ jmp(&done); | 
|  | 
| // An uninitialized cache is patched with the function | 
| __ bind(&initialize); | 
|  | 
| if (!FLAG_pretenuring_call_new) { | 
| -    // Make sure the function is the Array() function | 
| -    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4); | 
| -    __ cmp(r1, r4); | 
| -    __ b(ne, ¬_array_function); | 
| +    // Make sure the function is the Array() function. | 
| +    __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7); | 
| +    __ cmp(r4, r7); | 
| +    __ bne(¬_array_function); | 
|  | 
| // The target function is the Array constructor. | 
| // Create an AllocationSite if we don't already have it; store it in the | 
| @@ -2757,44 +2893,56 @@ static void GenerateRecordCallTarget(MacroAssembler* masm) { | 
| FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
|  | 
| // Arguments register must be smi-tagged to call out. | 
| -      __ SmiTag(r0); | 
| -      __ Push(r3, r2, r1, r0); | 
| +      __ SmiTag(r3); | 
| +      __ Push(r6, r5, r4, r3); | 
|  | 
| CreateAllocationSiteStub create_stub(masm->isolate()); | 
| __ CallStub(&create_stub); | 
|  | 
| -      __ Pop(r3, r2, r1, r0); | 
| -      __ SmiUntag(r0); | 
| +      __ Pop(r6, r5, r4, r3); | 
| +      __ SmiUntag(r3); | 
| } | 
| __ b(&done); | 
|  | 
| __ bind(¬_array_function); | 
| } | 
|  | 
| -  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); | 
| -  __ add(r4, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
| -  __ str(r1, MemOperand(r4, 0)); | 
| +  __ SmiToPtrArrayOffset(r7, r6); | 
| +  __ add(r7, r5, r7); | 
| +  __ addi(r7, r7, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
| +  __ StoreP(r4, MemOperand(r7, 0)); | 
|  | 
| -  __ Push(r4, r2, r1); | 
| -  __ RecordWrite(r2, r4, r1, kLRHasNotBeenSaved, kDontSaveFPRegs, | 
| +  __ Push(r7, r5, r4); | 
| +  __ RecordWrite(r5, r7, r4, kLRHasNotBeenSaved, kDontSaveFPRegs, | 
| EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 
| -  __ Pop(r4, r2, r1); | 
| +  __ Pop(r7, r5, r4); | 
|  | 
| __ bind(&done); | 
| } | 
|  | 
|  | 
| static void EmitContinueIfStrictOrNative(MacroAssembler* masm, Label* cont) { | 
| -  // Do not transform the receiver for strict mode functions. | 
| -  __ ldr(r3, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 
| -  __ ldr(r4, FieldMemOperand(r3, SharedFunctionInfo::kCompilerHintsOffset)); | 
| -  __ tst(r4, Operand(1 << (SharedFunctionInfo::kStrictModeFunction + | 
| -                           kSmiTagSize))); | 
| -  __ b(ne, cont); | 
| - | 
| -  // Do not transform the receiver for native (Compilerhints already in r3). | 
| -  __ tst(r4, Operand(1 << (SharedFunctionInfo::kNative + kSmiTagSize))); | 
| -  __ b(ne, cont); | 
| +  // Do not transform the receiver for strict mode functions and natives. | 
| +  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); | 
| +  __ lwz(r7, FieldMemOperand(r6, SharedFunctionInfo::kCompilerHintsOffset)); | 
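| +  // On 32-bit targets the compiler-hints field is a smi, so the bit index | 
| +  // must allow for the smi tag; no such adjustment is needed on 64-bit. | 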
| +  __ TestBit(r7, | 
| +#if V8_TARGET_ARCH_PPC64 | 
| +             SharedFunctionInfo::kStrictModeFunction, | 
| +#else | 
| +             SharedFunctionInfo::kStrictModeFunction + kSmiTagSize, | 
| +#endif | 
| +             r0); | 
| +  __ bne(cont, cr0); | 
| + | 
| +  // Do not transform the receiver for native. | 
| +  __ TestBit(r7, | 
| +#if V8_TARGET_ARCH_PPC64 | 
| +             SharedFunctionInfo::kNative, | 
| +#else | 
| +             SharedFunctionInfo::kNative + kSmiTagSize, | 
| +#endif | 
| +             r0); | 
| +  __ bne(cont, cr0); | 
| } | 
|  | 
|  | 
| @@ -2802,12 +2950,13 @@ static void EmitSlowCase(MacroAssembler* masm, | 
| int argc, | 
| Label* non_function) { | 
| // Check for function proxy. | 
| -  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); | 
| -  __ b(ne, non_function); | 
| -  __ push(r1);  // put proxy as additional argument | 
| -  __ mov(r0, Operand(argc + 1, RelocInfo::NONE32)); | 
| -  __ mov(r2, Operand::Zero()); | 
| -  __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY); | 
| +  STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu); | 
| +  __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE)); | 
| +  __ bne(non_function); | 
| +  __ push(r4);  // put proxy as additional argument | 
| +  __ li(r3, Operand(argc + 1)); | 
| +  __ li(r5, Operand::Zero()); | 
| +  __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY); | 
| { | 
| Handle<Code> adaptor = | 
| masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 
| @@ -2817,10 +2966,10 @@ static void EmitSlowCase(MacroAssembler* masm, | 
| // CALL_NON_FUNCTION expects the non-function callee as receiver (instead | 
| // of the original receiver from the call site). | 
| __ bind(non_function); | 
| -  __ str(r1, MemOperand(sp, argc * kPointerSize)); | 
| -  __ mov(r0, Operand(argc));  // Set up the number of arguments. | 
| -  __ mov(r2, Operand::Zero()); | 
| -  __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION); | 
| +  __ StoreP(r4, MemOperand(sp, argc * kPointerSize), r0); | 
| +  __ li(r3, Operand(argc));  // Set up the number of arguments. | 
| +  __ li(r5, Operand::Zero()); | 
| +  __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION); | 
| __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 
| RelocInfo::CODE_TARGET); | 
| } | 
| @@ -2829,33 +2978,33 @@ static void EmitSlowCase(MacroAssembler* masm, | 
| static void EmitWrapCase(MacroAssembler* masm, int argc, Label* cont) { | 
| // Wrap the receiver and patch it back onto the stack. | 
| { FrameAndConstantPoolScope frame_scope(masm, StackFrame::INTERNAL); | 
| -    __ Push(r1, r3); | 
| +    __ Push(r4, r6); | 
| __ InvokeBuiltin(Builtins::TO_OBJECT, CALL_FUNCTION); | 
| -    __ pop(r1); | 
| +    __ pop(r4); | 
| } | 
| -  __ str(r0, MemOperand(sp, argc * kPointerSize)); | 
| -  __ jmp(cont); | 
| +  __ StoreP(r3, MemOperand(sp, argc * kPointerSize), r0); | 
| +  __ b(cont); | 
| } | 
|  | 
|  | 
| static void CallFunctionNoFeedback(MacroAssembler* masm, | 
| int argc, bool needs_checks, | 
| bool call_as_method) { | 
| -  // r1 : the function to call | 
| +  // r4 : the function to call | 
| Label slow, non_function, wrap, cont; | 
|  | 
| if (needs_checks) { | 
| // Check that the function is really a JavaScript function. | 
| -    // r1: pushed function (to be verified) | 
| -    __ JumpIfSmi(r1, &non_function); | 
| +    // r4: pushed function (to be verified) | 
| +    __ JumpIfSmi(r4, &non_function); | 
|  | 
| // Goto slow case if we do not have a function. | 
| -    __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE); | 
| -    __ b(ne, &slow); | 
| +    __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE); | 
| +    __ bne(&slow); | 
| } | 
|  | 
| // Fast-case: Invoke the function now. | 
| -  // r1: pushed function | 
| +  // r4: pushed function | 
| ParameterCount actual(argc); | 
|  | 
| if (call_as_method) { | 
| @@ -2864,20 +3013,20 @@ static void CallFunctionNoFeedback(MacroAssembler* masm, | 
| } | 
|  | 
| // Compute the receiver in sloppy mode. | 
| -    __ ldr(r3, MemOperand(sp, argc * kPointerSize)); | 
| +    __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0); | 
|  | 
| if (needs_checks) { | 
| -      __ JumpIfSmi(r3, &wrap); | 
| -      __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE); | 
| -      __ b(lt, &wrap); | 
| +      __ JumpIfSmi(r6, &wrap); | 
| +      __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE); | 
| +      __ blt(&wrap); | 
| } else { | 
| -      __ jmp(&wrap); | 
| +      __ b(&wrap); | 
| } | 
|  | 
| __ bind(&cont); | 
| } | 
|  | 
| -  __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper()); | 
| +  __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper()); | 
|  | 
| if (needs_checks) { | 
| // Slow-case: Non-function called. | 
| @@ -2898,101 +3047,105 @@ void CallFunctionStub::Generate(MacroAssembler* masm) { | 
|  | 
|  | 
| void CallConstructStub::Generate(MacroAssembler* masm) { | 
| -  // r0 : number of arguments | 
| -  // r1 : the function to call | 
| -  // r2 : feedback vector | 
| -  // r3 : (only if r2 is not the megamorphic symbol) slot in feedback | 
| +  // r3 : number of arguments | 
| +  // r4 : the function to call | 
| +  // r5 : feedback vector | 
| +  // r6 : (only if r5 is not the megamorphic symbol) slot in feedback | 
| //      vector (Smi) | 
| Label slow, non_function_call; | 
|  | 
| // Check that the function is not a smi. | 
| -  __ JumpIfSmi(r1, &non_function_call); | 
| +  __ JumpIfSmi(r4, &non_function_call); | 
| // Check that the function is a JSFunction. | 
| -  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE); | 
| -  __ b(ne, &slow); | 
| +  __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE); | 
| +  __ bne(&slow); | 
|  | 
| if (RecordCallTarget()) { | 
| GenerateRecordCallTarget(masm); | 
|  | 
| -    __ add(r5, r2, Operand::PointerOffsetFromSmiKey(r3)); | 
| +    __ SmiToPtrArrayOffset(r8, r6); | 
| +    __ add(r8, r5, r8); | 
| if (FLAG_pretenuring_call_new) { | 
| -      // Put the AllocationSite from the feedback vector into r2. | 
| +      // Put the AllocationSite from the feedback vector into r5. | 
| // By adding kPointerSize we encode that we know the AllocationSite | 
| -      // entry is at the feedback vector slot given by r3 + 1. | 
| -      __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize + kPointerSize)); | 
| +      // entry is at the feedback vector slot given by r6 + 1. | 
| +      __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize + kPointerSize)); | 
| } else { | 
| Label feedback_register_initialized; | 
| -      // Put the AllocationSite from the feedback vector into r2, or undefined. | 
| -      __ ldr(r2, FieldMemOperand(r5, FixedArray::kHeaderSize)); | 
| -      __ ldr(r5, FieldMemOperand(r2, AllocationSite::kMapOffset)); | 
| -      __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); | 
| -      __ b(eq, &feedback_register_initialized); | 
| -      __ LoadRoot(r2, Heap::kUndefinedValueRootIndex); | 
| +      // Put the AllocationSite from the feedback vector into r5, or undefined. | 
| +      __ LoadP(r5, FieldMemOperand(r8, FixedArray::kHeaderSize)); | 
| +      __ LoadP(r8, FieldMemOperand(r5, AllocationSite::kMapOffset)); | 
| +      __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex); | 
| +      __ beq(&feedback_register_initialized); | 
| +      __ LoadRoot(r5, Heap::kUndefinedValueRootIndex); | 
| __ bind(&feedback_register_initialized); | 
| } | 
|  | 
| -    __ AssertUndefinedOrAllocationSite(r2, r5); | 
| +    __ AssertUndefinedOrAllocationSite(r5, r8); | 
| } | 
|  | 
| // Jump to the function-specific construct stub. | 
| -  Register jmp_reg = r4; | 
| -  __ ldr(jmp_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 
| -  __ ldr(jmp_reg, FieldMemOperand(jmp_reg, | 
| +  Register jmp_reg = r7; | 
| +  __ LoadP(jmp_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset)); | 
| +  __ LoadP(jmp_reg, FieldMemOperand(jmp_reg, | 
| SharedFunctionInfo::kConstructStubOffset)); | 
| -  __ add(pc, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
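| +  // PPC cannot branch by writing the pc directly as ARM does, so compute | 
| +  // the entry address in a scratch register and jump through it. | 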
| +  __ addi(r0, jmp_reg, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| +  __ Jump(r0); | 
|  | 
| -  // r0: number of arguments | 
| -  // r1: called object | 
| -  // r4: object type | 
| +  // r3: number of arguments | 
| +  // r4: called object | 
| +  // r7: object type | 
| Label do_call; | 
| __ bind(&slow); | 
| -  __ cmp(r4, Operand(JS_FUNCTION_PROXY_TYPE)); | 
| -  __ b(ne, &non_function_call); | 
| -  __ GetBuiltinFunction(r1, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); | 
| -  __ jmp(&do_call); | 
| +  STATIC_ASSERT(JS_FUNCTION_PROXY_TYPE < 0xffffu); | 
| +  __ cmpi(r7, Operand(JS_FUNCTION_PROXY_TYPE)); | 
| +  __ bne(&non_function_call); | 
| +  __ GetBuiltinFunction(r4, Builtins::CALL_FUNCTION_PROXY_AS_CONSTRUCTOR); | 
| +  __ b(&do_call); | 
|  | 
| __ bind(&non_function_call); | 
| -  __ GetBuiltinFunction(r1, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); | 
| +  __ GetBuiltinFunction(r4, Builtins::CALL_NON_FUNCTION_AS_CONSTRUCTOR); | 
| __ bind(&do_call); | 
| -  // Set expected number of arguments to zero (not changing r0). | 
| -  __ mov(r2, Operand::Zero()); | 
| +  // Set expected number of arguments to zero (not changing r3). | 
| +  __ li(r5, Operand::Zero()); | 
| __ Jump(masm->isolate()->builtins()->ArgumentsAdaptorTrampoline(), | 
| RelocInfo::CODE_TARGET); | 
| } | 
|  | 
|  | 
| static void EmitLoadTypeFeedbackVector(MacroAssembler* masm, Register vector) { | 
| -  __ ldr(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 
| -  __ ldr(vector, FieldMemOperand(vector, | 
| -                                 JSFunction::kSharedFunctionInfoOffset)); | 
| -  __ ldr(vector, FieldMemOperand(vector, | 
| -                                 SharedFunctionInfo::kFeedbackVectorOffset)); | 
| +  __ LoadP(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 
| +  __ LoadP(vector, FieldMemOperand(vector, | 
| +                                   JSFunction::kSharedFunctionInfoOffset)); | 
| +  __ LoadP(vector, FieldMemOperand(vector, | 
| +                                   SharedFunctionInfo::kFeedbackVectorOffset)); | 
| } | 
|  | 
|  | 
| void CallIC_ArrayStub::Generate(MacroAssembler* masm) { | 
| -  // r1 - function | 
| -  // r3 - slot id | 
| +  // r4 - function | 
| +  // r6 - slot id | 
| Label miss; | 
| int argc = state_.arg_count(); | 
| ParameterCount actual(argc); | 
|  | 
| -  EmitLoadTypeFeedbackVector(masm, r2); | 
| +  EmitLoadTypeFeedbackVector(masm, r5); | 
|  | 
| -  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r4); | 
| -  __ cmp(r1, r4); | 
| -  __ b(ne, &miss); | 
| +  __ LoadGlobalFunction(Context::ARRAY_FUNCTION_INDEX, r7); | 
| +  __ cmp(r4, r7); | 
| +  __ bne(&miss); | 
|  | 
| -  __ mov(r0, Operand(arg_count())); | 
| -  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); | 
| -  __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize)); | 
| +  __ mov(r3, Operand(arg_count())); | 
| +  __ SmiToPtrArrayOffset(r7, r6); | 
| +  __ add(r7, r5, r7); | 
| +  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize)); | 
|  | 
| -  // Verify that r4 contains an AllocationSite | 
| -  __ ldr(r5, FieldMemOperand(r4, HeapObject::kMapOffset)); | 
| -  __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); | 
| -  __ b(ne, &miss); | 
| +  // Verify that r7 contains an AllocationSite | 
| +  __ LoadP(r8, FieldMemOperand(r7, HeapObject::kMapOffset)); | 
| +  __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex); | 
| +  __ bne(&miss); | 
|  | 
| -  __ mov(r2, r4); | 
| +  __ mr(r5, r7); | 
| ArrayConstructorStub stub(masm->isolate(), arg_count()); | 
| __ TailCallStub(&stub); | 
|  | 
| @@ -3011,36 +3164,37 @@ void CallIC_ArrayStub::Generate(MacroAssembler* masm) { | 
|  | 
|  | 
| void CallICStub::Generate(MacroAssembler* masm) { | 
| -  // r1 - function | 
| -  // r3 - slot id (Smi) | 
| +  // r4 - function | 
| +  // r6 - slot id (Smi) | 
| Label extra_checks_or_miss, slow_start; | 
| Label slow, non_function, wrap, cont; | 
| Label have_js_function; | 
| int argc = state_.arg_count(); | 
| ParameterCount actual(argc); | 
|  | 
| -  EmitLoadTypeFeedbackVector(masm, r2); | 
| +  EmitLoadTypeFeedbackVector(masm, r5); | 
|  | 
| -  // The checks. First, does r1 match the recorded monomorphic target? | 
| -  __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); | 
| -  __ ldr(r4, FieldMemOperand(r4, FixedArray::kHeaderSize)); | 
| -  __ cmp(r1, r4); | 
| -  __ b(ne, &extra_checks_or_miss); | 
| +  // The checks. First, does r4 match the recorded monomorphic target? | 
| +  __ SmiToPtrArrayOffset(r7, r6); | 
| +  __ add(r7, r5, r7); | 
| +  __ LoadP(r7, FieldMemOperand(r7, FixedArray::kHeaderSize)); | 
| +  __ cmp(r4, r7); | 
| +  __ bne(&extra_checks_or_miss); | 
|  | 
| __ bind(&have_js_function); | 
| if (state_.CallAsMethod()) { | 
| EmitContinueIfStrictOrNative(masm, &cont); | 
| // Compute the receiver in sloppy mode. | 
| -    __ ldr(r3, MemOperand(sp, argc * kPointerSize)); | 
| +    __ LoadP(r6, MemOperand(sp, argc * kPointerSize), r0); | 
|  | 
| -    __ JumpIfSmi(r3, &wrap); | 
| -    __ CompareObjectType(r3, r4, r4, FIRST_SPEC_OBJECT_TYPE); | 
| -    __ b(lt, &wrap); | 
| +    __ JumpIfSmi(r6, &wrap); | 
| +    __ CompareObjectType(r6, r7, r7, FIRST_SPEC_OBJECT_TYPE); | 
| +    __ blt(&wrap); | 
|  | 
| __ bind(&cont); | 
| } | 
|  | 
| -  __ InvokeFunction(r1, actual, JUMP_FUNCTION, NullCallWrapper()); | 
| +  __ InvokeFunction(r4, actual, JUMP_FUNCTION, NullCallWrapper()); | 
|  | 
| __ bind(&slow); | 
| EmitSlowCase(masm, argc, &non_function); | 
| @@ -3053,20 +3207,21 @@ void CallICStub::Generate(MacroAssembler* masm) { | 
| __ bind(&extra_checks_or_miss); | 
| Label miss; | 
|  | 
| -  __ CompareRoot(r4, Heap::kMegamorphicSymbolRootIndex); | 
| -  __ b(eq, &slow_start); | 
| -  __ CompareRoot(r4, Heap::kUninitializedSymbolRootIndex); | 
| -  __ b(eq, &miss); | 
| +  __ CompareRoot(r7, Heap::kMegamorphicSymbolRootIndex); | 
| +  __ beq(&slow_start); | 
| +  __ CompareRoot(r7, Heap::kUninitializedSymbolRootIndex); | 
| +  __ beq(&miss); | 
|  | 
| if (!FLAG_trace_ic) { | 
| // We are going megamorphic. If the feedback is a JSFunction, it is fine | 
| // to handle it here. More complex cases are dealt with in the runtime. | 
| -    __ AssertNotSmi(r4); | 
| -    __ CompareObjectType(r4, r5, r5, JS_FUNCTION_TYPE); | 
| -    __ b(ne, &miss); | 
| -    __ add(r4, r2, Operand::PointerOffsetFromSmiKey(r3)); | 
| +    __ AssertNotSmi(r7); | 
| +    __ CompareObjectType(r7, r8, r8, JS_FUNCTION_TYPE); | 
| +    __ bne(&miss); | 
| +    __ SmiToPtrArrayOffset(r7, r6); | 
| +    __ add(r7, r5, r7); | 
| __ LoadRoot(ip, Heap::kMegamorphicSymbolRootIndex); | 
| -    __ str(ip, FieldMemOperand(r4, FixedArray::kHeaderSize)); | 
| +    __ StoreP(ip, FieldMemOperand(r7, FixedArray::kHeaderSize), r0); | 
| __ jmp(&slow_start); | 
| } | 
|  | 
| @@ -3077,33 +3232,33 @@ void CallICStub::Generate(MacroAssembler* masm) { | 
| // the slow case | 
| __ bind(&slow_start); | 
| // Check that the function is really a JavaScript function. | 
| -  // r1: pushed function (to be verified) | 
| -  __ JumpIfSmi(r1, &non_function); | 
| +  // r4: pushed function (to be verified) | 
| +  __ JumpIfSmi(r4, &non_function); | 
|  | 
| // Goto slow case if we do not have a function. | 
| -  __ CompareObjectType(r1, r4, r4, JS_FUNCTION_TYPE); | 
| -  __ b(ne, &slow); | 
| -  __ jmp(&have_js_function); | 
| +  __ CompareObjectType(r4, r7, r7, JS_FUNCTION_TYPE); | 
| +  __ bne(&slow); | 
| +  __ b(&have_js_function); | 
| } | 
|  | 
|  | 
| void CallICStub::GenerateMiss(MacroAssembler* masm, IC::UtilityId id) { | 
| // Get the receiver of the function from the stack; 1 ~ return address. | 
| -  __ ldr(r4, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize)); | 
| +  __ LoadP(r7, MemOperand(sp, (state_.arg_count() + 1) * kPointerSize), r0); | 
|  | 
| { | 
| FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
|  | 
| // Push the receiver and the function and feedback info. | 
| -    __ Push(r4, r1, r2, r3); | 
| +    __ Push(r7, r4, r5, r6); | 
|  | 
| // Call the entry. | 
| ExternalReference miss = ExternalReference(IC_Utility(id), | 
| masm->isolate()); | 
| __ CallExternalReference(miss, 4); | 
|  | 
| -    // Move result to edi and exit the internal frame. | 
| -    __ mov(r1, r0); | 
| +    // Move result to r4 and exit the internal frame. | 
| +    __ mr(r4, r3); | 
| } | 
| } | 
|  | 
| @@ -3119,20 +3274,20 @@ void StringCharCodeAtGenerator::GenerateFast(MacroAssembler* masm) { | 
| __ JumpIfSmi(object_, receiver_not_string_); | 
|  | 
| // Fetch the instance type of the receiver into result register. | 
| -  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
| -  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
| +  __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
| +  __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
| // If the receiver is not a string trigger the non-string case. | 
| -  __ tst(result_, Operand(kIsNotStringMask)); | 
| -  __ b(ne, receiver_not_string_); | 
| +  __ andi(r0, result_, Operand(kIsNotStringMask)); | 
| +  __ bne(receiver_not_string_, cr0); | 
|  | 
| // If the index is non-smi trigger the non-smi case. | 
| __ JumpIfNotSmi(index_, &index_not_smi_); | 
| __ bind(&got_smi_index_); | 
|  | 
| // Check for index out of range. | 
| -  __ ldr(ip, FieldMemOperand(object_, String::kLengthOffset)); | 
| -  __ cmp(ip, Operand(index_)); | 
| -  __ b(ls, index_out_of_range_); | 
| +  __ LoadP(ip, FieldMemOperand(object_, String::kLengthOffset)); | 
| +  __ cmpl(ip, index_); | 
| +  __ ble(index_out_of_range_); | 
|  | 
| __ SmiUntag(index_); | 
|  | 
| @@ -3172,16 +3327,16 @@ void StringCharCodeAtGenerator::GenerateSlow( | 
| } | 
| // Save the conversion result before the pop instructions below | 
| // have a chance to overwrite it. | 
| -  __ Move(index_, r0); | 
| +  __ Move(index_, r3); | 
| __ pop(object_); | 
| // Reload the instance type. | 
| -  __ ldr(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
| -  __ ldrb(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
| +  __ LoadP(result_, FieldMemOperand(object_, HeapObject::kMapOffset)); | 
| +  __ lbz(result_, FieldMemOperand(result_, Map::kInstanceTypeOffset)); | 
| call_helper.AfterCall(masm); | 
| // If index is still not a smi, it must be out of range. | 
| __ JumpIfNotSmi(index_, index_out_of_range_); | 
| // Otherwise, return to the fast path. | 
| -  __ jmp(&got_smi_index_); | 
| +  __ b(&got_smi_index_); | 
|  | 
| // Call runtime. We get here when the receiver is a string and the | 
| // index is a number, but the code of getting the actual character | 
| @@ -3191,9 +3346,9 @@ void StringCharCodeAtGenerator::GenerateSlow( | 
| __ SmiTag(index_); | 
| __ Push(object_, index_); | 
| __ CallRuntime(Runtime::kStringCharCodeAtRT, 2); | 
| -  __ Move(result_, r0); | 
| +  __ Move(result_, r3); | 
| call_helper.AfterCall(masm); | 
| -  __ jmp(&exit_); | 
| +  __ b(&exit_); | 
|  | 
| __ Abort(kUnexpectedFallthroughFromCharCodeAtSlowCase); | 
| } | 
| @@ -3202,22 +3357,24 @@ void StringCharCodeAtGenerator::GenerateSlow( | 
| // ------------------------------------------------------------------------- | 
| // StringCharFromCodeGenerator | 
|  | 
| -void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 
| +void StringCharFromCodeGenerator::GenerateFast(MacroAssembler* masm) { | 
| // Fast case of Heap::LookupSingleCharacterStringFromCode. | 
| -  STATIC_ASSERT(kSmiTag == 0); | 
| -  STATIC_ASSERT(kSmiShiftSize == 0); | 
| DCHECK(IsPowerOf2(String::kMaxOneByteCharCode + 1)); | 
| -  __ tst(code_, | 
| -         Operand(kSmiTagMask | | 
| -                 ((~String::kMaxOneByteCharCode) << kSmiTagSize))); | 
| -  __ b(ne, &slow_case_); | 
| +  __ LoadSmiLiteral(r0, Smi::FromInt(~String::kMaxOneByteCharCode)); | 
| +  __ ori(r0, r0, Operand(kSmiTagMask)); | 
| +  __ and_(r0, code_, r0); | 
| +  __ cmpi(r0, Operand::Zero()); | 
| +  __ bne(&slow_case_); | 
|  | 
| __ LoadRoot(result_, Heap::kSingleCharacterStringCacheRootIndex); | 
| // At this point code register contains smi tagged ASCII char code. | 
| -  __ add(result_, result_, Operand::PointerOffsetFromSmiKey(code_)); | 
| -  __ ldr(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 
| +  __ mr(r0, code_); | 
| +  __ SmiToPtrArrayOffset(code_, code_); | 
| +  __ add(result_, result_, code_); | 
| +  __ mr(code_, r0); | 
| +  __ LoadP(result_, FieldMemOperand(result_, FixedArray::kHeaderSize)); | 
| __ CompareRoot(result_, Heap::kUndefinedValueRootIndex); | 
| -  __ b(eq, &slow_case_); | 
| +  __ beq(&slow_case_); | 
| __ bind(&exit_); | 
| } | 
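|  | 
| // The LoadSmiLiteral/ori/and_ sequence above rebuilds the single mask | 
| // test the removed ARM tst performed; as a C++ sketch (assuming the | 
| // 32-bit one-bit smi tag layout for brevity): | 
| static bool FitsSingleCharCache(intptr_t tagged_code) { | 
|   intptr_t mask = | 
|       kSmiTagMask | ((~String::kMaxOneByteCharCode) << kSmiTagSize); | 
|   // Zero iff tagged_code is a smi in the range 0..kMaxOneByteCharCode. | 
|   return (tagged_code & mask) == 0; | 
| } | 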
|  | 
| @@ -3231,9 +3388,9 @@ void StringCharFromCodeGenerator::GenerateSlow( | 
| call_helper.BeforeCall(masm); | 
| __ push(code_); | 
| __ CallRuntime(Runtime::kCharFromCode, 1); | 
| -  __ Move(result_, r0); | 
| +  __ Move(result_, r3); | 
| call_helper.AfterCall(masm); | 
| -  __ jmp(&exit_); | 
| +  __ b(&exit_); | 
|  | 
| __ Abort(kUnexpectedFallthroughFromCharFromCodeSlowCase); | 
| } | 
| @@ -3253,29 +3410,30 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, | 
| String::Encoding encoding) { | 
| if (FLAG_debug_code) { | 
| // Check that destination is word aligned. | 
| -    __ tst(dest, Operand(kPointerAlignmentMask)); | 
| -    __ Check(eq, kDestinationOfCopyNotAligned); | 
| +    __ andi(r0, dest, Operand(kPointerAlignmentMask)); | 
| +    __ Check(eq, kDestinationOfCopyNotAligned, cr0); | 
| } | 
|  | 
| -  // Assumes word reads and writes are little endian. | 
| // Nothing to do for zero characters. | 
| Label done; | 
| if (encoding == String::TWO_BYTE_ENCODING) { | 
| -    __ add(count, count, Operand(count), SetCC); | 
| +    // Double the length. | 
| +    __ add(count, count, count, LeaveOE, SetRC); | 
| +    __ beq(&done, cr0); | 
| +  } else { | 
| +    __ cmpi(count, Operand::Zero()); | 
| +    __ beq(&done); | 
| } | 
|  | 
| -  Register limit = count;  // Read until dest equals this. | 
| -  __ add(limit, dest, Operand(count)); | 
| - | 
| -  Label loop_entry, loop; | 
| -  // Copy bytes from src to dest until dest hits limit. | 
| -  __ b(&loop_entry); | 
| -  __ bind(&loop); | 
| -  __ ldrb(scratch, MemOperand(src, 1, PostIndex), lt); | 
| -  __ strb(scratch, MemOperand(dest, 1, PostIndex)); | 
| -  __ bind(&loop_entry); | 
| -  __ cmp(dest, Operand(limit)); | 
| -  __ b(lt, &loop); | 
| +  // Copy count bytes from src to dst. | 
| +  Label byte_loop; | 
| +  __ mtctr(count); | 
| +  __ bind(&byte_loop); | 
| +  __ lbz(scratch, MemOperand(src)); | 
| +  __ addi(src, src, Operand(1)); | 
| +  __ stb(scratch, MemOperand(dest)); | 
| +  __ addi(dest, dest, Operand(1)); | 
| +  __ bdnz(&byte_loop); | 
|  | 
| __ bind(&done); | 
| } | 
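|  | 
| // The mtctr/bdnz loop above behaves like this sketch (count is already | 
| // doubled for TWO_BYTE_ENCODING and known to be non-zero on entry): | 
| static void CopyBytesSketch(uint8_t* dest, const uint8_t* src, size_t count) { | 
|   do { | 
|     *dest++ = *src++;   // lbz/stb plus the two addi increments | 
|   } while (--count);    // bdnz: decrement CTR, branch while non-zero | 
| } | 
| // Using the CTR register replaces ARM's limit pointer and per-iteration | 
| // compare; the zero-length case must be branched around before mtctr, | 
| // which is why both encodings test for zero first. | 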
| @@ -3283,43 +3441,58 @@ void StringHelper::GenerateCopyCharacters(MacroAssembler* masm, | 
|  | 
| void StringHelper::GenerateHashInit(MacroAssembler* masm, | 
| Register hash, | 
| -                                    Register character) { | 
| +                                    Register character, | 
| +                                    Register scratch) { | 
| // hash = character + (character << 10); | 
| __ LoadRoot(hash, Heap::kHashSeedRootIndex); | 
| // Untag smi seed and add the character. | 
| -  __ add(hash, character, Operand(hash, LSR, kSmiTagSize)); | 
| +  __ SmiUntag(scratch, hash); | 
| +  __ add(hash, character, scratch); | 
| // hash += hash << 10; | 
| -  __ add(hash, hash, Operand(hash, LSL, 10)); | 
| +  __ slwi(scratch, hash, Operand(10)); | 
| +  __ add(hash, hash, scratch); | 
| // hash ^= hash >> 6; | 
| -  __ eor(hash, hash, Operand(hash, LSR, 6)); | 
| +  __ srwi(scratch, hash, Operand(6)); | 
| +  __ xor_(hash, hash, scratch); | 
| } | 
|  | 
|  | 
| void StringHelper::GenerateHashAddCharacter(MacroAssembler* masm, | 
| Register hash, | 
| -                                            Register character) { | 
| +                                            Register character, | 
| +                                            Register scratch) { | 
| // hash += character; | 
| -  __ add(hash, hash, Operand(character)); | 
| +  __ add(hash, hash, character); | 
| // hash += hash << 10; | 
| -  __ add(hash, hash, Operand(hash, LSL, 10)); | 
| +  __ slwi(scratch, hash, Operand(10)); | 
| +  __ add(hash, hash, scratch); | 
| // hash ^= hash >> 6; | 
| -  __ eor(hash, hash, Operand(hash, LSR, 6)); | 
| +  __ srwi(scratch, hash, Operand(6)); | 
| +  __ xor_(hash, hash, scratch); | 
| } | 
|  | 
|  | 
| void StringHelper::GenerateHashGetHash(MacroAssembler* masm, | 
| -                                       Register hash) { | 
| +                                       Register hash, | 
| +                                       Register scratch) { | 
| // hash += hash << 3; | 
| -  __ add(hash, hash, Operand(hash, LSL, 3)); | 
| +  __ slwi(scratch, hash, Operand(3)); | 
| +  __ add(hash, hash, scratch); | 
| // hash ^= hash >> 11; | 
| -  __ eor(hash, hash, Operand(hash, LSR, 11)); | 
| +  __ srwi(scratch, hash, Operand(11)); | 
| +  __ xor_(hash, hash, scratch); | 
| // hash += hash << 15; | 
| -  __ add(hash, hash, Operand(hash, LSL, 15)); | 
| +  __ slwi(scratch, hash, Operand(15)); | 
| +  __ add(hash, hash, scratch); | 
|  | 
| -  __ and_(hash, hash, Operand(String::kHashBitMask), SetCC); | 
| +  __ mov(scratch, Operand(String::kHashBitMask)); | 
| +  __ and_(hash, hash, scratch, SetRC); | 
|  | 
| // if (hash == 0) hash = 27; | 
| -  __ mov(hash, Operand(StringHasher::kZeroHash), LeaveCC, eq); | 
| +  Label done; | 
| +  __ bne(&done, cr0); | 
| +  __ li(hash, Operand(StringHasher::kZeroHash)); | 
| +  __ bind(&done); | 
| } | 
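|  | 
| // Taken together, the three helpers above implement the running hash | 
| // below; a sketch that mirrors the commented add/shift/xor steps | 
| // (scratch-register scheduling aside): | 
| static uint32_t StringHashSketch(uint32_t untagged_seed, | 
|                                  const uint8_t* chars, int length) { | 
|   uint32_t hash = untagged_seed + chars[0];  // GenerateHashInit | 
|   hash += hash << 10; | 
|   hash ^= hash >> 6; | 
|   for (int i = 1; i < length; i++) {         // GenerateHashAddCharacter | 
|     hash += chars[i]; | 
|     hash += hash << 10; | 
|     hash ^= hash >> 6; | 
|   } | 
|   hash += hash << 3;                         // GenerateHashGetHash | 
|   hash ^= hash >> 11; | 
|   hash += hash << 15; | 
|   hash &= String::kHashBitMask; | 
|   return hash == 0 ? StringHasher::kZeroHash : hash; | 
| } | 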
|  | 
|  | 
| @@ -3343,98 +3516,97 @@ void SubStringStub::Generate(MacroAssembler* masm) { | 
| const int kFromOffset = 1 * kPointerSize; | 
| const int kStringOffset = 2 * kPointerSize; | 
|  | 
| -  __ Ldrd(r2, r3, MemOperand(sp, kToOffset)); | 
| -  STATIC_ASSERT(kFromOffset == kToOffset + 4); | 
| -  STATIC_ASSERT(kSmiTag == 0); | 
| -  STATIC_ASSERT(kSmiTagSize + kSmiShiftSize == 1); | 
| - | 
| -  // Arithmetic shift right by one un-smi-tags. In this case we rotate right | 
| -  // instead because we bail out on non-smi values: ROR and ASR are equivalent | 
| -  // for smis but they set the flags in a way that's easier to optimize. | 
| -  __ mov(r2, Operand(r2, ROR, 1), SetCC); | 
| -  __ mov(r3, Operand(r3, ROR, 1), SetCC, cc); | 
| -  // If either to or from had the smi tag bit set, then C is set now, and N | 
| -  // has the same value: we rotated by 1, so the bottom bit is now the top bit. | 
| -  // We want to bailout to runtime here if From is negative.  In that case, the | 
| -  // next instruction is not executed and we fall through to bailing out to | 
| -  // runtime. | 
| -  // Executed if both r2 and r3 are untagged integers. | 
| -  __ sub(r2, r2, Operand(r3), SetCC, cc); | 
| -  // One of the above un-smis or the above SUB could have set N==1. | 
| -  __ b(mi, &runtime);  // Either "from" or "to" is not an smi, or from > to. | 
| +  __ LoadP(r5, MemOperand(sp, kToOffset)); | 
| +  __ LoadP(r6, MemOperand(sp, kFromOffset)); | 
| + | 
| +  // If either to or from is not a smi, bail out to the generic runtime. | 
| +  __ JumpIfNotSmi(r5, &runtime); | 
| +  __ JumpIfNotSmi(r6, &runtime); | 
| +  __ SmiUntag(r5); | 
| +  __ SmiUntag(r6, SetRC); | 
| +  // Both r5 and r6 are untagged integers. | 
| + | 
| +  // We want to bailout to runtime here if From is negative. | 
| +  __ blt(&runtime, cr0);  // From < 0. | 
| + | 
| +  __ cmpl(r6, r5); | 
| +  __ bgt(&runtime);  // Fail if from > to. | 
| +  __ sub(r5, r5, r6); | 
|  | 
| // Make sure first argument is a string. | 
| -  __ ldr(r0, MemOperand(sp, kStringOffset)); | 
| -  __ JumpIfSmi(r0, &runtime); | 
| -  Condition is_string = masm->IsObjectStringType(r0, r1); | 
| -  __ b(NegateCondition(is_string), &runtime); | 
| +  __ LoadP(r3, MemOperand(sp, kStringOffset)); | 
| +  __ JumpIfSmi(r3, &runtime); | 
| +  Condition is_string = masm->IsObjectStringType(r3, r4); | 
| +  __ b(NegateCondition(is_string), &runtime, cr0); | 
|  | 
| Label single_char; | 
| -  __ cmp(r2, Operand(1)); | 
| +  __ cmpi(r5, Operand(1)); | 
| __ b(eq, &single_char); | 
|  | 
| // Short-cut for the case of trivial substring. | 
| -  Label return_r0; | 
| -  // r0: original string | 
| -  // r2: result string length | 
| -  __ ldr(r4, FieldMemOperand(r0, String::kLengthOffset)); | 
| -  __ cmp(r2, Operand(r4, ASR, 1)); | 
| +  Label return_r3; | 
| +  // r3: original string | 
| +  // r5: result string length | 
| +  __ LoadP(r7, FieldMemOperand(r3, String::kLengthOffset)); | 
| +  __ SmiUntag(r0, r7); | 
| +  __ cmpl(r5, r0); | 
| // Return original string. | 
| -  __ b(eq, &return_r0); | 
| +  __ beq(&return_r3); | 
| // Longer than original string's length or negative: unsafe arguments. | 
| -  __ b(hi, &runtime); | 
| +  __ bgt(&runtime); | 
| // Shorter than original string's length: an actual substring. | 
|  | 
| // Deal with different string types: update the index if necessary | 
| -  // and put the underlying string into r5. | 
| -  // r0: original string | 
| -  // r1: instance type | 
| -  // r2: length | 
| -  // r3: from index (untagged) | 
| +  // and put the underlying string into r8. | 
| +  // r3: original string | 
| +  // r4: instance type | 
| +  // r5: length | 
| +  // r6: from index (untagged) | 
| Label underlying_unpacked, sliced_string, seq_or_external_string; | 
| // If the string is not indirect, it can only be sequential or external. | 
| STATIC_ASSERT(kIsIndirectStringMask == (kSlicedStringTag & kConsStringTag)); | 
| STATIC_ASSERT(kIsIndirectStringMask != 0); | 
| -  __ tst(r1, Operand(kIsIndirectStringMask)); | 
| -  __ b(eq, &seq_or_external_string); | 
| +  __ andi(r0, r4, Operand(kIsIndirectStringMask)); | 
| +  __ beq(&seq_or_external_string, cr0); | 
|  | 
| -  __ tst(r1, Operand(kSlicedNotConsMask)); | 
| -  __ b(ne, &sliced_string); | 
| +  __ andi(r0, r4, Operand(kSlicedNotConsMask)); | 
| +  __ bne(&sliced_string, cr0); | 
| // Cons string.  Check whether it is flat, then fetch first part. | 
| -  __ ldr(r5, FieldMemOperand(r0, ConsString::kSecondOffset)); | 
| -  __ CompareRoot(r5, Heap::kempty_stringRootIndex); | 
| -  __ b(ne, &runtime); | 
| -  __ ldr(r5, FieldMemOperand(r0, ConsString::kFirstOffset)); | 
| +  __ LoadP(r8, FieldMemOperand(r3, ConsString::kSecondOffset)); | 
| +  __ CompareRoot(r8, Heap::kempty_stringRootIndex); | 
| +  __ bne(&runtime); | 
| +  __ LoadP(r8, FieldMemOperand(r3, ConsString::kFirstOffset)); | 
| // Update instance type. | 
| -  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); | 
| -  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); | 
| -  __ jmp(&underlying_unpacked); | 
| +  __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset)); | 
| +  __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 
| +  __ b(&underlying_unpacked); | 
|  | 
| __ bind(&sliced_string); | 
| // Sliced string.  Fetch parent and correct start index by offset. | 
| -  __ ldr(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); | 
| -  __ ldr(r4, FieldMemOperand(r0, SlicedString::kOffsetOffset)); | 
| -  __ add(r3, r3, Operand(r4, ASR, 1));  // Add offset to index. | 
| +  __ LoadP(r8, FieldMemOperand(r3, SlicedString::kParentOffset)); | 
| +  __ LoadP(r7, FieldMemOperand(r3, SlicedString::kOffsetOffset)); | 
| +  __ SmiUntag(r4, r7); | 
| +  __ add(r6, r6, r4);  // Add offset to index. | 
| // Update instance type. | 
| -  __ ldr(r1, FieldMemOperand(r5, HeapObject::kMapOffset)); | 
| -  __ ldrb(r1, FieldMemOperand(r1, Map::kInstanceTypeOffset)); | 
| -  __ jmp(&underlying_unpacked); | 
| +  __ LoadP(r4, FieldMemOperand(r8, HeapObject::kMapOffset)); | 
| +  __ lbz(r4, FieldMemOperand(r4, Map::kInstanceTypeOffset)); | 
| +  __ b(&underlying_unpacked); | 
|  | 
| __ bind(&seq_or_external_string); | 
| // Sequential or external string.  Just move string to the expected register. | 
| -  __ mov(r5, r0); | 
| +  __ mr(r8, r3); | 
|  | 
| __ bind(&underlying_unpacked); | 
|  | 
| if (FLAG_string_slices) { | 
| Label copy_routine; | 
| -    // r5: underlying subject string | 
| -    // r1: instance type of underlying subject string | 
| -    // r2: length | 
| -    // r3: adjusted start index (untagged) | 
| -    __ cmp(r2, Operand(SlicedString::kMinLength)); | 
| +    // r8: underlying subject string | 
| +    // r4: instance type of underlying subject string | 
| +    // r5: length | 
| +    // r6: adjusted start index (untagged) | 
| +    __ cmpi(r5, Operand(SlicedString::kMinLength)); | 
| // Short slice.  Copy instead of slicing. | 
| -    __ b(lt, &copy_routine); | 
| +    __ blt(&copy_routine); | 
| // Allocate new sliced string.  At this point we do not reload the instance | 
| // type including the string encoding because we simply rely on the info | 
| // provided by the original string.  It does not matter if the original | 
| @@ -3443,89 +3615,89 @@ void SubStringStub::Generate(MacroAssembler* masm) { | 
| Label two_byte_slice, set_slice_header; | 
| STATIC_ASSERT((kStringEncodingMask & kOneByteStringTag) != 0); | 
| STATIC_ASSERT((kStringEncodingMask & kTwoByteStringTag) == 0); | 
| -    __ tst(r1, Operand(kStringEncodingMask)); | 
| -    __ b(eq, &two_byte_slice); | 
| -    __ AllocateAsciiSlicedString(r0, r2, r6, r4, &runtime); | 
| -    __ jmp(&set_slice_header); | 
| +    __ andi(r0, r4, Operand(kStringEncodingMask)); | 
| +    __ beq(&two_byte_slice, cr0); | 
| +    __ AllocateAsciiSlicedString(r3, r5, r9, r10, &runtime); | 
| +    __ b(&set_slice_header); | 
| __ bind(&two_byte_slice); | 
| -    __ AllocateTwoByteSlicedString(r0, r2, r6, r4, &runtime); | 
| +    __ AllocateTwoByteSlicedString(r3, r5, r9, r10, &runtime); | 
| __ bind(&set_slice_header); | 
| -    __ mov(r3, Operand(r3, LSL, 1)); | 
| -    __ str(r5, FieldMemOperand(r0, SlicedString::kParentOffset)); | 
| -    __ str(r3, FieldMemOperand(r0, SlicedString::kOffsetOffset)); | 
| -    __ jmp(&return_r0); | 
| +    __ SmiTag(r6); | 
| +    __ StoreP(r8, FieldMemOperand(r3, SlicedString::kParentOffset), r0); | 
| +    __ StoreP(r6, FieldMemOperand(r3, SlicedString::kOffsetOffset), r0); | 
| +    __ b(&return_r3); | 
|  | 
| __ bind(&copy_routine); | 
| } | 
|  | 
| -  // r5: underlying subject string | 
| -  // r1: instance type of underlying subject string | 
| -  // r2: length | 
| -  // r3: adjusted start index (untagged) | 
| +  // r8: underlying subject string | 
| +  // r4: instance type of underlying subject string | 
| +  // r5: length | 
| +  // r6: adjusted start index (untagged) | 
| Label two_byte_sequential, sequential_string, allocate_result; | 
| STATIC_ASSERT(kExternalStringTag != 0); | 
| STATIC_ASSERT(kSeqStringTag == 0); | 
| -  __ tst(r1, Operand(kExternalStringTag)); | 
| -  __ b(eq, &sequential_string); | 
| +  __ andi(r0, r4, Operand(kExternalStringTag)); | 
| +  __ beq(&sequential_string, cr0); | 
|  | 
| // Handle external string. | 
| // Rule out short external strings. | 
| STATIC_ASSERT(kShortExternalStringTag != 0); | 
| -  __ tst(r1, Operand(kShortExternalStringTag)); | 
| -  __ b(ne, &runtime); | 
| -  __ ldr(r5, FieldMemOperand(r5, ExternalString::kResourceDataOffset)); | 
| -  // r5 already points to the first character of underlying string. | 
| -  __ jmp(&allocate_result); | 
| +  __ andi(r0, r4, Operand(kShortExternalStringTag)); | 
| +  __ bne(&runtime, cr0); | 
| +  __ LoadP(r8, FieldMemOperand(r8, ExternalString::kResourceDataOffset)); | 
| +  // r8 already points to the first character of underlying string. | 
| +  __ b(&allocate_result); | 
|  | 
| __ bind(&sequential_string); | 
| // Locate first character of underlying subject string. | 
| STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize); | 
| -  __ add(r5, r5, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
| +  __ addi(r8, r8, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
|  | 
| __ bind(&allocate_result); | 
| // Sequential ASCII string.  Allocate the result. | 
| STATIC_ASSERT((kOneByteStringTag & kStringEncodingMask) != 0); | 
| -  __ tst(r1, Operand(kStringEncodingMask)); | 
| -  __ b(eq, &two_byte_sequential); | 
| +  __ andi(r0, r4, Operand(kStringEncodingMask)); | 
| +  __ beq(&two_byte_sequential, cr0); | 
|  | 
| // Allocate and copy the resulting ASCII string. | 
| -  __ AllocateAsciiString(r0, r2, r4, r6, r1, &runtime); | 
| +  __ AllocateAsciiString(r3, r5, r7, r9, r10, &runtime); | 
|  | 
| // Locate first character of substring to copy. | 
| -  __ add(r5, r5, r3); | 
| +  __ add(r8, r8, r6); | 
| // Locate first character of result. | 
| -  __ add(r1, r0, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
| +  __ addi(r4, r3, Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
|  | 
| -  // r0: result string | 
| -  // r1: first character of result string | 
| -  // r2: result string length | 
| -  // r5: first character of substring to copy | 
| +  // r3: result string | 
| +  // r4: first character of result string | 
| +  // r5: result string length | 
| +  // r8: first character of substring to copy | 
| STATIC_ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 
| StringHelper::GenerateCopyCharacters( | 
| -      masm, r1, r5, r2, r3, String::ONE_BYTE_ENCODING); | 
| -  __ jmp(&return_r0); | 
| +      masm, r4, r8, r5, r6, String::ONE_BYTE_ENCODING); | 
| +  __ b(&return_r3); | 
|  | 
| // Allocate and copy the resulting two-byte string. | 
| __ bind(&two_byte_sequential); | 
| -  __ AllocateTwoByteString(r0, r2, r4, r6, r1, &runtime); | 
| +  __ AllocateTwoByteString(r3, r5, r7, r9, r10, &runtime); | 
|  | 
| // Locate first character of substring to copy. | 
| -  STATIC_ASSERT(kSmiTagSize == 1 && kSmiTag == 0); | 
| -  __ add(r5, r5, Operand(r3, LSL, 1)); | 
| +  __ ShiftLeftImm(r4, r6, Operand(1)); | 
| +  __ add(r8, r8, r4); | 
| // Locate first character of result. | 
| -  __ add(r1, r0, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
| +  __ addi(r4, r3, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag)); | 
|  | 
| -  // r0: result string. | 
| -  // r1: first character of result. | 
| -  // r2: result length. | 
| -  // r5: first character of substring to copy. | 
| +  // r3: result string. | 
| +  // r4: first character of result. | 
| +  // r5: result length. | 
| +  // r8: first character of substring to copy. | 
| STATIC_ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); | 
| StringHelper::GenerateCopyCharacters( | 
| -      masm, r1, r5, r2, r3, String::TWO_BYTE_ENCODING); | 
| +      masm, r4, r8, r5, r6, String::TWO_BYTE_ENCODING); | 
|  | 
| -  __ bind(&return_r0); | 
| +  __ bind(&return_r3); | 
| Counters* counters = isolate()->counters(); | 
| -  __ IncrementCounter(counters->sub_string_native(), 1, r3, r4); | 
| +  __ IncrementCounter(counters->sub_string_native(), 1, r6, r7); | 
| __ Drop(3); | 
| __ Ret(); | 
|  | 
| @@ -3534,13 +3706,13 @@ void SubStringStub::Generate(MacroAssembler* masm) { | 
| __ TailCallRuntime(Runtime::kSubString, 3, 1); | 
|  | 
| __ bind(&single_char); | 
| -  // r0: original string | 
| -  // r1: instance type | 
| -  // r2: length | 
| -  // r3: from index (untagged) | 
| -  __ SmiTag(r3, r3); | 
| +  // r3: original string | 
| +  // r4: instance type | 
| +  // r5: length | 
| +  // r6: from index (untagged) | 
| +  __ SmiTag(r6, r6); | 
| StringCharAtGenerator generator( | 
| -      r0, r3, r2, r0, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); | 
| +      r3, r6, r5, r3, &runtime, &runtime, &runtime, STRING_INDEX_IS_NUMBER); | 
| generator.GenerateFast(masm); | 
| __ Drop(3); | 
| __ Ret(); | 
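|  | 
| // The explicit smi checks at the top of this stub replace ARM's ROR | 
| // trick; the PPC bailout condition amounts to this sketch (indices | 
| // already untagged): | 
| static bool SubStringNeedsRuntime(intptr_t to, intptr_t from, | 
|                                   intptr_t length_of_string) { | 
|   // from < 0, from > to, or a result longer than the original string | 
|   // all fall back to Runtime::kSubString. | 
|   return from < 0 || from > to || (to - from) > length_of_string; | 
| } | 
|  | 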
| @@ -3552,37 +3724,36 @@ void StringCompareStub::GenerateFlatAsciiStringEquals(MacroAssembler* masm, | 
| Register left, | 
| Register right, | 
| Register scratch1, | 
| -                                                      Register scratch2, | 
| -                                                      Register scratch3) { | 
| +                                                      Register scratch2) { | 
| Register length = scratch1; | 
|  | 
| // Compare lengths. | 
| Label strings_not_equal, check_zero_length; | 
| -  __ ldr(length, FieldMemOperand(left, String::kLengthOffset)); | 
| -  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 
| +  __ LoadP(length, FieldMemOperand(left, String::kLengthOffset)); | 
| +  __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 
| __ cmp(length, scratch2); | 
| -  __ b(eq, &check_zero_length); | 
| +  __ beq(&check_zero_length); | 
| __ bind(&strings_not_equal); | 
| -  __ mov(r0, Operand(Smi::FromInt(NOT_EQUAL))); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(NOT_EQUAL)); | 
| __ Ret(); | 
|  | 
| // Check if the length is zero. | 
| Label compare_chars; | 
| __ bind(&check_zero_length); | 
| STATIC_ASSERT(kSmiTag == 0); | 
| -  __ cmp(length, Operand::Zero()); | 
| -  __ b(ne, &compare_chars); | 
| -  __ mov(r0, Operand(Smi::FromInt(EQUAL))); | 
| +  __ cmpi(length, Operand::Zero()); | 
| +  __ bne(&compare_chars); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 
| __ Ret(); | 
|  | 
| // Compare characters. | 
| __ bind(&compare_chars); | 
| GenerateAsciiCharsCompareLoop(masm, | 
| -                                left, right, length, scratch2, scratch3, | 
| +                                left, right, length, scratch2, | 
| &strings_not_equal); | 
|  | 
| // Characters are equal. | 
| -  __ mov(r0, Operand(Smi::FromInt(EQUAL))); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 
| __ Ret(); | 
| } | 
|  | 
| @@ -3592,35 +3763,43 @@ void StringCompareStub::GenerateCompareFlatAsciiStrings(MacroAssembler* masm, | 
| Register right, | 
| Register scratch1, | 
| Register scratch2, | 
| -                                                        Register scratch3, | 
| -                                                        Register scratch4) { | 
| -  Label result_not_equal, compare_lengths; | 
| +                                                        Register scratch3) { | 
| +  Label skip, result_not_equal, compare_lengths; | 
| // Find minimum length and length difference. | 
| -  __ ldr(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 
| -  __ ldr(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 
| -  __ sub(scratch3, scratch1, Operand(scratch2), SetCC); | 
| +  __ LoadP(scratch1, FieldMemOperand(left, String::kLengthOffset)); | 
| +  __ LoadP(scratch2, FieldMemOperand(right, String::kLengthOffset)); | 
| +  __ sub(scratch3, scratch1, scratch2, LeaveOE, SetRC); | 
| Register length_delta = scratch3; | 
| -  __ mov(scratch1, scratch2, LeaveCC, gt); | 
| +  __ ble(&skip, cr0); | 
| +  __ mr(scratch1, scratch2); | 
| +  __ bind(&skip); | 
| Register min_length = scratch1; | 
| STATIC_ASSERT(kSmiTag == 0); | 
| -  __ cmp(min_length, Operand::Zero()); | 
| -  __ b(eq, &compare_lengths); | 
| +  __ cmpi(min_length, Operand::Zero()); | 
| +  __ beq(&compare_lengths); | 
|  | 
| // Compare loop. | 
| GenerateAsciiCharsCompareLoop(masm, | 
| -                                left, right, min_length, scratch2, scratch4, | 
| +                                left, right, min_length, scratch2, | 
| &result_not_equal); | 
|  | 
| // Compare lengths - strings up to min-length are equal. | 
| __ bind(&compare_lengths); | 
| DCHECK(Smi::FromInt(EQUAL) == static_cast<Smi*>(0)); | 
| // Use length_delta as result if it's zero. | 
| -  __ mov(r0, Operand(length_delta), SetCC); | 
| +  __ mr(r3, length_delta); | 
| +  __ cmpi(r3, Operand::Zero()); | 
| __ bind(&result_not_equal); | 
| // Conditionally update the result based either on length_delta or | 
| // the last comparison performed in the loop above. | 
| -  __ mov(r0, Operand(Smi::FromInt(GREATER)), LeaveCC, gt); | 
| -  __ mov(r0, Operand(Smi::FromInt(LESS)), LeaveCC, lt); | 
| +  Label less_equal, equal; | 
| +  __ ble(&less_equal); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(GREATER)); | 
| +  __ Ret(); | 
| +  __ bind(&less_equal); | 
| +  __ beq(&equal); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(LESS)); | 
| +  __ bind(&equal); | 
| __ Ret(); | 
| } | 
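|  | 
| // PPC has no conditionally executed mov, so the -1/0/1 selection at | 
| // the end of this helper becomes a branch ladder; the value it | 
| // materializes in r3 is equivalent to this sketch: | 
| static Smi* ThreeWayResult(intptr_t diff) { | 
|   if (diff > 0) return Smi::FromInt(GREATER); | 
|   if (diff < 0) return Smi::FromInt(LESS); | 
|   return Smi::FromInt(EQUAL);  // a zero length_delta falls through | 
| } | 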
|  | 
| @@ -3631,28 +3810,28 @@ void StringCompareStub::GenerateAsciiCharsCompareLoop( | 
| Register right, | 
| Register length, | 
| Register scratch1, | 
| -    Register scratch2, | 
| Label* chars_not_equal) { | 
| // Change index to run from -length to -1 by adding length to string | 
| // start. This means that loop ends when index reaches zero, which | 
| // doesn't need an additional compare. | 
| __ SmiUntag(length); | 
| -  __ add(scratch1, length, | 
| -         Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
| -  __ add(left, left, Operand(scratch1)); | 
| -  __ add(right, right, Operand(scratch1)); | 
| -  __ rsb(length, length, Operand::Zero()); | 
| +  __ addi(scratch1, length, | 
| +          Operand(SeqOneByteString::kHeaderSize - kHeapObjectTag)); | 
| +  __ add(left, left, scratch1); | 
| +  __ add(right, right, scratch1); | 
| +  __ subfic(length, length, Operand::Zero()); | 
| Register index = length;  // index = -length; | 
|  | 
| // Compare loop. | 
| Label loop; | 
| __ bind(&loop); | 
| -  __ ldrb(scratch1, MemOperand(left, index)); | 
| -  __ ldrb(scratch2, MemOperand(right, index)); | 
| -  __ cmp(scratch1, scratch2); | 
| -  __ b(ne, chars_not_equal); | 
| -  __ add(index, index, Operand(1), SetCC); | 
| -  __ b(ne, &loop); | 
| +  __ lbzx(scratch1, MemOperand(left, index)); | 
| +  __ lbzx(r0, MemOperand(right, index)); | 
| +  __ cmp(scratch1, r0); | 
| +  __ bne(chars_not_equal); | 
| +  __ addi(index, index, Operand(1)); | 
| +  __ cmpi(index, Operand::Zero()); | 
| +  __ bne(&loop); | 
| } | 
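|  | 
| // A sketch of the negative-index scheme above: both cursors are | 
| // advanced just past their strings and a single index runs from | 
| // -length up to zero, so termination needs only the cmpi against zero | 
| // (ARM folded that test into its flag-setting add). | 
| static bool AsciiCharsEqualSketch(const uint8_t* left_end, | 
|                                   const uint8_t* right_end, | 
|                                   intptr_t length) { | 
|   for (intptr_t index = -length; index != 0; index++) { | 
|     if (left_end[index] != right_end[index]) return false;  // lbzx pair | 
|   } | 
|   return true; | 
| } | 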
|  | 
|  | 
| @@ -3664,27 +3843,28 @@ void StringCompareStub::Generate(MacroAssembler* masm) { | 
| // Stack frame on entry. | 
| //  sp[0]: right string | 
| //  sp[4]: left string | 
| -  __ Ldrd(r0 , r1, MemOperand(sp));  // Load right in r0, left in r1. | 
| +  __ LoadP(r3, MemOperand(sp));  // Load right in r3, left in r4. | 
| +  __ LoadP(r4, MemOperand(sp, kPointerSize)); | 
|  | 
| Label not_same; | 
| -  __ cmp(r0, r1); | 
| -  __ b(ne, &not_same); | 
| +  __ cmp(r3, r4); | 
| +  __ bne(&not_same); | 
| STATIC_ASSERT(EQUAL == 0); | 
| STATIC_ASSERT(kSmiTag == 0); | 
| -  __ mov(r0, Operand(Smi::FromInt(EQUAL))); | 
| -  __ IncrementCounter(counters->string_compare_native(), 1, r1, r2); | 
| -  __ add(sp, sp, Operand(2 * kPointerSize)); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 
| +  __ IncrementCounter(counters->string_compare_native(), 1, r4, r5); | 
| +  __ addi(sp, sp, Operand(2 * kPointerSize)); | 
| __ Ret(); | 
|  | 
| __ bind(&not_same); | 
|  | 
| // Check that both objects are sequential ASCII strings. | 
| -  __ JumpIfNotBothSequentialAsciiStrings(r1, r0, r2, r3, &runtime); | 
| +  __ JumpIfNotBothSequentialAsciiStrings(r4, r3, r5, r6, &runtime); | 
|  | 
| // Compare flat ASCII strings natively. Remove arguments from stack first. | 
| -  __ IncrementCounter(counters->string_compare_native(), 1, r2, r3); | 
| -  __ add(sp, sp, Operand(2 * kPointerSize)); | 
| -  GenerateCompareFlatAsciiStrings(masm, r1, r0, r2, r3, r4, r5); | 
| +  __ IncrementCounter(counters->string_compare_native(), 1, r5, r6); | 
| +  __ addi(sp, sp, Operand(2 * kPointerSize)); | 
| +  GenerateCompareFlatAsciiStrings(masm, r4, r3, r5, r6, r7); | 
|  | 
| // Call the runtime; it returns -1 (less), 0 (equal), or 1 (greater) | 
| // tagged as a small integer. | 
| @@ -3695,25 +3875,25 @@ void StringCompareStub::Generate(MacroAssembler* masm) { | 
|  | 
| void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 
| // ----------- S t a t e ------------- | 
| -  //  -- r1    : left | 
| -  //  -- r0    : right | 
| +  //  -- r4    : left | 
| +  //  -- r3    : right | 
| //  -- lr    : return address | 
| // ----------------------------------- | 
|  | 
| -  // Load r2 with the allocation site.  We stick an undefined dummy value here | 
| +  // Load r5 with the allocation site.  We stick an undefined dummy value here | 
| // and replace it with the real allocation site later when we instantiate this | 
| // stub in BinaryOpICWithAllocationSiteStub::GetCodeCopyFromTemplate(). | 
| -  __ Move(r2, handle(isolate()->heap()->undefined_value())); | 
| +  __ Move(r5, handle(isolate()->heap()->undefined_value())); | 
|  | 
| // Make sure that we actually patched the allocation site. | 
| if (FLAG_debug_code) { | 
| -    __ tst(r2, Operand(kSmiTagMask)); | 
| -    __ Assert(ne, kExpectedAllocationSite); | 
| -    __ push(r2); | 
| -    __ ldr(r2, FieldMemOperand(r2, HeapObject::kMapOffset)); | 
| +    __ TestIfSmi(r5, r0); | 
| +    __ Assert(ne, kExpectedAllocationSite, cr0); | 
| +    __ push(r5); | 
| +    __ LoadP(r5, FieldMemOperand(r5, HeapObject::kMapOffset)); | 
| __ LoadRoot(ip, Heap::kAllocationSiteMapRootIndex); | 
| -    __ cmp(r2, ip); | 
| -    __ pop(r2); | 
| +    __ cmp(r5, ip); | 
| +    __ pop(r5); | 
| __ Assert(eq, kExpectedAllocationSite); | 
| } | 
|  | 
| @@ -3727,16 +3907,18 @@ void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 
| void ICCompareStub::GenerateSmis(MacroAssembler* masm) { | 
| DCHECK(state_ == CompareIC::SMI); | 
| Label miss; | 
| -  __ orr(r2, r1, r0); | 
| -  __ JumpIfNotSmi(r2, &miss); | 
| +  __ orx(r5, r4, r3); | 
| +  __ JumpIfNotSmi(r5, &miss); | 
|  | 
| if (GetCondition() == eq) { | 
| // For equality we do not care about the sign of the result. | 
| -    __ sub(r0, r0, r1, SetCC); | 
| +    __ sub(r3, r3, r4); | 
| } else { | 
| // Untag before subtracting to avoid handling overflow. | 
| -    __ SmiUntag(r1); | 
| -    __ sub(r0, r1, Operand::SmiUntag(r0)); | 
| +    __ SmiUntag(r4); | 
| +    __ SmiUntag(r3); | 
| +    __ sub(r3, r4, r3); | 
| } | 
| __ Ret(); | 
|  | 
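| // Why the ordered case untags before subtracting, as a sketch | 
| // (assuming a one-bit smi tag; 64-bit smis shift by kSmiShift instead): | 
| static intptr_t OrderedSmiCompareSketch(intptr_t left_tagged, | 
|                                         intptr_t right_tagged) { | 
|   intptr_t left = left_tagged >> kSmiTagSize;    // SmiUntag(r4) | 
|   intptr_t right = right_tagged >> kSmiTagSize;  // SmiUntag(r3) | 
|   // Subtracting tagged values could overflow; the untagged difference | 
|   // cannot, so its sign safely encodes less/equal/greater. | 
|   return left - right; | 
| } | 
|  | 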
| @@ -3751,48 +3933,55 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 
| Label generic_stub; | 
| Label unordered, maybe_undefined1, maybe_undefined2; | 
| Label miss; | 
| +  Label equal, less_than; | 
|  | 
| if (left_ == CompareIC::SMI) { | 
| -    __ JumpIfNotSmi(r1, &miss); | 
| +    __ JumpIfNotSmi(r4, &miss); | 
| } | 
| if (right_ == CompareIC::SMI) { | 
| -    __ JumpIfNotSmi(r0, &miss); | 
| +    __ JumpIfNotSmi(r3, &miss); | 
| } | 
|  | 
| // Inlining the double comparison and falling back to the general compare | 
| // stub if NaN is involved. | 
| // Load left and right operand. | 
| Label done, left, left_smi, right_smi; | 
| -  __ JumpIfSmi(r0, &right_smi); | 
| -  __ CheckMap(r0, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 
| +  __ JumpIfSmi(r3, &right_smi); | 
| +  __ CheckMap(r3, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined1, | 
| DONT_DO_SMI_CHECK); | 
| -  __ sub(r2, r0, Operand(kHeapObjectTag)); | 
| -  __ vldr(d1, r2, HeapNumber::kValueOffset); | 
| +  __ lfd(d1, FieldMemOperand(r3, HeapNumber::kValueOffset)); | 
| __ b(&left); | 
| __ bind(&right_smi); | 
| -  __ SmiToDouble(d1, r0); | 
| +  __ SmiToDouble(d1, r3); | 
|  | 
| __ bind(&left); | 
| -  __ JumpIfSmi(r1, &left_smi); | 
| -  __ CheckMap(r1, r2, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 
| +  __ JumpIfSmi(r4, &left_smi); | 
| +  __ CheckMap(r4, r5, Heap::kHeapNumberMapRootIndex, &maybe_undefined2, | 
| DONT_DO_SMI_CHECK); | 
| -  __ sub(r2, r1, Operand(kHeapObjectTag)); | 
| -  __ vldr(d0, r2, HeapNumber::kValueOffset); | 
| +  __ lfd(d0, FieldMemOperand(r4, HeapNumber::kValueOffset)); | 
| __ b(&done); | 
| __ bind(&left_smi); | 
| -  __ SmiToDouble(d0, r1); | 
| +  __ SmiToDouble(d0, r4); | 
|  | 
| __ bind(&done); | 
| -  // Compare operands. | 
| -  __ VFPCompareAndSetFlags(d0, d1); | 
| + | 
| +  // Compare operands. | 
| +  __ fcmpu(d0, d1); | 
|  | 
| // Don't base result on status bits when a NaN is involved. | 
| -  __ b(vs, &unordered); | 
| +  __ bunordered(&unordered); | 
|  | 
| // Return a result of -1, 0, or 1, based on status bits. | 
| -  __ mov(r0, Operand(EQUAL), LeaveCC, eq); | 
| -  __ mov(r0, Operand(LESS), LeaveCC, lt); | 
| -  __ mov(r0, Operand(GREATER), LeaveCC, gt); | 
| +  __ beq(&equal); | 
| +  __ blt(&less_than); | 
| +  // Assume greater than. | 
| +  __ li(r3, Operand(GREATER)); | 
| +  __ Ret(); | 
| +  __ bind(&equal); | 
| +  __ li(r3, Operand(EQUAL)); | 
| +  __ Ret(); | 
| +  __ bind(&less_than); | 
| +  __ li(r3, Operand(LESS)); | 
| __ Ret(); | 
|  | 
| __ bind(&unordered); | 
| @@ -3803,18 +3992,18 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 
|  | 
| __ bind(&maybe_undefined1); | 
| if (Token::IsOrderedRelationalCompareOp(op_)) { | 
| -    __ CompareRoot(r0, Heap::kUndefinedValueRootIndex); | 
| -    __ b(ne, &miss); | 
| -    __ JumpIfSmi(r1, &unordered); | 
| -    __ CompareObjectType(r1, r2, r2, HEAP_NUMBER_TYPE); | 
| -    __ b(ne, &maybe_undefined2); | 
| -    __ jmp(&unordered); | 
| +    __ CompareRoot(r3, Heap::kUndefinedValueRootIndex); | 
| +    __ bne(&miss); | 
| +    __ JumpIfSmi(r4, &unordered); | 
| +    __ CompareObjectType(r4, r5, r5, HEAP_NUMBER_TYPE); | 
| +    __ bne(&maybe_undefined2); | 
| +    __ b(&unordered); | 
| } | 
|  | 
| __ bind(&maybe_undefined2); | 
| if (Token::IsOrderedRelationalCompareOp(op_)) { | 
| -    __ CompareRoot(r1, Heap::kUndefinedValueRootIndex); | 
| -    __ b(eq, &unordered); | 
| +    __ CompareRoot(r4, Heap::kUndefinedValueRootIndex); | 
| +    __ beq(&unordered); | 
| } | 
|  | 
| __ bind(&miss); | 
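|  | 
| // The fcmpu/branch ladder above materializes the same -1/0/1 result as | 
| // this sketch; NaN is diverted to &unordered by bunordered before any | 
| // result is chosen: | 
| static int DoubleCompareResult(double left, double right) { | 
|   if (left == right) return EQUAL;  // beq(&equal) | 
|   if (left < right) return LESS;    // blt(&less_than) | 
|   return GREATER;                   // fall-through: assume greater than | 
| } | 
|  | 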
| @@ -3824,35 +4013,37 @@ void ICCompareStub::GenerateNumbers(MacroAssembler* masm) { | 
|  | 
| void ICCompareStub::GenerateInternalizedStrings(MacroAssembler* masm) { | 
| DCHECK(state_ == CompareIC::INTERNALIZED_STRING); | 
| -  Label miss; | 
| +  Label miss, not_equal; | 
|  | 
| // Registers containing left and right operands respectively. | 
| -  Register left = r1; | 
| -  Register right = r0; | 
| -  Register tmp1 = r2; | 
| -  Register tmp2 = r3; | 
| +  Register left = r4; | 
| +  Register right = r3; | 
| +  Register tmp1 = r5; | 
| +  Register tmp2 = r6; | 
|  | 
| // Check that both operands are heap objects. | 
| __ JumpIfEitherSmi(left, right, &miss); | 
|  | 
| -  // Check that both operands are internalized strings. | 
| -  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| -  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| -  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| -  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
| +  // Check that both operands are internalized strings. | 
| +  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| +  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| +  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| +  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
| STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0); | 
| -  __ orr(tmp1, tmp1, Operand(tmp2)); | 
| -  __ tst(tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 
| -  __ b(ne, &miss); | 
| +  __ orx(tmp1, tmp1, tmp2); | 
| +  __ andi(r0, tmp1, Operand(kIsNotStringMask | kIsNotInternalizedMask)); | 
| +  __ bne(&miss, cr0); | 
|  | 
| // Internalized strings are compared by identity. | 
| __ cmp(left, right); | 
| -  // Make sure r0 is non-zero. At this point input operands are | 
| +  __ bne(&not_equal); | 
| +  // Make sure r3 is non-zero. At this point input operands are | 
| // guaranteed to be non-zero. | 
| -  DCHECK(right.is(r0)); | 
| +  DCHECK(right.is(r3)); | 
| STATIC_ASSERT(EQUAL == 0); | 
| STATIC_ASSERT(kSmiTag == 0); | 
| -  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 
| +  __ bind(&not_equal); | 
| __ Ret(); | 
|  | 
| __ bind(&miss); | 
| @@ -3866,32 +4057,33 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | 
| Label miss; | 
|  | 
| // Registers containing left and right operands respectively. | 
| -  Register left = r1; | 
| -  Register right = r0; | 
| -  Register tmp1 = r2; | 
| -  Register tmp2 = r3; | 
| +  Register left = r4; | 
| +  Register right = r3; | 
| +  Register tmp1 = r5; | 
| +  Register tmp2 = r6; | 
|  | 
| // Check that both operands are heap objects. | 
| __ JumpIfEitherSmi(left, right, &miss); | 
|  | 
| // Check that both operands are unique names. This leaves the instance | 
| // types loaded in tmp1 and tmp2. | 
| -  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| -  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| -  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| -  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
| +  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| +  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| +  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| +  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
|  | 
| __ JumpIfNotUniqueName(tmp1, &miss); | 
| __ JumpIfNotUniqueName(tmp2, &miss); | 
|  | 
| // Unique names are compared by identity. | 
| __ cmp(left, right); | 
| -  // Make sure r0 is non-zero. At this point input operands are | 
| +  __ bne(&miss); | 
| +  // Make sure r3 is non-zero. At this point input operands are | 
| // guaranteed to be non-zero. | 
| -  DCHECK(right.is(r0)); | 
| +  DCHECK(right.is(r3)); | 
| STATIC_ASSERT(EQUAL == 0); | 
| STATIC_ASSERT(kSmiTag == 0); | 
| -  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 
| __ Ret(); | 
|  | 
| __ bind(&miss); | 
| @@ -3901,38 +4093,40 @@ void ICCompareStub::GenerateUniqueNames(MacroAssembler* masm) { | 
|  | 
| void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 
| DCHECK(state_ == CompareIC::STRING); | 
| -  Label miss; | 
| +  Label miss, not_identical, is_symbol; | 
|  | 
| bool equality = Token::IsEqualityOp(op_); | 
|  | 
| // Registers containing left and right operands respectively. | 
| -  Register left = r1; | 
| -  Register right = r0; | 
| -  Register tmp1 = r2; | 
| -  Register tmp2 = r3; | 
| -  Register tmp3 = r4; | 
| -  Register tmp4 = r5; | 
| +  Register left = r4; | 
| +  Register right = r3; | 
| +  Register tmp1 = r5; | 
| +  Register tmp2 = r6; | 
| +  Register tmp3 = r7; | 
| +  Register tmp4 = r8; | 
|  | 
| // Check that both operands are heap objects. | 
| __ JumpIfEitherSmi(left, right, &miss); | 
|  | 
| // Check that both operands are strings. This leaves the instance | 
| // types loaded in tmp1 and tmp2. | 
| -  __ ldr(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| -  __ ldr(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| -  __ ldrb(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| -  __ ldrb(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
| +  __ LoadP(tmp1, FieldMemOperand(left, HeapObject::kMapOffset)); | 
| +  __ LoadP(tmp2, FieldMemOperand(right, HeapObject::kMapOffset)); | 
| +  __ lbz(tmp1, FieldMemOperand(tmp1, Map::kInstanceTypeOffset)); | 
| +  __ lbz(tmp2, FieldMemOperand(tmp2, Map::kInstanceTypeOffset)); | 
| STATIC_ASSERT(kNotStringTag != 0); | 
| -  __ orr(tmp3, tmp1, tmp2); | 
| -  __ tst(tmp3, Operand(kIsNotStringMask)); | 
| -  __ b(ne, &miss); | 
| +  __ orx(tmp3, tmp1, tmp2); | 
| +  __ andi(r0, tmp3, Operand(kIsNotStringMask)); | 
| +  __ bne(&miss, cr0); | 
|  | 
| // Fast check for identical strings. | 
| __ cmp(left, right); | 
| STATIC_ASSERT(EQUAL == 0); | 
| STATIC_ASSERT(kSmiTag == 0); | 
| -  __ mov(r0, Operand(Smi::FromInt(EQUAL)), LeaveCC, eq); | 
| -  __ Ret(eq); | 
| +  __ bne(&not_identical); | 
| +  __ LoadSmiLiteral(r3, Smi::FromInt(EQUAL)); | 
| +  __ Ret(); | 
| +  __ bind(&not_identical); | 
|  | 
| // Handle not identical strings. | 
|  | 
| @@ -3942,12 +4136,14 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 
| if (equality) { | 
| DCHECK(GetCondition() == eq); | 
| STATIC_ASSERT(kInternalizedTag == 0); | 
| -    __ orr(tmp3, tmp1, Operand(tmp2)); | 
| -    __ tst(tmp3, Operand(kIsNotInternalizedMask)); | 
| -    // Make sure r0 is non-zero. At this point input operands are | 
| +    __ orx(tmp3, tmp1, tmp2); | 
| +    __ andi(r0, tmp3, Operand(kIsNotInternalizedMask)); | 
| +    __ bne(&is_symbol, cr0); | 
| +    // Make sure r3 is non-zero. At this point input operands are | 
| // guaranteed to be non-zero. | 
| -    DCHECK(right.is(r0)); | 
| -    __ Ret(eq); | 
| +    DCHECK(right.is(r3)); | 
| +    __ Ret(); | 
| +    __ bind(&is_symbol); | 
| } | 
|  | 
| // Check that both strings are sequential ASCII. | 
| @@ -3958,10 +4154,10 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 
| // Compare flat ASCII strings. Returns when done. | 
| if (equality) { | 
| StringCompareStub::GenerateFlatAsciiStringEquals( | 
| -        masm, left, right, tmp1, tmp2, tmp3); | 
| +        masm, left, right, tmp1, tmp2); | 
| } else { | 
| StringCompareStub::GenerateCompareFlatAsciiStrings( | 
| -        masm, left, right, tmp1, tmp2, tmp3, tmp4); | 
| +        masm, left, right, tmp1, tmp2, tmp3); | 
| } | 
|  | 
| // Handle more complex cases in runtime. | 
| @@ -3981,16 +4177,16 @@ void ICCompareStub::GenerateStrings(MacroAssembler* masm) { | 
| void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 
| DCHECK(state_ == CompareIC::OBJECT); | 
| Label miss; | 
| -  __ and_(r2, r1, Operand(r0)); | 
| -  __ JumpIfSmi(r2, &miss); | 
| +  __ and_(r5, r4, r3); | 
| +  __ JumpIfSmi(r5, &miss); | 
|  | 
| -  __ CompareObjectType(r0, r2, r2, JS_OBJECT_TYPE); | 
| -  __ b(ne, &miss); | 
| -  __ CompareObjectType(r1, r2, r2, JS_OBJECT_TYPE); | 
| -  __ b(ne, &miss); | 
| +  __ CompareObjectType(r3, r5, r5, JS_OBJECT_TYPE); | 
| +  __ bne(&miss); | 
| +  __ CompareObjectType(r4, r5, r5, JS_OBJECT_TYPE); | 
| +  __ bne(&miss); | 
|  | 
| DCHECK(GetCondition() == eq); | 
| -  __ sub(r0, r0, Operand(r1)); | 
| +  __ sub(r3, r3, r4); | 
| __ Ret(); | 
|  | 
| __ bind(&miss); | 
| @@ -4000,16 +4196,16 @@ void ICCompareStub::GenerateObjects(MacroAssembler* masm) { | 
|  | 
| void ICCompareStub::GenerateKnownObjects(MacroAssembler* masm) { | 
| Label miss; | 
| -  __ and_(r2, r1, Operand(r0)); | 
| -  __ JumpIfSmi(r2, &miss); | 
| -  __ ldr(r2, FieldMemOperand(r0, HeapObject::kMapOffset)); | 
| -  __ ldr(r3, FieldMemOperand(r1, HeapObject::kMapOffset)); | 
| -  __ cmp(r2, Operand(known_map_)); | 
| -  __ b(ne, &miss); | 
| -  __ cmp(r3, Operand(known_map_)); | 
| -  __ b(ne, &miss); | 
| - | 
| -  __ sub(r0, r0, Operand(r1)); | 
| +  __ and_(r5, r4, r3); | 
| +  __ JumpIfSmi(r5, &miss); | 
| +  __ LoadP(r5, FieldMemOperand(r3, HeapObject::kMapOffset)); | 
| +  __ LoadP(r6, FieldMemOperand(r4, HeapObject::kMapOffset)); | 
| +  __ Cmpi(r5, Operand(known_map_), r0); | 
| +  __ bne(&miss); | 
| +  __ Cmpi(r6, Operand(known_map_), r0); | 
| +  __ bne(&miss); | 
| + | 
| +  __ sub(r3, r3, r4); | 
| __ Ret(); | 
|  | 
| __ bind(&miss); | 
| @@ -4025,49 +4221,63 @@ void ICCompareStub::GenerateMiss(MacroAssembler* masm) { | 
| ExternalReference(IC_Utility(IC::kCompareIC_Miss), isolate()); | 
|  | 
| FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 
| -    __ Push(r1, r0); | 
| -    __ Push(lr, r1, r0); | 
| -    __ mov(ip, Operand(Smi::FromInt(op_))); | 
| +    __ Push(r4, r3); | 
| +    __ mflr(r0); | 
| +    __ Push(r0, r4, r3); | 
| +    __ LoadSmiLiteral(ip, Smi::FromInt(op_)); | 
| __ push(ip); | 
| __ CallExternalReference(miss, 3); | 
| // Compute the entry point of the rewritten stub. | 
| -    __ add(r2, r0, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| +    __ addi(r5, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); | 
| // Restore registers. | 
| -    __ pop(lr); | 
| -    __ Pop(r1, r0); | 
| +    __ pop(r0); | 
| +    __ mtlr(r0); | 
| +    __ Pop(r4, r3); | 
| } | 
|  | 
| -  __ Jump(r2); | 
| +  __ Jump(r5); | 
| } | 
|  | 
|  | 
| +// This stub is paired with DirectCEntryStub::GenerateCall | 
| void DirectCEntryStub::Generate(MacroAssembler* masm) { | 
| // Place the return address on the stack, making the call | 
| // GC safe. The RegExp backend also relies on this. | 
| -  __ str(lr, MemOperand(sp, 0)); | 
| -  __ blx(ip);  // Call the C++ function. | 
| -  __ VFPEnsureFPSCRState(r2); | 
| -  __ ldr(pc, MemOperand(sp, 0)); | 
| +  __ mflr(r0); | 
| +  __ StoreP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); | 
| +  __ Call(ip);  // Call the C++ function. | 
| +  __ LoadP(r0, MemOperand(sp, kStackFrameExtraParamSlot * kPointerSize)); | 
| +  __ mtlr(r0); | 
| +  __ blr(); | 
| } | 
|  | 
|  | 
| void DirectCEntryStub::GenerateCall(MacroAssembler* masm, | 
| Register target) { | 
| +#if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR) | 
| +  // Native AIX/PPC64 Linux use a function descriptor. | 
| +  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(target, kPointerSize)); | 
| +  __ LoadP(ip, MemOperand(target, 0));  // Instruction address | 
| +#else | 
| +  // ip needs to be set for DirectCEntryStub::Generate, and also | 
| +  // for ABI_TOC_ADDRESSABILITY_VIA_IP. | 
| +  __ Move(ip, target); | 
| +#endif | 
| + | 
| intptr_t code = | 
| reinterpret_cast<intptr_t>(GetCode().location()); | 
| -  __ Move(ip, target); | 
| -  __ mov(lr, Operand(code, RelocInfo::CODE_TARGET)); | 
| -  __ blx(lr);  // Call the stub. | 
| +  __ mov(r0, Operand(code, RelocInfo::CODE_TARGET)); | 
| +  __ Call(r0);  // Call the stub. | 
| } | 
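|  | 
| // The descriptor dereference above assumes the AIX/PPC64 ELF layout, | 
| // sketched here with illustrative field names: | 
| struct FunctionDescriptorSketch { | 
|   void* entry;  // code address: LoadP(ip, MemOperand(target, 0)) | 
|   void* toc;    // TOC base: loaded into ABI_TOC_REGISTER at kPointerSize | 
|   void* env;    // environment pointer; unused by this stub | 
| }; | 
| // Builds without descriptors take the #else path and treat target | 
| // itself as the code address, moving it straight into ip. | 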
|  | 
|  | 
| void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | 
| -                                                      Label* miss, | 
| -                                                      Label* done, | 
| -                                                      Register receiver, | 
| -                                                      Register properties, | 
| +                                                      Label* miss, | 
| +                                                      Label* done, | 
| +                                                      Register receiver, | 
| +                                                      Register properties, | 
| Handle<Name> name, | 
| -                                                      Register scratch0) { | 
| +                                                      Register scratch0) { | 
| DCHECK(name->IsUniqueName()); | 
| // If names of slots in range from 1 to kProbes - 1 for the hash value are | 
| // not equal to the name and kProbes-th slot is not used (its name is the | 
| @@ -4079,64 +4289,71 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | 
| // Compute the masked index: (hash + i + i * i) & mask. | 
| Register index = scratch0; | 
| // Capacity is smi 2^n. | 
| -    __ ldr(index, FieldMemOperand(properties, kCapacityOffset)); | 
| -    __ sub(index, index, Operand(1)); | 
| -    __ and_(index, index, Operand( | 
| -        Smi::FromInt(name->Hash() + NameDictionary::GetProbeOffset(i)))); | 
| +    __ LoadP(index, FieldMemOperand(properties, kCapacityOffset)); | 
| +    __ subi(index, index, Operand(1)); | 
| +    __ LoadSmiLiteral(ip, Smi::FromInt(name->Hash() + | 
| +                                       NameDictionary::GetProbeOffset(i))); | 
| +    __ and_(index, index, ip); | 
|  | 
| // Scale the index by multiplying by the entry size. | 
| DCHECK(NameDictionary::kEntrySize == 3); | 
| -    __ add(index, index, Operand(index, LSL, 1));  // index *= 3. | 
| +    __ ShiftLeftImm(ip, index, Operand(1)); | 
| +    __ add(index, index, ip);  // index *= 3. | 
|  | 
| Register entity_name = scratch0; | 
| // Having undefined at this place means the name is not contained. | 
| -    DCHECK_EQ(kSmiTagSize, 1); | 
| Register tmp = properties; | 
| -    __ add(tmp, properties, Operand(index, LSL, 1)); | 
| -    __ ldr(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 
| +    __ SmiToPtrArrayOffset(ip, index); | 
| +    __ add(tmp, properties, ip); | 
| +    __ LoadP(entity_name, FieldMemOperand(tmp, kElementsStartOffset)); | 
|  | 
| DCHECK(!tmp.is(entity_name)); | 
| __ LoadRoot(tmp, Heap::kUndefinedValueRootIndex); | 
| __ cmp(entity_name, tmp); | 
| -    __ b(eq, done); | 
| +    __ beq(done); | 
|  | 
| // Load the hole ready for use below: | 
| __ LoadRoot(tmp, Heap::kTheHoleValueRootIndex); | 
|  | 
| // Stop if found the property. | 
| -    __ cmp(entity_name, Operand(Handle<Name>(name))); | 
| -    __ b(eq, miss); | 
| +    __ Cmpi(entity_name, Operand(Handle<Name>(name)), r0); | 
| +    __ beq(miss); | 
|  | 
| Label good; | 
| __ cmp(entity_name, tmp); | 
| -    __ b(eq, &good); | 
| +    __ beq(&good); | 
|  | 
| // Check if the entry name is not a unique name. | 
| -    __ ldr(entity_name, FieldMemOperand(entity_name, HeapObject::kMapOffset)); | 
| -    __ ldrb(entity_name, | 
| -            FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); | 
| +    __ LoadP(entity_name, FieldMemOperand(entity_name, | 
| +                                          HeapObject::kMapOffset)); | 
| +    __ lbz(entity_name, | 
| +           FieldMemOperand(entity_name, Map::kInstanceTypeOffset)); | 
| __ JumpIfNotUniqueName(entity_name, miss); | 
| __ bind(&good); | 
|  | 
| // Restore the properties. | 
| -    __ ldr(properties, | 
| -           FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 
| +    __ LoadP(properties, | 
| +             FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 
| } | 
|  | 
| const int spill_mask = | 
| -      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | r3.bit() | | 
| -       r2.bit() | r1.bit() | r0.bit()); | 
| +      (r0.bit() | r9.bit() | r8.bit() | r7.bit() | r6.bit() | | 
| +       r5.bit() | r4.bit() | r3.bit()); | 
|  | 
| -  __ stm(db_w, sp, spill_mask); | 
| -  __ ldr(r0, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 
| -  __ mov(r1, Operand(Handle<Name>(name))); | 
| +  __ mflr(r0); | 
| +  __ MultiPush(spill_mask); | 
| + | 
| +  __ LoadP(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 
| +  __ mov(r4, Operand(Handle<Name>(name))); | 
| NameDictionaryLookupStub stub(masm->isolate(), NEGATIVE_LOOKUP); | 
| __ CallStub(&stub); | 
| -  __ cmp(r0, Operand::Zero()); | 
| -  __ ldm(ia_w, sp, spill_mask); | 
| +  __ cmpi(r3, Operand::Zero()); | 
| + | 
| +  __ MultiPop(spill_mask);  // MultiPop does not touch condition flags | 
| +  __ mtlr(r0); | 
|  | 
| -  __ b(eq, done); | 
| -  __ b(ne, miss); | 
| +  __ beq(done); | 
| +  __ bne(miss); | 
| } | 
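|  | 
| // One inlined probe above reduces to this sketch, per the "(hash + i + | 
| // i * i) & mask" comment; kEntrySize == 3 is why the scaling is done | 
| // with a shift and an add: | 
| static int ProbeEntrySketch(uint32_t hash, uint32_t capacity, uint32_t i) { | 
|   uint32_t index = (hash + i + i * i) & (capacity - 1);  // capacity is 2^n | 
|   return index * NameDictionary::kEntrySize;             // index *= 3 | 
| } | 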
|  | 
|  | 
| @@ -4145,12 +4362,12 @@ void NameDictionaryLookupStub::GenerateNegativeLookup(MacroAssembler* masm, | 
| // the |miss| label otherwise. | 
| // If lookup was successful |scratch2| will be equal to elements + 4 * index. | 
| void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, | 
| -                                                      Label* miss, | 
| -                                                      Label* done, | 
| -                                                      Register elements, | 
| -                                                      Register name, | 
| -                                                      Register scratch1, | 
| -                                                      Register scratch2) { | 
| +                                                      Label* miss, | 
| +                                                      Label* done, | 
| +                                                      Register elements, | 
| +                                                      Register name, | 
| +                                                      Register scratch1, | 
| +                                                      Register scratch2) { | 
| DCHECK(!elements.is(scratch1)); | 
| DCHECK(!elements.is(scratch2)); | 
| DCHECK(!name.is(scratch1)); | 
| @@ -4159,61 +4376,66 @@ void NameDictionaryLookupStub::GeneratePositiveLookup(MacroAssembler* masm, | 
| __ AssertName(name); | 
|  | 
| // Compute the capacity mask. | 
| -  __ ldr(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 
| -  __ SmiUntag(scratch1); | 
| -  __ sub(scratch1, scratch1, Operand(1)); | 
| +  __ LoadP(scratch1, FieldMemOperand(elements, kCapacityOffset)); | 
| +  __ SmiUntag(scratch1);  // convert smi to int | 
| +  __ subi(scratch1, scratch1, Operand(1)); | 
|  | 
| // Generate an unrolled loop that performs a few probes before | 
| // giving up. Measurements done on Gmail indicate that 2 probes | 
| // cover ~93% of loads from dictionaries. | 
| for (int i = 0; i < kInlinedProbes; i++) { | 
| // Compute the masked index: (hash + i + i * i) & mask. | 
| -    __ ldr(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 
| +    __ lwz(scratch2, FieldMemOperand(name, Name::kHashFieldOffset)); | 
| if (i > 0) { | 
| // Add the probe offset (i + i * i) left shifted to avoid right shifting | 
| // the hash in a separate instruction. The value hash + i + i * i is right | 
| // shifted in the following and instruction. | 
| DCHECK(NameDictionary::GetProbeOffset(i) < | 
| 1 << (32 - Name::kHashFieldOffset)); | 
| -      __ add(scratch2, scratch2, Operand( | 
| -          NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 
| +      __ addi(scratch2, scratch2, Operand( | 
| +                  NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 
| } | 
| -    __ and_(scratch2, scratch1, Operand(scratch2, LSR, Name::kHashShift)); | 
| +    __ srwi(scratch2, scratch2, Operand(Name::kHashShift)); | 
| +    __ and_(scratch2, scratch1, scratch2); | 
|  | 
| // Scale the index by multiplying by the element size. | 
| DCHECK(NameDictionary::kEntrySize == 3); | 
| // scratch2 = scratch2 * 3. | 
| -    __ add(scratch2, scratch2, Operand(scratch2, LSL, 1)); | 
| +    __ ShiftLeftImm(ip, scratch2, Operand(1)); | 
| +    __ add(scratch2, scratch2, ip); | 
|  | 
| // Check if the key is identical to the name. | 
| -    __ add(scratch2, elements, Operand(scratch2, LSL, 2)); | 
| -    __ ldr(ip, FieldMemOperand(scratch2, kElementsStartOffset)); | 
| -    __ cmp(name, Operand(ip)); | 
| -    __ b(eq, done); | 
| +    __ ShiftLeftImm(ip, scratch2, Operand(kPointerSizeLog2)); | 
| +    __ add(scratch2, elements, ip); | 
| +    __ LoadP(ip, FieldMemOperand(scratch2, kElementsStartOffset)); | 
| +    __ cmp(name, ip); | 
| +    __ beq(done); | 
| } | 
|  | 
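| The unrolled probes implement quadratic probing: index = (hash + i + i*i) | 
| & mask, with the probe offset pre-shifted by kHashShift so a single srwi | 
| recovers hash + offset. A runnable sketch with illustrative values (the | 
| kHashShift, hash, and capacity constants are assumptions, not V8's): | 
|  | 
| #include <cstdint> | 
| #include <cstdio> | 
|  | 
| constexpr int kHashShift = 2;                  // assumed for illustration | 
| constexpr uint32_t GetProbeOffset(int i) { return i + i * i; } | 
|  | 
| int main() { | 
|   uint32_t hash_field = 0x12345678;            // Name::kHashFieldOffset load | 
|   uint32_t mask = 63;                          // capacity - 1 | 
|   for (int i = 0; i < 2; i++) {                // kInlinedProbes | 
|     // Adding the offset pre-shifted by kHashShift lets one right shift | 
|     // produce (hash + offset), as the generated code does. | 
|     uint32_t scratch = hash_field + (GetProbeOffset(i) << kHashShift); | 
|     uint32_t index = (scratch >> kHashShift) & mask; | 
|     uint32_t scaled = index + (index << 1);    // kEntrySize == 3, shift-add | 
|     printf("probe %d -> index %u (index*3 = %u)\n", i, index, scaled); | 
|   } | 
|   return 0; | 
| } | 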
| const int spill_mask = | 
| -      (lr.bit() | r6.bit() | r5.bit() | r4.bit() | | 
| -       r3.bit() | r2.bit() | r1.bit() | r0.bit()) & | 
| +      (r0.bit() | r9.bit() | r8.bit() | r7.bit() | | 
| +       r6.bit() | r5.bit() | r4.bit() | r3.bit()) & | 
| ~(scratch1.bit() | scratch2.bit()); | 
|  | 
| -  __ stm(db_w, sp, spill_mask); | 
| -  if (name.is(r0)) { | 
| -    DCHECK(!elements.is(r1)); | 
| -    __ Move(r1, name); | 
| -    __ Move(r0, elements); | 
| +  __ mflr(r0); | 
| +  __ MultiPush(spill_mask); | 
| +  if (name.is(r3)) { | 
| +    DCHECK(!elements.is(r4)); | 
| +    __ mr(r4, name); | 
| +    __ mr(r3, elements); | 
| } else { | 
| -    __ Move(r0, elements); | 
| -    __ Move(r1, name); | 
| +    __ mr(r3, elements); | 
| +    __ mr(r4, name); | 
| } | 
| NameDictionaryLookupStub stub(masm->isolate(), POSITIVE_LOOKUP); | 
| __ CallStub(&stub); | 
| -  __ cmp(r0, Operand::Zero()); | 
| -  __ mov(scratch2, Operand(r2)); | 
| -  __ ldm(ia_w, sp, spill_mask); | 
| +  __ cmpi(r3, Operand::Zero()); | 
| +  __ mr(scratch2, r5); | 
| +  __ MultiPop(spill_mask); | 
| +  __ mtlr(r0); | 
|  | 
| -  __ b(ne, done); | 
| -  __ b(eq, miss); | 
| +  __ bne(done); | 
| +  __ beq(miss); | 
| } | 
|  | 
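| The name.is(r3) split above orders the two mr instructions so the name is | 
| copied out of r3 before r3 is overwritten with elements. A tiny model of | 
| that hazard (registers as array slots, arbitrary values): | 
|  | 
| #include <cstdio> | 
|  | 
| int main() { | 
|   int regs[16] = {0}; | 
|   const int name = 3, elements = 5;  // suppose name arrived in r3 | 
|   regs[3] = 111;                     // name | 
|   regs[5] = 222;                     // elements | 
|   if (name == 3) { | 
|     regs[4] = regs[name];            // mr r4, name  (save name first!) | 
|     regs[3] = regs[elements];        // mr r3, elements | 
|   } else { | 
|     regs[3] = regs[elements]; | 
|     regs[4] = regs[name]; | 
|   } | 
|   printf("r3=%d r4=%d\n", regs[3], regs[4]);  // r3=222 r4=111 | 
|   return 0; | 
| } | 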
|  | 
| @@ -4222,29 +4444,30 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { | 
| // we cannot call anything that could cause a GC from this stub. | 
| // Registers: | 
| //  result: NameDictionary to probe | 
| -  //  r1: key | 
| +  //  r4: key | 
| //  dictionary: NameDictionary to probe. | 
| //  index: will hold an index of entry if lookup is successful. | 
| //         might alias with result_. | 
| // Returns: | 
| //  result_ is zero if lookup failed, non-zero otherwise. | 
|  | 
| -  Register result = r0; | 
| -  Register dictionary = r0; | 
| -  Register key = r1; | 
| -  Register index = r2; | 
| -  Register mask = r3; | 
| -  Register hash = r4; | 
| -  Register undefined = r5; | 
| -  Register entry_key = r6; | 
| +  Register result = r3; | 
| +  Register dictionary = r3; | 
| +  Register key = r4; | 
| +  Register index = r5; | 
| +  Register mask = r6; | 
| +  Register hash = r7; | 
| +  Register undefined = r8; | 
| +  Register entry_key = r9; | 
| +  Register scratch = r9; | 
|  | 
| Label in_dictionary, maybe_in_dictionary, not_in_dictionary; | 
|  | 
| -  __ ldr(mask, FieldMemOperand(dictionary, kCapacityOffset)); | 
| +  __ LoadP(mask, FieldMemOperand(dictionary, kCapacityOffset)); | 
| __ SmiUntag(mask); | 
| -  __ sub(mask, mask, Operand(1)); | 
| +  __ subi(mask, mask, Operand(1)); | 
|  | 
| -  __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 
| +  __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 
|  | 
| __ LoadRoot(undefined, Heap::kUndefinedValueRootIndex); | 
|  | 
| @@ -4257,33 +4480,36 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { | 
| // shifted in the following and instruction. | 
| DCHECK(NameDictionary::GetProbeOffset(i) < | 
| 1 << (32 - Name::kHashFieldOffset)); | 
| -      __ add(index, hash, Operand( | 
| -          NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 
| +      __ addi(index, hash, Operand( | 
| +                  NameDictionary::GetProbeOffset(i) << Name::kHashShift)); | 
| } else { | 
| -      __ mov(index, Operand(hash)); | 
| +      __ mr(index, hash); | 
| } | 
| -    __ and_(index, mask, Operand(index, LSR, Name::kHashShift)); | 
| +    __ srwi(r0, index, Operand(Name::kHashShift)); | 
| +    __ and_(index, mask, r0); | 
|  | 
| // Scale the index by multiplying by the entry size. | 
| DCHECK(NameDictionary::kEntrySize == 3); | 
| -    __ add(index, index, Operand(index, LSL, 1));  // index *= 3. | 
| +    __ ShiftLeftImm(scratch, index, Operand(1)); | 
| +    __ add(index, index, scratch);  // index *= 3. | 
|  | 
| DCHECK_EQ(kSmiTagSize, 1); | 
| -    __ add(index, dictionary, Operand(index, LSL, 2)); | 
| -    __ ldr(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 
| +    __ ShiftLeftImm(scratch, index, Operand(kPointerSizeLog2)); | 
| +    __ add(index, dictionary, scratch); | 
| +    __ LoadP(entry_key, FieldMemOperand(index, kElementsStartOffset)); | 
|  | 
| // Having undefined at this place means the name is not contained. | 
| -    __ cmp(entry_key, Operand(undefined)); | 
| -    __ b(eq, ¬_in_dictionary); | 
| +    __ cmp(entry_key, undefined); | 
| +    __ beq(¬_in_dictionary); | 
|  | 
| // Stop if found the property. | 
| -    __ cmp(entry_key, Operand(key)); | 
| -    __ b(eq, &in_dictionary); | 
| +    __ cmp(entry_key, key); | 
| +    __ beq(&in_dictionary); | 
|  | 
| if (i != kTotalProbes - 1 && mode_ == NEGATIVE_LOOKUP) { | 
| // Check if the entry name is not a unique name. | 
| -      __ ldr(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | 
| -      __ ldrb(entry_key, | 
| +      __ LoadP(entry_key, FieldMemOperand(entry_key, HeapObject::kMapOffset)); | 
| +      __ lbz(entry_key, | 
| FieldMemOperand(entry_key, Map::kInstanceTypeOffset)); | 
| __ JumpIfNotUniqueName(entry_key, &maybe_in_dictionary); | 
| } | 
| @@ -4294,16 +4520,16 @@ void NameDictionaryLookupStub::Generate(MacroAssembler* masm) { | 
| // treated as a lookup success. For positive lookup probing failure | 
| // should be treated as lookup failure. | 
| if (mode_ == POSITIVE_LOOKUP) { | 
| -    __ mov(result, Operand::Zero()); | 
| +    __ li(result, Operand::Zero()); | 
| __ Ret(); | 
| } | 
|  | 
| __ bind(&in_dictionary); | 
| -  __ mov(result, Operand(1)); | 
| +  __ li(result, Operand(1)); | 
| __ Ret(); | 
|  | 
| __ bind(¬_in_dictionary); | 
| -  __ mov(result, Operand::Zero()); | 
| +  __ li(result, Operand::Zero()); | 
| __ Ret(); | 
| } | 
|  | 
| @@ -4326,18 +4552,16 @@ void RecordWriteStub::Generate(MacroAssembler* masm) { | 
| Label skip_to_incremental_noncompacting; | 
| Label skip_to_incremental_compacting; | 
|  | 
| -  // The first two instructions are generated with labels so as to get the | 
| -  // offset fixed up correctly by the bind(Label*) call.  We patch it back and | 
| -  // forth between a compare instructions (a nop in this position) and the | 
| -  // real branch when we start and stop incremental heap marking. | 
| +  // The first two branch instructions are generated with labels so as to | 
| +  // get the offset fixed up correctly by the bind(Label*) call.  We patch | 
| +  // the branch condition back and forth between True and False | 
| +  // when we start and stop incremental heap marking. | 
| // See RecordWriteStub::Patch for details. | 
| -  { | 
| -    // Block literal pool emission, as the position of these two instructions | 
| -    // is assumed by the patching code. | 
| -    Assembler::BlockConstPoolScope block_const_pool(masm); | 
| -    __ b(&skip_to_incremental_noncompacting); | 
| -    __ b(&skip_to_incremental_compacting); | 
| -  } | 
| + | 
| +  // Clear the cr2 bit; with branch-on-True conditions the branches | 
| +  // below are initially not taken (NOP action). | 
| +  __ crclr(Assembler::encode_crbit(cr2, CR_LT)); | 
| +  __ blt(&skip_to_incremental_noncompacting, cr2); | 
| +  __ blt(&skip_to_incremental_compacting, cr2); | 
|  | 
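| In other words, both branches test the cr2 LT bit that crclr just cleared, | 
| so neither is taken until RecordWriteStub::Patch flips a branch's condition | 
| sense. An illustrative model (not V8 code; patching is reduced to a boolean | 
| per branch): | 
|  | 
| #include <cstdio> | 
|  | 
| struct StubModel { | 
|   bool first_branch_on_true = true;   // -> incremental_noncompacting | 
|   bool second_branch_on_true = true;  // -> incremental_compacting | 
|   void Run() const { | 
|     const bool cr2_lt = false;        // the bit itself stays clear | 
|     // A branch is taken when the bit matches its condition sense. | 
|     if (cr2_lt == first_branch_on_true) { puts("incremental"); return; } | 
|     if (cr2_lt == second_branch_on_true) { puts("compacting"); return; } | 
|     puts("store buffer only"); | 
|   } | 
| }; | 
|  | 
| int main() { | 
|   StubModel stub; | 
|   stub.Run();                         // store buffer only | 
|   stub.first_branch_on_true = false;  // Patch: activate incremental path | 
|   stub.Run();                         // incremental | 
|   return 0; | 
| } | 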
| if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | 
| __ RememberedSetHelper(object_, | 
| @@ -4356,10 +4580,7 @@ void RecordWriteStub::Generate(MacroAssembler* masm) { | 
|  | 
| // Initial mode of the stub is expected to be STORE_BUFFER_ONLY. | 
| // Will be checked in IncrementalMarking::ActivateGeneratedStub. | 
| -  DCHECK(Assembler::GetBranchOffset(masm->instr_at(0)) < (1 << 12)); | 
| -  DCHECK(Assembler::GetBranchOffset(masm->instr_at(4)) < (1 << 12)); | 
| -  PatchBranchIntoNop(masm, 0); | 
| -  PatchBranchIntoNop(masm, Assembler::kInstrSize); | 
| +  // Patching is not required on PPC: the initial path is effectively a NOP. | 
| } | 
|  | 
|  | 
| @@ -4369,7 +4590,7 @@ void RecordWriteStub::GenerateIncremental(MacroAssembler* masm, Mode mode) { | 
| if (remembered_set_action_ == EMIT_REMEMBERED_SET) { | 
| Label dont_need_remembered_set; | 
|  | 
| -    __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 
| +    __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 
| __ JumpIfNotInNewSpace(regs_.scratch0(),  // Value. | 
| regs_.scratch0(), | 
| &dont_need_remembered_set); | 
| @@ -4408,13 +4629,13 @@ void RecordWriteStub::InformIncrementalMarker(MacroAssembler* masm) { | 
| int argument_count = 3; | 
| __ PrepareCallCFunction(argument_count, regs_.scratch0()); | 
| Register address = | 
| -      r0.is(regs_.address()) ? regs_.scratch0() : regs_.address(); | 
| +      r3.is(regs_.address()) ? regs_.scratch0() : regs_.address(); | 
| DCHECK(!address.is(regs_.object())); | 
| -  DCHECK(!address.is(r0)); | 
| -  __ Move(address, regs_.address()); | 
| -  __ Move(r0, regs_.object()); | 
| -  __ Move(r1, address); | 
| -  __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); | 
| +  DCHECK(!address.is(r3)); | 
| +  __ mr(address, regs_.address()); | 
| +  __ mr(r3, regs_.object()); | 
| +  __ mr(r4, address); | 
| +  __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); | 
|  | 
| AllowExternalCallThatCantCauseGC scope(masm); | 
| __ CallCFunction( | 
| @@ -4432,15 +4653,18 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | 
| Label need_incremental; | 
| Label need_incremental_pop_scratch; | 
|  | 
| -  __ and_(regs_.scratch0(), regs_.object(), Operand(~Page::kPageAlignmentMask)); | 
| -  __ ldr(regs_.scratch1(), | 
| -         MemOperand(regs_.scratch0(), | 
| -                    MemoryChunk::kWriteBarrierCounterOffset)); | 
| -  __ sub(regs_.scratch1(), regs_.scratch1(), Operand(1), SetCC); | 
| -  __ str(regs_.scratch1(), | 
| +  DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0); | 
| +  __ lis(r0, Operand((~Page::kPageAlignmentMask >> 16))); | 
| +  __ and_(regs_.scratch0(), regs_.object(), r0); | 
| +  __ LoadP(regs_.scratch1(), | 
| MemOperand(regs_.scratch0(), | 
| MemoryChunk::kWriteBarrierCounterOffset)); | 
| -  __ b(mi, &need_incremental); | 
| +  __ subi(regs_.scratch1(), regs_.scratch1(), Operand(1)); | 
| +  __ StoreP(regs_.scratch1(), | 
| +            MemOperand(regs_.scratch0(), | 
| +                       MemoryChunk::kWriteBarrierCounterOffset)); | 
| +  // subi does not set condition flags; a record form could avoid this cmpi. | 
| +  __ cmpi(regs_.scratch1(), Operand::Zero()); | 
| +  __ blt(&need_incremental); | 
|  | 
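| The lis/and_ pair computes the page base by clearing the low alignment bits | 
| of the object address, and the explicit cmpi is needed because subi does | 
| not set condition flags. Equivalent logic (the 1MB page size is an assumed | 
| value for illustration): | 
|  | 
| #include <cstdint> | 
| #include <cstdio> | 
|  | 
| int main() { | 
|   const uintptr_t kPageAlignmentMask = (1u << 20) - 1;  // assumed 1MB pages | 
|   uintptr_t object = 0x12345678; | 
|   uintptr_t page_base = object & ~kPageAlignmentMask;   // lis + and_ | 
|   printf("page base %#zx\n", (size_t)page_base); | 
|  | 
|   int32_t write_barrier_counter = 1;  // lives in the page's MemoryChunk | 
|   --write_barrier_counter;            // subi (sets no flags) | 
|   bool need_incremental = write_barrier_counter < 0;  // cmpi + blt | 
|   printf("inform marker: %s\n", need_incremental ? "yes" : "no"); | 
|   return 0; | 
| } | 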
| // Let's look at the color of the object:  If it is not black we don't have | 
| // to inform the incremental marker. | 
| @@ -4460,7 +4684,7 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | 
| __ bind(&on_black); | 
|  | 
| // Get the value from the slot. | 
| -  __ ldr(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 
| +  __ LoadP(regs_.scratch0(), MemOperand(regs_.address(), 0)); | 
|  | 
| if (mode == INCREMENTAL_COMPACTION) { | 
| Label ensure_not_white; | 
| @@ -4512,11 +4736,11 @@ void RecordWriteStub::CheckNeedsToInformIncrementalMarker( | 
|  | 
| void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { | 
| // ----------- S t a t e ------------- | 
| -  //  -- r0    : element value to store | 
| -  //  -- r3    : element index as smi | 
| +  //  -- r3    : element value to store | 
| +  //  -- r6    : element index as smi | 
| //  -- sp[0] : array literal index in function as smi | 
| //  -- sp[4] : array literal | 
| -  // clobbers r1, r2, r4 | 
| +  // clobbers r3, r5, r7 | 
| // ----------------------------------- | 
|  | 
| Label element_done; | 
| @@ -4526,48 +4750,55 @@ void StoreArrayLiteralElementStub::Generate(MacroAssembler* masm) { | 
| Label fast_elements; | 
|  | 
| // Get array literal index, array literal and its map. | 
| -  __ ldr(r4, MemOperand(sp, 0 * kPointerSize)); | 
| -  __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); | 
| -  __ ldr(r2, FieldMemOperand(r1, JSObject::kMapOffset)); | 
| +  __ LoadP(r7, MemOperand(sp, 0 * kPointerSize)); | 
| +  __ LoadP(r4, MemOperand(sp, 1 * kPointerSize)); | 
| +  __ LoadP(r5, FieldMemOperand(r4, JSObject::kMapOffset)); | 
|  | 
| -  __ CheckFastElements(r2, r5, &double_elements); | 
| +  __ CheckFastElements(r5, r8, &double_elements); | 
| // FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS | 
| -  __ JumpIfSmi(r0, &smi_element); | 
| -  __ CheckFastSmiElements(r2, r5, &fast_elements); | 
| +  __ JumpIfSmi(r3, &smi_element); | 
| +  __ CheckFastSmiElements(r5, r8, &fast_elements); | 
|  | 
| // Storing into the array literal requires an elements transition. Call into | 
| // the runtime. | 
| __ bind(&slow_elements); | 
| // call. | 
| -  __ Push(r1, r3, r0); | 
| -  __ ldr(r5, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 
| -  __ ldr(r5, FieldMemOperand(r5, JSFunction::kLiteralsOffset)); | 
| -  __ Push(r5, r4); | 
| +  __ Push(r4, r6, r3); | 
| +  __ LoadP(r8, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 
| +  __ LoadP(r8, FieldMemOperand(r8, JSFunction::kLiteralsOffset)); | 
| +  __ Push(r8, r7); | 
| __ TailCallRuntime(Runtime::kStoreArrayLiteralElement, 5, 1); | 
|  | 
| // Array literal has ElementsKind of FAST_*_ELEMENTS and value is an object. | 
| __ bind(&fast_elements); | 
| -  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 
| -  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3)); | 
| -  __ add(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
| -  __ str(r0, MemOperand(r6, 0)); | 
| +  __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset)); | 
| +  __ SmiToPtrArrayOffset(r9, r6); | 
| +  __ add(r9, r8, r9); | 
| +#if V8_TARGET_ARCH_PPC64 | 
| +  // addi first: StorePU (stdu) needs its offset to be a multiple of 4, | 
| +  // which FixedArray::kHeaderSize - kHeapObjectTag is not. | 
| +  __ addi(r9, r9, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 
| +  __ StoreP(r3, MemOperand(r9)); | 
| +#else | 
| +  __ StorePU(r3, MemOperand(r9, FixedArray::kHeaderSize - kHeapObjectTag)); | 
| +#endif | 
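|  | 
| The PPC64 special case exists because stdu (StorePU on 64-bit) is DS-form: | 
| the low two displacement bits belong to the opcode, so only offsets that | 
| are multiples of 4 encode. A quick check with assumed 64-bit sizes: | 
|  | 
| #include <cstdio> | 
|  | 
| int main() { | 
|   const int kPointerSize = 8;                            // 64-bit build | 
|   const int kHeapObjectTag = 1; | 
|   const int kFixedArrayHeaderSize = 2 * kPointerSize;    // map + length | 
|   int offset = kFixedArrayHeaderSize - kHeapObjectTag;   // 15 | 
|   printf("offset %d encodable in stdu: %s\n", offset, | 
|          (offset & 3) == 0 ? "yes" : "no -> addi the address first"); | 
|   return 0; | 
| } | 
|  | 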
| // Update the write barrier for the array store. | 
| -  __ RecordWrite(r5, r6, r0, kLRHasNotBeenSaved, kDontSaveFPRegs, | 
| +  __ RecordWrite(r8, r9, r3, kLRHasNotBeenSaved, kDontSaveFPRegs, | 
| EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 
| __ Ret(); | 
|  | 
| // Array literal has ElementsKind of FAST_*_SMI_ELEMENTS or FAST_*_ELEMENTS, | 
| // and value is Smi. | 
| __ bind(&smi_element); | 
| -  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 
| -  __ add(r6, r5, Operand::PointerOffsetFromSmiKey(r3)); | 
| -  __ str(r0, FieldMemOperand(r6, FixedArray::kHeaderSize)); | 
| +  __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset)); | 
| +  __ SmiToPtrArrayOffset(r9, r6); | 
| +  __ add(r9, r8, r9); | 
| +  __ StoreP(r3, FieldMemOperand(r9, FixedArray::kHeaderSize), r0); | 
| __ Ret(); | 
|  | 
| // Array literal has ElementsKind of FAST_DOUBLE_ELEMENTS. | 
| __ bind(&double_elements); | 
| -  __ ldr(r5, FieldMemOperand(r1, JSObject::kElementsOffset)); | 
| -  __ StoreNumberToDoubleElements(r0, r3, r5, r6, d0, &slow_elements); | 
| +  __ LoadP(r8, FieldMemOperand(r4, JSObject::kElementsOffset)); | 
| +  __ StoreNumberToDoubleElements(r3, r6, r8, r9, d0, &slow_elements); | 
| __ Ret(); | 
| } | 
|  | 
| @@ -4577,25 +4808,31 @@ void StubFailureTrampolineStub::Generate(MacroAssembler* masm) { | 
| __ Call(ces.GetCode(), RelocInfo::CODE_TARGET); | 
| int parameter_count_offset = | 
| StubFailureTrampolineFrame::kCallerStackParameterCountFrameOffset; | 
| -  __ ldr(r1, MemOperand(fp, parameter_count_offset)); | 
| +  __ LoadP(r4, MemOperand(fp, parameter_count_offset)); | 
| if (function_mode_ == JS_FUNCTION_STUB_MODE) { | 
| -    __ add(r1, r1, Operand(1)); | 
| +    __ addi(r4, r4, Operand(1)); | 
| } | 
| masm->LeaveFrame(StackFrame::STUB_FAILURE_TRAMPOLINE); | 
| -  __ mov(r1, Operand(r1, LSL, kPointerSizeLog2)); | 
| -  __ add(sp, sp, r1); | 
| +  __ slwi(r4, r4, Operand(kPointerSizeLog2)); | 
| +  __ add(sp, sp, r4); | 
| __ Ret(); | 
| } | 
|  | 
|  | 
| void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 
| if (masm->isolate()->function_entry_hook() != NULL) { | 
| +    PredictableCodeSizeScope predictable(masm, | 
| +#if V8_TARGET_ARCH_PPC64 | 
| +                                         12 * Assembler::kInstrSize); | 
| +#else | 
| +                                         9 * Assembler::kInstrSize); | 
| +#endif | 
| ProfileEntryHookStub stub(masm->isolate()); | 
| -    int code_size = masm->CallStubSize(&stub) + 2 * Assembler::kInstrSize; | 
| -    PredictableCodeSizeScope predictable(masm, code_size); | 
| -    __ push(lr); | 
| +    __ mflr(r0); | 
| +    __ push(r0); | 
| __ CallStub(&stub); | 
| -    __ pop(lr); | 
| +    __ pop(r0); | 
| +    __ mtlr(r0); | 
| } | 
| } | 
|  | 
| @@ -4603,48 +4840,57 @@ void ProfileEntryHookStub::MaybeCallEntryHook(MacroAssembler* masm) { | 
| void ProfileEntryHookStub::Generate(MacroAssembler* masm) { | 
| // The entry hook is a "push lr" instruction, followed by a call. | 
| const int32_t kReturnAddressDistanceFromFunctionStart = | 
| -      3 * Assembler::kInstrSize; | 
| +      Assembler::kCallTargetAddressOffset + 2 * Assembler::kInstrSize; | 
|  | 
| -  // This should contain all kCallerSaved registers. | 
| +  // This should contain all kJSCallerSaved registers. | 
| const RegList kSavedRegs = | 
| -      1 <<  0 |  // r0 | 
| -      1 <<  1 |  // r1 | 
| -      1 <<  2 |  // r2 | 
| -      1 <<  3 |  // r3 | 
| -      1 <<  5 |  // r5 | 
| -      1 <<  9;   // r9 | 
| -  // We also save lr, so the count here is one higher than the mask indicates. | 
| -  const int32_t kNumSavedRegs = 7; | 
| +      kJSCallerSaved |  // Caller saved registers. | 
| +      r15.bit();        // Saved stack pointer. | 
|  | 
| -  DCHECK((kCallerSaved & kSavedRegs) == kCallerSaved); | 
| +  // We also save lr, so the count here is one higher than the mask indicates. | 
| +  const int32_t kNumSavedRegs = kNumJSCallerSaved + 2; | 
|  | 
| // Save all caller-save registers as this may be called from anywhere. | 
| -  __ stm(db_w, sp, kSavedRegs | lr.bit()); | 
| +  __ mflr(r0); | 
| +  __ MultiPush(kSavedRegs | r0.bit()); | 
|  | 
| // Compute the function's address for the first argument. | 
| -  __ sub(r0, lr, Operand(kReturnAddressDistanceFromFunctionStart)); | 
| +  __ mr(r3, r0); | 
| +  __ subi(r3, r3, Operand(kReturnAddressDistanceFromFunctionStart)); | 
|  | 
| // The caller's return address is above the saved temporaries. | 
| // Grab that for the second argument to the hook. | 
| -  __ add(r1, sp, Operand(kNumSavedRegs * kPointerSize)); | 
| +  __ addi(r4, sp, Operand(kNumSavedRegs * kPointerSize)); | 
|  | 
| // Align the stack if necessary. | 
| int frame_alignment = masm->ActivationFrameAlignment(); | 
| if (frame_alignment > kPointerSize) { | 
| -    __ mov(r5, sp); | 
| +    __ mr(r15, sp); | 
| DCHECK(IsPowerOf2(frame_alignment)); | 
| -    __ and_(sp, sp, Operand(-frame_alignment)); | 
| +    __ ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment))); | 
| } | 
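|  | 
| ClearRightImm rounds sp down by clearing its low log2(frame_alignment) | 
| bits, with the original sp parked in r15 for the restore further down. | 
| Equivalent arithmetic, assuming a 16-byte activation frame alignment: | 
|  | 
| #include <cstdint> | 
| #include <cstdio> | 
|  | 
| int main() { | 
|   uintptr_t sp = 0x7fff1238; | 
|   uintptr_t saved_sp = sp;            // __ mr(r15, sp) | 
|   const int frame_alignment = 16;     // assumed ActivationFrameAlignment() | 
|   // ClearRightImm(sp, sp, Operand(4)): clear the low 4 bits, rounding down. | 
|   sp &= ~static_cast<uintptr_t>(frame_alignment - 1); | 
|   printf("sp %#zx -> %#zx\n", (size_t)saved_sp, (size_t)sp); | 
|   return 0; | 
| } | 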
|  | 
| -#if V8_HOST_ARCH_ARM | 
| -  int32_t entry_hook = | 
| -      reinterpret_cast<int32_t>(isolate()->function_entry_hook()); | 
| +#if !defined(USE_SIMULATOR) | 
| +  uintptr_t entry_hook = | 
| +      reinterpret_cast<uintptr_t>(isolate()->function_entry_hook()); | 
| __ mov(ip, Operand(entry_hook)); | 
| + | 
| +#if ABI_USES_FUNCTION_DESCRIPTORS | 
| +  // Function descriptor | 
| +  __ LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(ip, kPointerSize)); | 
| +  __ LoadP(ip, MemOperand(ip, 0)); | 
| +#elif ABI_TOC_ADDRESSABILITY_VIA_IP | 
| +  // ip set above, so nothing to do. | 
| +#endif | 
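|  | 
| Under the descriptor-based PPC ABI, a C function pointer addresses a | 
| descriptor rather than code, so the real entry and the TOC are loaded | 
| before the indirect call. A hedged sketch of that layout (field order | 
| inferred from the two LoadP offsets above; the authoritative definition | 
| belongs to the platform ABI): | 
|  | 
| #include <cstdint> | 
| #include <cstdio> | 
|  | 
| struct FunctionDescriptor { | 
|   uintptr_t entry;  // code address       -> ends up in ip | 
|   uintptr_t toc;    // TOC pointer        -> ends up in ABI_TOC_REGISTER | 
|   uintptr_t env;    // environment pointer (unused here) | 
| }; | 
|  | 
| int main() { | 
|   FunctionDescriptor fd = {0x10001000, 0x20002000, 0}; | 
|   // Read the TOC before clobbering ip with the entry, as the stub does. | 
|   uintptr_t toc = fd.toc;   // LoadP(ABI_TOC_REGISTER, MemOperand(ip, 8)) | 
|   uintptr_t ip = fd.entry;  // LoadP(ip, MemOperand(ip, 0)) | 
|   printf("call %#zx with TOC %#zx\n", (size_t)ip, (size_t)toc); | 
|   return 0; | 
| } | 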
| + | 
| +  // PPC LINUX ABI: | 
| +  __ li(r0, Operand::Zero()); | 
| +  __ StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize)); | 
| #else | 
| // Under the simulator we need to indirect the entry hook through a | 
| // trampoline function at a known address. | 
| // It additionally takes an isolate as a third parameter | 
| -  __ mov(r2, Operand(ExternalReference::isolate_address(isolate()))); | 
| +  __ mov(r5, Operand(ExternalReference::isolate_address(isolate()))); | 
|  | 
| ApiFunction dispatcher(FUNCTION_ADDR(EntryHookTrampoline)); | 
| __ mov(ip, Operand(ExternalReference(&dispatcher, | 
| @@ -4653,13 +4899,19 @@ void ProfileEntryHookStub::Generate(MacroAssembler* masm) { | 
| #endif | 
| __ Call(ip); | 
|  | 
| +#if !defined(USE_SIMULATOR) | 
| +  __ addi(sp, sp, Operand(kNumRequiredStackFrameSlots * kPointerSize)); | 
| +#endif | 
| + | 
| // Restore the stack pointer if needed. | 
| if (frame_alignment > kPointerSize) { | 
| -    __ mov(sp, r5); | 
| +    __ mr(sp, r15); | 
| } | 
|  | 
| -  // Also pop pc to get Ret(0). | 
| -  __ ldm(ia_w, sp, kSavedRegs | pc.bit()); | 
| +  // Also pop lr to get Ret(0). | 
| +  __ MultiPop(kSavedRegs | r0.bit()); | 
| +  __ mtlr(r0); | 
| +  __ Ret(); | 
| } | 
|  | 
|  | 
| @@ -4674,7 +4926,7 @@ static void CreateArrayDispatch(MacroAssembler* masm, | 
| TERMINAL_FAST_ELEMENTS_KIND); | 
| for (int i = 0; i <= last_index; ++i) { | 
| ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 
| -      __ cmp(r3, Operand(kind)); | 
| +      __ Cmpi(r6, Operand(kind), r0); | 
| T stub(masm->isolate(), kind); | 
| __ TailCallStub(&stub, eq); | 
| } | 
| @@ -4689,10 +4941,10 @@ static void CreateArrayDispatch(MacroAssembler* masm, | 
|  | 
| static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | 
| AllocationSiteOverrideMode mode) { | 
| -  // r2 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | 
| -  // r3 - kind (if mode != DISABLE_ALLOCATION_SITES) | 
| -  // r0 - number of arguments | 
| -  // r1 - constructor? | 
| +  // r5 - allocation site (if mode != DISABLE_ALLOCATION_SITES) | 
| +  // r6 - kind (if mode != DISABLE_ALLOCATION_SITES) | 
| +  // r3 - number of arguments | 
| +  // r4 - constructor? | 
| // sp[0] - last argument | 
| Label normal_sequence; | 
| if (mode == DONT_OVERRIDE) { | 
| @@ -4704,14 +4956,14 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | 
| DCHECK(FAST_HOLEY_DOUBLE_ELEMENTS == 5); | 
|  | 
| // is the low bit set? If so, we are holey and that is good. | 
| -    __ tst(r3, Operand(1)); | 
| -    __ b(ne, &normal_sequence); | 
| +    __ andi(r0, r6, Operand(1)); | 
| +    __ bne(&normal_sequence, cr0); | 
| } | 
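|  | 
| The low-bit test works because the fast elements kinds alternate | 
| packed/holey, so odd values are holey and packed -> holey is +1. A sketch | 
| using that layout (the enum values are assumed to match V8's sequence; | 
| only FAST_HOLEY_DOUBLE_ELEMENTS == 5 is asserted above): | 
|  | 
| #include <cstdio> | 
|  | 
| enum ElementsKind { | 
|   FAST_SMI_ELEMENTS = 0, FAST_HOLEY_SMI_ELEMENTS = 1, | 
|   FAST_ELEMENTS = 2, FAST_HOLEY_ELEMENTS = 3, | 
|   FAST_DOUBLE_ELEMENTS = 4, FAST_HOLEY_DOUBLE_ELEMENTS = 5, | 
| }; | 
|  | 
| int main() { | 
|   for (int kind = 0; kind <= FAST_HOLEY_DOUBLE_ELEMENTS; kind++) { | 
|     // andi. r0, r6, 1 above: set cr0 from the parity of the kind. | 
|     printf("kind %d: %s\n", kind, (kind & 1) ? "holey" : "packed"); | 
|   } | 
|   return 0; | 
| } | 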
|  | 
| // look at the first argument | 
| -  __ ldr(r5, MemOperand(sp, 0)); | 
| -  __ cmp(r5, Operand::Zero()); | 
| -  __ b(eq, &normal_sequence); | 
| +  __ LoadP(r8, MemOperand(sp, 0)); | 
| +  __ cmpi(r8, Operand::Zero()); | 
| +  __ beq(&normal_sequence); | 
|  | 
| if (mode == DISABLE_ALLOCATION_SITES) { | 
| ElementsKind initial = GetInitialFastElementsKind(); | 
| @@ -4730,28 +4982,30 @@ static void CreateArrayDispatchOneArgument(MacroAssembler* masm, | 
| } else if (mode == DONT_OVERRIDE) { | 
| // We are going to create a holey array, but our kind is non-holey. | 
| // Fix kind and retry (only if we have an allocation site in the slot). | 
| -    __ add(r3, r3, Operand(1)); | 
| +    __ addi(r6, r6, Operand(1)); | 
|  | 
| if (FLAG_debug_code) { | 
| -      __ ldr(r5, FieldMemOperand(r2, 0)); | 
| -      __ CompareRoot(r5, Heap::kAllocationSiteMapRootIndex); | 
| +      __ LoadP(r8, FieldMemOperand(r5, 0)); | 
| +      __ CompareRoot(r8, Heap::kAllocationSiteMapRootIndex); | 
| __ Assert(eq, kExpectedAllocationSite); | 
| } | 
|  | 
| -    // Save the resulting elements kind in type info. We can't just store r3 | 
| +    // Save the resulting elements kind in type info. We can't just store r6 | 
| // in the AllocationSite::transition_info field because elements kind is | 
| // restricted to a portion of the field...upper bits need to be left alone. | 
| STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 
| -    __ ldr(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset)); | 
| -    __ add(r4, r4, Operand(Smi::FromInt(kFastElementsKindPackedToHoley))); | 
| -    __ str(r4, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset)); | 
| +    __ LoadP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset)); | 
| +    __ AddSmiLiteral(r7, r7, Smi::FromInt(kFastElementsKindPackedToHoley), r0); | 
| +    __ StoreP(r7, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset), | 
| +              r0); | 
|  | 
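| AddSmiLiteral works here because adding a Smi-tagged delta to the | 
| Smi-tagged transition_info bumps the elements-kind bits in place without | 
| disturbing the upper payload (a packed -> holey step cannot carry out of | 
| the kind field). A sketch assuming 32-bit Smi tagging (value << 1) and an | 
| illustrative 4-bit kind field: | 
|  | 
| #include <cstdint> | 
| #include <cstdio> | 
|  | 
| constexpr int32_t SmiFromInt(int32_t v) { return v << 1; } | 
| constexpr int32_t SmiToInt(int32_t smi) { return smi >> 1; } | 
|  | 
| int main() { | 
|   const int kPackedToHoley = 1;  // packed -> holey sibling is kind + 1 | 
|   int32_t transition_info = SmiFromInt((7 << 4) | 2);  // payload 7, kind 2 | 
|   transition_info += SmiFromInt(kPackedToHoley);       // AddSmiLiteral | 
|   printf("kind %d, payload %d\n", SmiToInt(transition_info) & 0xf, | 
|          SmiToInt(transition_info) >> 4); | 
|   return 0; | 
| } | 
|  | 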
| __ bind(&normal_sequence); | 
| int last_index = GetSequenceIndexFromFastElementsKind( | 
| TERMINAL_FAST_ELEMENTS_KIND); | 
| for (int i = 0; i <= last_index; ++i) { | 
| ElementsKind kind = GetFastElementsKindFromSequenceIndex(i); | 
| -      __ cmp(r3, Operand(kind)); | 
| +      __ mov(r0, Operand(kind)); | 
| +      __ cmp(r6, r0); | 
| ArraySingleArgumentConstructorStub stub(masm->isolate(), kind); | 
| __ TailCallStub(&stub, eq); | 
| } | 
| @@ -4810,13 +5064,13 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub( | 
| AllocationSiteOverrideMode mode) { | 
| if (argument_count_ == ANY) { | 
| Label not_zero_case, not_one_case; | 
| -    __ tst(r0, r0); | 
| -    __ b(ne, ¬_zero_case); | 
| +    __ cmpi(r3, Operand::Zero()); | 
| +    __ bne(¬_zero_case); | 
| CreateArrayDispatch<ArrayNoArgumentConstructorStub>(masm, mode); | 
|  | 
| __ bind(¬_zero_case); | 
| -    __ cmp(r0, Operand(1)); | 
| -    __ b(gt, ¬_one_case); | 
| +    __ cmpi(r3, Operand(1)); | 
| +    __ bgt(¬_one_case); | 
| CreateArrayDispatchOneArgument(masm, mode); | 
|  | 
| __ bind(¬_one_case); | 
| @@ -4835,9 +5089,9 @@ void ArrayConstructorStub::GenerateDispatchToArrayStub( | 
|  | 
| void ArrayConstructorStub::Generate(MacroAssembler* masm) { | 
| // ----------- S t a t e ------------- | 
| -  //  -- r0 : argc (only if argument_count_ == ANY) | 
| -  //  -- r1 : constructor | 
| -  //  -- r2 : AllocationSite or undefined | 
| +  //  -- r3 : argc (only if argument_count_ == ANY) | 
| +  //  -- r4 : constructor | 
| +  //  -- r5 : AllocationSite or undefined | 
| //  -- sp[0] : return address | 
| //  -- sp[4] : last argument | 
| // ----------------------------------- | 
| @@ -4847,26 +5101,26 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { | 
| // builtin Array functions which always have maps. | 
|  | 
| // Initial map for the builtin Array function should be a map. | 
| -    __ ldr(r4, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); | 
| +    __ LoadP(r7, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); | 
| // Will both indicate a NULL and a Smi. | 
| -    __ tst(r4, Operand(kSmiTagMask)); | 
| -    __ Assert(ne, kUnexpectedInitialMapForArrayFunction); | 
| -    __ CompareObjectType(r4, r4, r5, MAP_TYPE); | 
| +    __ TestIfSmi(r7, r0); | 
| +    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0); | 
| +    __ CompareObjectType(r7, r7, r8, MAP_TYPE); | 
| __ Assert(eq, kUnexpectedInitialMapForArrayFunction); | 
|  | 
| -    // We should either have undefined in r2 or a valid AllocationSite | 
| -    __ AssertUndefinedOrAllocationSite(r2, r4); | 
| +    // We should either have undefined in r5 or a valid AllocationSite | 
| +    __ AssertUndefinedOrAllocationSite(r5, r7); | 
| } | 
|  | 
| Label no_info; | 
| // Get the elements kind and case on that. | 
| -  __ CompareRoot(r2, Heap::kUndefinedValueRootIndex); | 
| -  __ b(eq, &no_info); | 
| +  __ CompareRoot(r5, Heap::kUndefinedValueRootIndex); | 
| +  __ beq(&no_info); | 
|  | 
| -  __ ldr(r3, FieldMemOperand(r2, AllocationSite::kTransitionInfoOffset)); | 
| -  __ SmiUntag(r3); | 
| +  __ LoadP(r6, FieldMemOperand(r5, AllocationSite::kTransitionInfoOffset)); | 
| +  __ SmiUntag(r6); | 
| STATIC_ASSERT(AllocationSite::ElementsKindBits::kShift == 0); | 
| -  __ and_(r3, r3, Operand(AllocationSite::ElementsKindBits::kMask)); | 
| +  __ And(r6, r6, Operand(AllocationSite::ElementsKindBits::kMask)); | 
| GenerateDispatchToArrayStub(masm, DONT_OVERRIDE); | 
|  | 
| __ bind(&no_info); | 
| @@ -4876,19 +5130,19 @@ void ArrayConstructorStub::Generate(MacroAssembler* masm) { | 
|  | 
| void InternalArrayConstructorStub::GenerateCase( | 
| MacroAssembler* masm, ElementsKind kind) { | 
| -  __ cmp(r0, Operand(1)); | 
| +  __ cmpli(r3, Operand(1)); | 
|  | 
| InternalArrayNoArgumentConstructorStub stub0(isolate(), kind); | 
| -  __ TailCallStub(&stub0, lo); | 
| +  __ TailCallStub(&stub0, lt); | 
|  | 
| InternalArrayNArgumentsConstructorStub stubN(isolate(), kind); | 
| -  __ TailCallStub(&stubN, hi); | 
| +  __ TailCallStub(&stubN, gt); | 
|  | 
| if (IsFastPackedElementsKind(kind)) { | 
| // We might need to create a holey array | 
| // look at the first argument | 
| -    __ ldr(r3, MemOperand(sp, 0)); | 
| -    __ cmp(r3, Operand::Zero()); | 
| +    __ LoadP(r6, MemOperand(sp, 0)); | 
| +    __ cmpi(r6, Operand::Zero()); | 
|  | 
| InternalArraySingleArgumentConstructorStub | 
| stub1_holey(isolate(), GetHoleyElementsKind(kind)); | 
| @@ -4902,8 +5156,8 @@ void InternalArrayConstructorStub::GenerateCase( | 
|  | 
| void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | 
| // ----------- S t a t e ------------- | 
| -  //  -- r0 : argc | 
| -  //  -- r1 : constructor | 
| +  //  -- r3 : argc | 
| +  //  -- r4 : constructor | 
| //  -- sp[0] : return address | 
| //  -- sp[4] : last argument | 
| // ----------------------------------- | 
| @@ -4913,35 +5167,34 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | 
| // builtin Array functions which always have maps. | 
|  | 
| // Initial map for the builtin Array function should be a map. | 
| -    __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); | 
| +    __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); | 
| // Will both indicate a NULL and a Smi. | 
| -    __ tst(r3, Operand(kSmiTagMask)); | 
| -    __ Assert(ne, kUnexpectedInitialMapForArrayFunction); | 
| -    __ CompareObjectType(r3, r3, r4, MAP_TYPE); | 
| +    __ TestIfSmi(r6, r0); | 
| +    __ Assert(ne, kUnexpectedInitialMapForArrayFunction, cr0); | 
| +    __ CompareObjectType(r6, r6, r7, MAP_TYPE); | 
| __ Assert(eq, kUnexpectedInitialMapForArrayFunction); | 
| } | 
|  | 
| // Figure out the right elements kind | 
| -  __ ldr(r3, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); | 
| -  // Load the map's "bit field 2" into |result|. We only need the first byte, | 
| -  // but the following bit field extraction takes care of that anyway. | 
| -  __ ldr(r3, FieldMemOperand(r3, Map::kBitField2Offset)); | 
| +  __ LoadP(r6, FieldMemOperand(r4, JSFunction::kPrototypeOrInitialMapOffset)); | 
| +  // Load the map's "bit field 2" into |result|. | 
| +  __ lbz(r6, FieldMemOperand(r6, Map::kBitField2Offset)); | 
| // Retrieve elements_kind from bit field 2. | 
| -  __ DecodeField<Map::ElementsKindBits>(r3); | 
| +  __ DecodeField<Map::ElementsKindBits>(r6); | 
|  | 
| if (FLAG_debug_code) { | 
| Label done; | 
| -    __ cmp(r3, Operand(FAST_ELEMENTS)); | 
| -    __ b(eq, &done); | 
| -    __ cmp(r3, Operand(FAST_HOLEY_ELEMENTS)); | 
| +    __ cmpi(r6, Operand(FAST_ELEMENTS)); | 
| +    __ beq(&done); | 
| +    __ cmpi(r6, Operand(FAST_HOLEY_ELEMENTS)); | 
| __ Assert(eq, | 
| kInvalidElementsKindForInternalArrayOrInternalPackedArray); | 
| __ bind(&done); | 
| } | 
|  | 
| Label fast_elements_case; | 
| -  __ cmp(r3, Operand(FAST_ELEMENTS)); | 
| -  __ b(eq, &fast_elements_case); | 
| +  __ cmpi(r6, Operand(FAST_ELEMENTS)); | 
| +  __ beq(&fast_elements_case); | 
| GenerateCase(masm, FAST_HOLEY_ELEMENTS); | 
|  | 
| __ bind(&fast_elements_case); | 
| @@ -4951,10 +5204,10 @@ void InternalArrayConstructorStub::Generate(MacroAssembler* masm) { | 
|  | 
| void CallApiFunctionStub::Generate(MacroAssembler* masm) { | 
| // ----------- S t a t e ------------- | 
| -  //  -- r0                  : callee | 
| -  //  -- r4                  : call_data | 
| -  //  -- r2                  : holder | 
| -  //  -- r1                  : api_function_address | 
| +  //  -- r3                  : callee | 
| +  //  -- r7                  : call_data | 
| +  //  -- r5                  : holder | 
| +  //  -- r4                  : api_function_address | 
| //  -- cp                  : context | 
| //  -- | 
| //  -- sp[0]               : last argument | 
| @@ -4963,10 +5216,10 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { | 
| //  -- sp[argc * 4]        : receiver | 
| // ----------------------------------- | 
|  | 
| -  Register callee = r0; | 
| -  Register call_data = r4; | 
| -  Register holder = r2; | 
| -  Register api_function_address = r1; | 
| +  Register callee = r3; | 
| +  Register call_data = r7; | 
| +  Register holder = r5; | 
| +  Register api_function_address = r4; | 
| Register context = cp; | 
|  | 
| int argc = ArgumentBits::decode(bit_field_); | 
| @@ -4987,7 +5240,7 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { | 
| // context save | 
| __ push(context); | 
| // load context from callee | 
| -  __ ldr(context, FieldMemOperand(callee, JSFunction::kContextOffset)); | 
| +  __ LoadP(context, FieldMemOperand(callee, JSFunction::kContextOffset)); | 
|  | 
| // callee | 
| __ push(callee); | 
| @@ -5011,30 +5264,35 @@ void CallApiFunctionStub::Generate(MacroAssembler* masm) { | 
| __ push(holder); | 
|  | 
| // Prepare arguments. | 
| -  __ mov(scratch, sp); | 
| +  __ mr(scratch, sp); | 
|  | 
| // Allocate the v8::Arguments structure in the arguments' space since | 
| // it's not controlled by GC. | 
| -  const int kApiStackSpace = 4; | 
| +  // PPC LINUX ABI: | 
| +  // | 
| +  // Create 5 extra slots on stack: | 
| +  //    [0] space for DirectCEntryStub's LR save | 
| +  //    [1-4] FunctionCallbackInfo | 
| +  const int kApiStackSpace = 5; | 
|  | 
| FrameScope frame_scope(masm, StackFrame::MANUAL); | 
| __ EnterExitFrame(false, kApiStackSpace); | 
|  | 
| -  DCHECK(!api_function_address.is(r0) && !scratch.is(r0)); | 
| -  // r0 = FunctionCallbackInfo& | 
| +  DCHECK(!api_function_address.is(r3) && !scratch.is(r3)); | 
| +  // r3 = FunctionCallbackInfo& | 
| // Arguments is after the return address. | 
| -  __ add(r0, sp, Operand(1 * kPointerSize)); | 
| +  __ addi(r3, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize)); | 
| // FunctionCallbackInfo::implicit_args_ | 
| -  __ str(scratch, MemOperand(r0, 0 * kPointerSize)); | 
| +  __ StoreP(scratch, MemOperand(r3, 0 * kPointerSize)); | 
| // FunctionCallbackInfo::values_ | 
| -  __ add(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); | 
| -  __ str(ip, MemOperand(r0, 1 * kPointerSize)); | 
| +  __ addi(ip, scratch, Operand((FCA::kArgsLength - 1 + argc) * kPointerSize)); | 
| +  __ StoreP(ip, MemOperand(r3, 1 * kPointerSize)); | 
| // FunctionCallbackInfo::length_ = argc | 
| -  __ mov(ip, Operand(argc)); | 
| -  __ str(ip, MemOperand(r0, 2 * kPointerSize)); | 
| +  __ li(ip, Operand(argc)); | 
| +  __ stw(ip, MemOperand(r3, 2 * kPointerSize)); | 
| // FunctionCallbackInfo::is_construct_call = 0 | 
| -  __ mov(ip, Operand::Zero()); | 
| -  __ str(ip, MemOperand(r0, 3 * kPointerSize)); | 
| +  __ li(ip, Operand::Zero()); | 
| +  __ stw(ip, MemOperand(r3, 2 * kPointerSize + kIntSize)); | 
|  | 
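| The stores above build the FunctionCallbackInfo block in place; note that | 
| length_ and is_construct_call are written as ints (stw), packed into what | 
| on ARM were two pointer-sized slots. A hedged struct sketch of the layout | 
| (inferred from the store offsets; the authoritative definition is V8's | 
| FunctionCallbackInfo): | 
|  | 
| #include <cstddef> | 
| #include <cstdint> | 
| #include <cstdio> | 
|  | 
| struct FunctionCallbackInfoBlock { | 
|   void* implicit_args;        // StoreP at 0 * kPointerSize | 
|   void* values;               // StoreP at 1 * kPointerSize | 
|   int32_t length;             // stw at 2 * kPointerSize | 
|   int32_t is_construct_call;  // stw at 2 * kPointerSize + kIntSize | 
| }; | 
|  | 
| int main() { | 
|   printf("length at %zu, is_construct_call at %zu\n", | 
|          offsetof(FunctionCallbackInfoBlock, length), | 
|          offsetof(FunctionCallbackInfoBlock, is_construct_call)); | 
|   return 0; | 
| } | 
|  | 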
| const int kStackUnwindSpace = argc + FCA::kArgsLength + 1; | 
| ExternalReference thunk_ref = | 
| @@ -5065,22 +5323,50 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { | 
| //  -- sp[0]                  : name | 
| //  -- sp[4 - kArgsLength*4]  : PropertyCallbackArguments object | 
| //  -- ... | 
| -  //  -- r2                     : api_function_address | 
| +  //  -- r5                     : api_function_address | 
| // ----------------------------------- | 
|  | 
| -  Register api_function_address = r2; | 
| - | 
| -  __ mov(r0, sp);  // r0 = Handle<Name> | 
| -  __ add(r1, r0, Operand(1 * kPointerSize));  // r1 = PCA | 
| +  Register api_function_address = r5; | 
| + | 
| +  __ mr(r3, sp);  // r3 = Handle<Name> | 
| +  __ addi(r4, r3, Operand(1 * kPointerSize));  // r4 = PCA | 
| + | 
| +  // If ABI passes Handles (pointer-sized struct) in a register: | 
| +  // | 
| +  // Create 2 extra slots on stack: | 
| +  //    [0] space for DirectCEntryStub's LR save | 
| +  //    [1] AccessorInfo& | 
| +  // | 
| +  // Otherwise: | 
| +  // | 
| +  // Create 3 extra slots on stack: | 
| +  //    [0] space for DirectCEntryStub's LR save | 
| +  //    [1] copy of Handle (first arg) | 
| +  //    [2] AccessorInfo& | 
| +#if ABI_PASSES_HANDLES_IN_REGS | 
| +  const int kAccessorInfoSlot = kStackFrameExtraParamSlot + 1; | 
| +  const int kApiStackSpace = 2; | 
| +#else | 
| +  const int kArg0Slot = kStackFrameExtraParamSlot + 1; | 
| +  const int kAccessorInfoSlot = kArg0Slot + 1; | 
| +  const int kApiStackSpace = 3; | 
| +#endif | 
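|  | 
| So the extra exit-frame slots depend on whether the ABI passes a Handle (a | 
| pointer-sized struct) in a register or forces an on-stack copy passed by | 
| reference. A sketch of the slot accounting (kStackFrameExtraParamSlot is | 
| assumed 0 for illustration; the real value is ABI-specific): | 
|  | 
| #include <cstdio> | 
|  | 
| int main() { | 
|   const int kStackFrameExtraParamSlot = 0;  // assumption for the example | 
|   for (int handles_in_regs = 0; handles_in_regs <= 1; handles_in_regs++) { | 
|     int next = kStackFrameExtraParamSlot + 1;  // [0] is the LR save slot | 
|     int arg0_slot = handles_in_regs ? -1 : next++;  // on-stack Handle copy | 
|     int accessor_info_slot = next++; | 
|     printf("handles in regs=%d: arg0 %d, AccessorInfo %d, space %d\n", | 
|            handles_in_regs, arg0_slot, accessor_info_slot, next); | 
|   } | 
|   return 0; | 
| } | 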
|  | 
| -  const int kApiStackSpace = 1; | 
| FrameScope frame_scope(masm, StackFrame::MANUAL); | 
| __ EnterExitFrame(false, kApiStackSpace); | 
|  | 
| +#if !ABI_PASSES_HANDLES_IN_REGS | 
| +  // pass 1st arg by reference | 
| +  __ StoreP(r3, MemOperand(sp, kArg0Slot * kPointerSize)); | 
| +  __ addi(r3, sp, Operand(kArg0Slot * kPointerSize)); | 
| +#endif | 
| + | 
| // Create PropertyAccessorInfo instance on the stack above the exit frame with | 
| -  // r1 (internal::Object** args_) as the data. | 
| -  __ str(r1, MemOperand(sp, 1 * kPointerSize)); | 
| -  __ add(r1, sp, Operand(1 * kPointerSize));  // r1 = AccessorInfo& | 
| +  // r4 (internal::Object** args_) as the data. | 
| +  __ StoreP(r4, MemOperand(sp, kAccessorInfoSlot * kPointerSize)); | 
| +  // r4 = AccessorInfo& | 
| +  __ addi(r4, sp, Operand(kAccessorInfoSlot * kPointerSize)); | 
|  | 
| const int kStackUnwindSpace = PropertyCallbackArguments::kArgsLength + 1; | 
|  | 
| @@ -5098,4 +5384,4 @@ void CallApiGetterStub::Generate(MacroAssembler* masm) { | 
|  | 
| } }  // namespace v8::internal | 
|  | 
| -#endif  // V8_TARGET_ARCH_ARM | 
| +#endif  // V8_TARGET_ARCH_PPC | 
|  |