| OLD | NEW | 
|---|---|
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. | 
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without | 
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are | 
| 4 // met: | 4 // met: | 
| 5 // | 5 // | 
| 6 //     * Redistributions of source code must retain the above copyright | 6 //     * Redistributions of source code must retain the above copyright | 
| 7 //       notice, this list of conditions and the following disclaimer. | 7 //       notice, this list of conditions and the following disclaimer. | 
| 8 //     * Redistributions in binary form must reproduce the above | 8 //     * Redistributions in binary form must reproduce the above | 
| 9 //       copyright notice, this list of conditions and the following | 9 //       copyright notice, this list of conditions and the following | 
| 10 //       disclaimer in the documentation and/or other materials provided | 10 //       disclaimer in the documentation and/or other materials provided | 
| (...skipping 3990 matching lines...) | |
| 4001   // Compare flat ASCII strings natively. Remove arguments from stack first. | 4001   // Compare flat ASCII strings natively. Remove arguments from stack first. | 
| 4002   __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); | 4002   __ IncrementCounter(counters->string_compare_native(), 1, a2, a3); | 
| 4003   __ Addu(sp, sp, Operand(2 * kPointerSize)); | 4003   __ Addu(sp, sp, Operand(2 * kPointerSize)); | 
| 4004   GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1); | 4004   GenerateCompareFlatAsciiStrings(masm, a1, a0, a2, a3, t0, t1); | 
| 4005 | 4005 | 
| 4006   __ bind(&runtime); | 4006   __ bind(&runtime); | 
| 4007   __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); | 4007   __ TailCallRuntime(Runtime::kHiddenStringCompare, 2, 1); | 
| 4008 } | 4008 } | 
| 4009 | 4009 | 
| 4010 | 4010 | 
| 4011 void ArrayPushStub::Generate(MacroAssembler* masm) { |  | 
| 4012   Register receiver = a0; |  | 
| 4013   Register scratch = a1; |  | 
| 4014 |  | 
| 4015   int argc = arguments_count(); |  | 
| 4016 |  | 
| 4017   if (argc == 0) { |  | 
| 4018     // Nothing to do, just return the length. |  | 
| 4019     __ lw(v0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |  | 
| 4020     __ DropAndRet(argc + 1); |  | 
| 4021     return; |  | 
| 4022   } |  | 
| 4023 |  | 
| 4024   Isolate* isolate = masm->isolate(); |  | 
| 4025 |  | 
| 4026   if (argc != 1) { |  | 
| 4027     __ TailCallExternalReference( |  | 
| 4028         ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); |  | 
| 4029     return; |  | 
| 4030   } |  | 
| 4031 |  | 
| 4032   Label call_builtin, attempt_to_grow_elements, with_write_barrier; |  | 
| 4033 |  | 
| 4034   Register elements = t2; |  | 
| 4035   Register end_elements = t1; |  | 
| 4036   // Get the elements array of the object. |  | 
| 4037   __ lw(elements, FieldMemOperand(receiver, JSArray::kElementsOffset)); |  | 
| 4038 |  | 
| 4039   if (IsFastSmiOrObjectElementsKind(elements_kind())) { |  | 
| 4040     // Check that the elements are in fast mode and writable. |  | 
| 4041     __ CheckMap(elements, |  | 
| 4042                 scratch, |  | 
| 4043                 Heap::kFixedArrayMapRootIndex, |  | 
| 4044                 &call_builtin, |  | 
| 4045                 DONT_DO_SMI_CHECK); |  | 
| 4046   } |  | 
| 4047 |  | 
| 4048   // Get the array's length into scratch and calculate new length. |  | 
| 4049   __ lw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |  | 
| 4050   __ Addu(scratch, scratch, Operand(Smi::FromInt(argc))); |  | 
| 4051 |  | 
| 4052   // Get the elements' length. |  | 
| 4053   __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); |  | 
| 4054 |  | 
| 4055   const int kEndElementsOffset = |  | 
| 4056       FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; |  | 
| 4057 |  | 
| 4058   if (IsFastSmiOrObjectElementsKind(elements_kind())) { |  | 
| 4059     // Check if we could survive without allocation. |  | 
| 4060     __ Branch(&attempt_to_grow_elements, gt, scratch, Operand(t0)); |  | 
| 4061 |  | 
| 4062     // Check if value is a smi. |  | 
| 4063     __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); |  | 
| 4064     __ JumpIfNotSmi(t0, &with_write_barrier); |  | 
| 4065 |  | 
| 4066     // Store the value. |  | 
| 4067     // We may need a register containing the address end_elements below, |  | 
| 4068     // so write back the value in end_elements. |  | 
| 4069     __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); |  | 
| 4070     __ Addu(end_elements, elements, end_elements); |  | 
| 4071     __ Addu(end_elements, end_elements, kEndElementsOffset); |  | 
| 4072     __ sw(t0, MemOperand(end_elements)); |  | 
| 4073   } else { |  | 
| 4074     // Check if we could survive without allocation. |  | 
| 4075     __ Branch(&call_builtin, gt, scratch, Operand(t0)); |  | 
| 4076 |  | 
| 4077     __ lw(t0, MemOperand(sp, (argc - 1) * kPointerSize)); |  | 
| 4078     __ StoreNumberToDoubleElements(t0, scratch, elements, a3, t1, a2, |  | 
| 4079                                    &call_builtin, argc * kDoubleSize); |  | 
| 4080   } |  | 
| 4081 |  | 
| 4082   // Save new length. |  | 
| 4083   __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |  | 
| 4084   __ mov(v0, scratch); |  | 
| 4085   __ DropAndRet(argc + 1); |  | 
| 4086 |  | 
| 4087   if (IsFastDoubleElementsKind(elements_kind())) { |  | 
| 4088     __ bind(&call_builtin); |  | 
| 4089     __ TailCallExternalReference( |  | 
| 4090         ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); |  | 
| 4091     return; |  | 
| 4092   } |  | 
| 4093 |  | 
| 4094   __ bind(&with_write_barrier); |  | 
| 4095 |  | 
| 4096   if (IsFastSmiElementsKind(elements_kind())) { |  | 
| 4097     if (FLAG_trace_elements_transitions) __ jmp(&call_builtin); |  | 
| 4098 |  | 
| 4099     __ lw(t3, FieldMemOperand(t0, HeapObject::kMapOffset)); |  | 
| 4100     __ LoadRoot(at, Heap::kHeapNumberMapRootIndex); |  | 
| 4101     __ Branch(&call_builtin, eq, t3, Operand(at)); |  | 
| 4102 |  | 
| 4103     ElementsKind target_kind = IsHoleyElementsKind(elements_kind()) |  | 
| 4104         ? FAST_HOLEY_ELEMENTS : FAST_ELEMENTS; |  | 
| 4105     __ lw(a3, ContextOperand(cp, Context::GLOBAL_OBJECT_INDEX)); |  | 
| 4106     __ lw(a3, FieldMemOperand(a3, GlobalObject::kNativeContextOffset)); |  | 
| 4107     __ lw(a3, ContextOperand(a3, Context::JS_ARRAY_MAPS_INDEX)); |  | 
| 4108     const int header_size = FixedArrayBase::kHeaderSize; |  | 
| 4109     // Verify that the object can be transitioned in place. |  | 
| 4110     const int origin_offset = header_size + elements_kind() * kPointerSize; |  | 
| 4111     __ lw(a2, FieldMemOperand(receiver, origin_offset)); |  | 
| 4112     __ lw(at, FieldMemOperand(a3, HeapObject::kMapOffset)); |  | 
| 4113     __ Branch(&call_builtin, ne, a2, Operand(at)); |  | 
| 4114 |  | 
| 4115 |  | 
| 4116     const int target_offset = header_size + target_kind * kPointerSize; |  | 
| 4117     __ lw(a3, FieldMemOperand(a3, target_offset)); |  | 
| 4118     __ mov(a2, receiver); |  | 
| 4119     ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |  | 
| 4120         masm, DONT_TRACK_ALLOCATION_SITE, NULL); |  | 
| 4121   } |  | 
| 4122 |  | 
| 4123   // Save new length. |  | 
| 4124   __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |  | 
| 4125 |  | 
| 4126   // Store the value. |  | 
| 4127   // We may need a register containing the address end_elements below, so write |  | 
| 4128   // back the value in end_elements. |  | 
| 4129   __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); |  | 
| 4130   __ Addu(end_elements, elements, end_elements); |  | 
| 4131   __ Addu(end_elements, end_elements, kEndElementsOffset); |  | 
| 4132   __ sw(t0, MemOperand(end_elements)); |  | 
| 4133 |  | 
| 4134   __ RecordWrite(elements, |  | 
| 4135                  end_elements, |  | 
| 4136                  t0, |  | 
| 4137                  kRAHasNotBeenSaved, |  | 
| 4138                  kDontSaveFPRegs, |  | 
| 4139                  EMIT_REMEMBERED_SET, |  | 
| 4140                  OMIT_SMI_CHECK); |  | 
| 4141   __ mov(v0, scratch); |  | 
| 4142   __ DropAndRet(argc + 1); |  | 
| 4143 |  | 
| 4144   __ bind(&attempt_to_grow_elements); |  | 
| 4145   // scratch: array's length + 1. |  | 
| 4146 |  | 
| 4147   if (!FLAG_inline_new) { |  | 
| 4148     __ bind(&call_builtin); |  | 
| 4149     __ TailCallExternalReference( |  | 
| 4150         ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); |  | 
| 4151     return; |  | 
| 4152   } |  | 
| 4153 |  | 
| 4154   __ lw(a2, MemOperand(sp, (argc - 1) * kPointerSize)); |  | 
| 4155   // Growing elements that are SMI-only requires special handling in case the |  | 
| 4156   // new element is non-Smi. For now, delegate to the builtin. |  | 
| 4157   if (IsFastSmiElementsKind(elements_kind())) { |  | 
| 4158     __ JumpIfNotSmi(a2, &call_builtin); |  | 
| 4159   } |  | 
| 4160 |  | 
| 4161   // We could be lucky and the elements array could be at the top of new-space. |  | 
| 4162   // In this case we can just grow it in place by moving the allocation pointer |  | 
| 4163   // up. |  | 
| 4164   ExternalReference new_space_allocation_top = |  | 
| 4165       ExternalReference::new_space_allocation_top_address(isolate); |  | 
| 4166   ExternalReference new_space_allocation_limit = |  | 
| 4167       ExternalReference::new_space_allocation_limit_address(isolate); |  | 
| 4168 |  | 
| 4169   const int kAllocationDelta = 4; |  | 
| 4170   ASSERT(kAllocationDelta >= argc); |  | 
| 4171   // Load top and check if it is the end of elements. |  | 
| 4172   __ sll(end_elements, scratch, kPointerSizeLog2 - kSmiTagSize); |  | 
| 4173   __ Addu(end_elements, elements, end_elements); |  | 
| 4174   __ Addu(end_elements, end_elements, Operand(kEndElementsOffset)); |  | 
| 4175   __ li(t0, Operand(new_space_allocation_top)); |  | 
| 4176   __ lw(a3, MemOperand(t0)); |  | 
| 4177   __ Branch(&call_builtin, ne, a3, Operand(end_elements)); |  | 
| 4178 |  | 
| 4179   __ li(t3, Operand(new_space_allocation_limit)); |  | 
| 4180   __ lw(t3, MemOperand(t3)); |  | 
| 4181   __ Addu(a3, a3, Operand(kAllocationDelta * kPointerSize)); |  | 
| 4182   __ Branch(&call_builtin, hi, a3, Operand(t3)); |  | 
| 4183 |  | 
| 4184   // We fit and could grow elements. |  | 
| 4185   // Update new_space_allocation_top. |  | 
| 4186   __ sw(a3, MemOperand(t0)); |  | 
| 4187   // Push the argument. |  | 
| 4188   __ sw(a2, MemOperand(end_elements)); |  | 
| 4189   // Fill the rest with holes. |  | 
| 4190   __ LoadRoot(a3, Heap::kTheHoleValueRootIndex); |  | 
| 4191   for (int i = 1; i < kAllocationDelta; i++) { |  | 
| 4192     __ sw(a3, MemOperand(end_elements, i * kPointerSize)); |  | 
| 4193   } |  | 
| 4194 |  | 
| 4195   // Update elements' and array's sizes. |  | 
| 4196   __ sw(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset)); |  | 
| 4197   __ lw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); |  | 
| 4198   __ Addu(t0, t0, Operand(Smi::FromInt(kAllocationDelta))); |  | 
| 4199   __ sw(t0, FieldMemOperand(elements, FixedArray::kLengthOffset)); |  | 
| 4200 |  | 
| 4201   // Elements are in new space, so write barrier is not required. |  | 
| 4202   __ mov(v0, scratch); |  | 
| 4203   __ DropAndRet(argc + 1); |  | 
| 4204 |  | 
| 4205   __ bind(&call_builtin); |  | 
| 4206   __ TailCallExternalReference( |  | 
| 4207       ExternalReference(Builtins::c_ArrayPush, isolate), argc + 1, 1); |  | 
| 4208 } |  | 
| 4209 |  | 
| 4210 |  | 
| 4211 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 4011 void BinaryOpICWithAllocationSiteStub::Generate(MacroAssembler* masm) { | 
| 4212   // ----------- S t a t e ------------- | 4012   // ----------- S t a t e ------------- | 
| 4213   //  -- a1    : left | 4013   //  -- a1    : left | 
| 4214   //  -- a0    : right | 4014   //  -- a0    : right | 
| 4215   //  -- ra    : return address | 4015   //  -- ra    : return address | 
| 4216   // ----------------------------------- | 4016   // ----------------------------------- | 
| 4217   Isolate* isolate = masm->isolate(); | 4017   Isolate* isolate = masm->isolate(); | 
| 4218 | 4018 | 
| 4219   // Load a2 with the allocation site. We stick an undefined dummy value here | 4019   // Load a2 with the allocation site. We stick an undefined dummy value here | 
| 4220   // and replace it with the real allocation site later when we instantiate this | 4020   // and replace it with the real allocation site later when we instantiate this | 
| (...skipping 1432 matching lines...) | |
| 5653                               MemOperand(fp, 6 * kPointerSize), | 5453                               MemOperand(fp, 6 * kPointerSize), | 
| 5654                               NULL); | 5454                               NULL); | 
| 5655 } | 5455 } | 
| 5656 | 5456 | 
| 5657 | 5457 | 
| 5658 #undef __ | 5458 #undef __ | 
| 5659 | 5459 | 
| 5660 } }  // namespace v8::internal | 5460 } }  // namespace v8::internal | 
| 5661 | 5461 | 
| 5662 #endif  // V8_TARGET_ARCH_MIPS | 5462 #endif  // V8_TARGET_ARCH_MIPS | 
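
For readers skimming the removed code above: the deleted `ArrayPushStub::Generate` handled `Array.prototype.push` with exactly one argument on MIPS. Below is a minimal, self-contained C++ sketch of the decision logic the stub emitted: store into spare capacity when the new length still fits the elements backing store, otherwise try to grow the backing store in place when it sits exactly at the top of new space, and otherwise tail-call the C builtin (`Builtins::c_ArrayPush`). The names `SimpleArray`, `BumpAllocator`, `PushFastPath`, and `PushPath` are hypothetical illustrations for this sketch, not V8 API.

```cpp
// Sketch of the control flow of the removed MIPS ArrayPushStub fast path.
// Hypothetical types; not V8 code.
#include <cstddef>
#include <cstdint>
#include <iostream>
#include <vector>

// Models the new-space bump allocator: in-place growth is only possible when
// the elements backing store ends exactly at the current allocation top and
// the extended store still fits below the allocation limit.
struct BumpAllocator {
  uintptr_t top;
  uintptr_t limit;
};

struct SimpleArray {
  std::vector<int32_t> elements;  // backing store ("FixedArray")
  size_t length = 0;              // JSArray length, <= elements.size()
};

enum class PushPath { kFastStore = 0, kGrewInPlace = 1, kBuiltinFallback = 2 };

// Decision logic for a single-argument push, mirroring the stub's branches.
PushPath PushFastPath(SimpleArray& a, int32_t value, BumpAllocator& space,
                      uintptr_t elements_end, size_t allocation_delta = 4) {
  const size_t new_length = a.length + 1;
  if (new_length <= a.elements.size()) {
    // Enough capacity: store the value and bump the length, no allocation.
    a.elements[a.length] = value;
    a.length = new_length;
    return PushPath::kFastStore;
  }
  // Attempt to grow in place: only if the backing store is the most recent
  // new-space allocation and kAllocationDelta extra slots still fit.
  const uintptr_t new_top = space.top + allocation_delta * sizeof(int32_t);
  if (elements_end == space.top && new_top <= space.limit) {
    space.top = new_top;  // move the allocation pointer up
    // Grow the backing store by allocation_delta slots (the stub filled the
    // unused slots with the hole value).
    a.elements.resize(a.elements.size() + allocation_delta);
    a.elements[a.length] = value;
    a.length = new_length;
    return PushPath::kGrewInPlace;
  }
  // Anything else (no room, wrong elements kind, write barrier or map
  // transition needed): delegate to the C++ builtin.
  return PushPath::kBuiltinFallback;
}

int main() {
  SimpleArray a;
  a.elements.resize(2);
  BumpAllocator space{0x1000, 0x2000};
  std::cout << static_cast<int>(PushFastPath(a, 7, space, 0x0)) << "\n";  // 0: fast store
  std::cout << static_cast<int>(PushFastPath(a, 8, space, 0x0)) << "\n";  // 0: fast store
  std::cout << static_cast<int>(PushFastPath(a, 9, space, 0x0)) << "\n";  // 2: fallback (end != top)
}
```

In the removed stub, the write-barrier and smi-to-object elements-kind transition paths also ended in either an in-place map change or the builtin fallback; those branches are folded into `kBuiltinFallback` in this sketch.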