| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_ARM | 7 #if V8_TARGET_ARCH_PPC |
| 8 | 8 |
| 9 #include "src/codegen.h" | 9 #include "src/codegen.h" |
| 10 #include "src/ic/ic.h" | 10 #include "src/ic/ic.h" |
| 11 #include "src/ic/ic-compiler.h" | 11 #include "src/ic/ic-compiler.h" |
| 12 #include "src/ic/stub-cache.h" | 12 #include "src/ic/stub-cache.h" |
| 13 | 13 |
| 14 namespace v8 { | 14 namespace v8 { |
| 15 namespace internal { | 15 namespace internal { |
| 16 | 16 |
| 17 | 17 |
| 18 // ---------------------------------------------------------------------------- | 18 // ---------------------------------------------------------------------------- |
| 19 // Static IC stub generators. | 19 // Static IC stub generators. |
| 20 // | 20 // |
| 21 | 21 |
| 22 #define __ ACCESS_MASM(masm) | 22 #define __ ACCESS_MASM(masm) |
| 23 | 23 |
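A note on register numbering for readers diffing the two ports: the renaming below is systematic. The PPC ABI reserves r0 (fixed scratch), r1 (stack pointer), and r2 (TOC pointer), so the JS-facing registers shift up by three; the scratch r9 maps to r11 and ip is kept as-is. A sketch of the correspondence observable in this diff (not an official v8 table):

```cpp
// ARM r0..r6 -> PPC r3..r9 (shift by three); ARM r9 -> PPC r11; ip unchanged.
constexpr int ArmToPpcReg(int arm_reg) {
  return arm_reg <= 6 ? arm_reg + 3 : (arm_reg == 9 ? 11 : -1);  // -1: not seen here
}
```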
| 24 | 24 |
| 25 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, | 25 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, |
| 26 Label* global_object) { | 26 Label* global_object) { |
| 27 // Register usage: | 27 // Register usage: |
| 28 // type: holds the receiver instance type on entry. | 28 // type: holds the receiver instance type on entry. |
| 29 __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE)); | 29 __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE)); |
| 30 __ b(eq, global_object); | 30 __ beq(global_object); |
| 31 __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE)); | 31 __ cmpi(type, Operand(JS_BUILTINS_OBJECT_TYPE)); |
| 32 __ b(eq, global_object); | 32 __ beq(global_object); |
| 33 __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE)); | 33 __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE)); |
| 34 __ b(eq, global_object); | 34 __ beq(global_object); |
| 35 } | 35 } |
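The ARM cmp/b(cond) pairs become cmpi/b&lt;cond&gt;: PPC folds the predicate into the branch mnemonic instead of passing it as an operand. What the stub computes, restated in plain C++ (a sketch; the enum values are stand-ins, not v8's):

```cpp
enum InstanceTypeSketch {  // stand-in values only
  JS_GLOBAL_OBJECT_TYPE_SK = 1,
  JS_BUILTINS_OBJECT_TYPE_SK = 2,
  JS_GLOBAL_PROXY_TYPE_SK = 3,
};

// The stub branches to global_object when any of the three types match.
inline bool IsGlobalObjectType(int type) {
  return type == JS_GLOBAL_OBJECT_TYPE_SK ||
         type == JS_BUILTINS_OBJECT_TYPE_SK ||
         type == JS_GLOBAL_PROXY_TYPE_SK;
}
```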
| 36 | 36 |
| 37 | 37 |
| 38 // Helper function used from LoadIC GenerateNormal. | 38 // Helper function used from LoadIC GenerateNormal. |
| 39 // | 39 // |
| 40 // elements: Property dictionary. It is not clobbered if a jump to the miss | 40 // elements: Property dictionary. It is not clobbered if a jump to the miss |
| 41 // label is done. | 41 // label is done. |
| 42 // name: Property name. It is not clobbered if a jump to the miss label is | 42 // name: Property name. It is not clobbered if a jump to the miss label is |
| 43 // done | 43 // done |
| 44 // result: Register for the result. It is only updated if a jump to the miss | 44 // result: Register for the result. It is only updated if a jump to the miss |
| (...skipping 17 matching lines...) |
| 62 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, | 62 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, |
| 63 name, scratch1, scratch2); | 63 name, scratch1, scratch2); |
| 64 | 64 |
| 65 // If probing finds an entry check that the value is a normal | 65 // If probing finds an entry check that the value is a normal |
| 66 // property. | 66 // property. |
| 67 __ bind(&done); // scratch2 == elements + 4 * index | 67 __ bind(&done); // scratch2 == elements + 4 * index |
| 68 const int kElementsStartOffset = | 68 const int kElementsStartOffset = |
| 69 NameDictionary::kHeaderSize + | 69 NameDictionary::kHeaderSize + |
| 70 NameDictionary::kElementsStartIndex * kPointerSize; | 70 NameDictionary::kElementsStartIndex * kPointerSize; |
| 71 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; | 71 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; |
| 72 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); | 72 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); |
| 73 __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize)); | 73 __ mr(r0, scratch2); |
| 74 __ b(ne, miss); | 74 __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask)); |
| | 75 __ and_(scratch2, scratch1, scratch2, SetRC); |
| | 76 __ bne(miss, cr0); |
| | 77 __ mr(scratch2, r0); |
| 75 | 78 |
| 76 // Get the value at the masked, scaled index and return. | 79 // Get the value at the masked, scaled index and return. |
| 77 __ ldr(result, | 80 __ LoadP(result, |
| 78 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); | 81 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); |
| 79 } | 82 } |
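The ARM tst against a shifted immediate cannot be carried over directly: PPC's andi. only encodes a 16-bit unsigned immediate, and the smi-tagged mask is wider than that on 64-bit targets, so the port parks scratch2 in r0, materializes the mask with LoadSmiLiteral, and tests with a recording and_ (SetRC, branching on cr0). A minimal sketch of the test, assuming a 64-bit smi layout with the payload in the upper word and an illustrative 3-bit type field:

```cpp
#include <cstdint>

constexpr int kSmiShift64 = 32;           // assumed smi payload shift
constexpr intptr_t kTypeFieldMask = 0x7;  // illustrative field width

// Zero type bits mean a NORMAL property; anything else jumps to miss.
inline bool IsNormalProperty(intptr_t details_smi) {
  return (details_smi & (kTypeFieldMask << kSmiShift64)) == 0;
}
```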
| 80 | 83 |
| 81 | 84 |
| 82 // Helper function used from StoreIC::GenerateNormal. | 85 // Helper function used from StoreIC::GenerateNormal. |
| 83 // | 86 // |
| 84 // elements: Property dictionary. It is not clobbered if a jump to the miss | 87 // elements: Property dictionary. It is not clobbered if a jump to the miss |
| 85 // label is done. | 88 // label is done. |
| 86 // name: Property name. It is not clobbered if a jump to the miss label is | 89 // name: Property name. It is not clobbered if a jump to the miss label is |
| 87 // done | 90 // done |
| 88 // value: The value to store. | 91 // value: The value to store. |
| (...skipping 15 matching lines...) |
| 104 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, | 107 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, |
| 105 name, scratch1, scratch2); | 108 name, scratch1, scratch2); |
| 106 | 109 |
| 107 // If probing finds an entry in the dictionary check that the value | 110 // If probing finds an entry in the dictionary check that the value |
| 108 // is a normal property that is not read only. | 111 // is a normal property that is not read only. |
| 109 __ bind(&done); // scratch2 == elements + 4 * index | 112 __ bind(&done); // scratch2 == elements + 4 * index |
| 110 const int kElementsStartOffset = | 113 const int kElementsStartOffset = |
| 111 NameDictionary::kHeaderSize + | 114 NameDictionary::kHeaderSize + |
| 112 NameDictionary::kElementsStartIndex * kPointerSize; | 115 NameDictionary::kElementsStartIndex * kPointerSize; |
| 113 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; | 116 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; |
| 114 const int kTypeAndReadOnlyMask = | 117 int kTypeAndReadOnlyMask = |
| 115 (PropertyDetails::TypeField::kMask | | 118 PropertyDetails::TypeField::kMask | |
| 116 PropertyDetails::AttributesField::encode(READ_ONLY)) | 119 PropertyDetails::AttributesField::encode(READ_ONLY); |
| 117 << kSmiTagSize; | 120 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); |
| 118 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); | 121 __ mr(r0, scratch2); |
| 119 __ tst(scratch1, Operand(kTypeAndReadOnlyMask)); | 122 __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask)); |
| 120 __ b(ne, miss); | 123 __ and_(scratch2, scratch1, scratch2, SetRC); |
| | 124 __ bne(miss, cr0); |
| | 125 __ mr(scratch2, r0); |
| 121 | 126 |
| 122 // Store the value at the masked, scaled index and return. | 127 // Store the value at the masked, scaled index and return. |
| 123 const int kValueOffset = kElementsStartOffset + kPointerSize; | 128 const int kValueOffset = kElementsStartOffset + kPointerSize; |
| 124 __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); | 129 __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); |
| 125 __ str(value, MemOperand(scratch2)); | 130 __ StoreP(value, MemOperand(scratch2)); |
| 126 | 131 |
| 127 // Update the write barrier. Make sure not to clobber the value. | 132 // Update the write barrier. Make sure not to clobber the value. |
| 128 __ mov(scratch1, value); | 133 __ mr(scratch1, value); |
| 129 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, | 134 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, |
| 130 kDontSaveFPRegs); | 135 kDontSaveFPRegs); |
| 131 } | 136 } |
| 132 | 137 |
| 133 | 138 |
| 134 // Checks the receiver for special cases (value type, slow case bits). | 139 // Checks the receiver for special cases (value type, slow case bits). |
| 135 // Falls through for regular JS object. | 140 // Falls through for regular JS object. |
| 136 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, | 141 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, |
| 137 Register receiver, Register map, | 142 Register receiver, Register map, |
| 138 Register scratch, | 143 Register scratch, |
| 139 int interceptor_bit, Label* slow) { | 144 int interceptor_bit, Label* slow) { |
| 140 // Check that the object isn't a smi. | 145 // Check that the object isn't a smi. |
| 141 __ JumpIfSmi(receiver, slow); | 146 __ JumpIfSmi(receiver, slow); |
| 142 // Get the map of the receiver. | 147 // Get the map of the receiver. |
| 143 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 148 __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 144 // Check bit field. | 149 // Check bit field. |
| 145 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); | 150 __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 146 __ tst(scratch, | 151 DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000); |
| 147 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); | 152 __ andi(r0, scratch, |
| 148 __ b(ne, slow); | 153 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); |
| | 154 __ bne(slow, cr0); |
| 149 // Check that the object is some kind of JS object EXCEPT JS Value type. | 155 // Check that the object is some kind of JS object EXCEPT JS Value type. |
| 150 // In the case that the object is a value-wrapper object, | 156 // In the case that the object is a value-wrapper object, |
| 151 // we enter the runtime system to make sure that indexing into string | 157 // we enter the runtime system to make sure that indexing into string |
| 152 // objects work as intended. | 158 // objects work as intended. |
| 153 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); | 159 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); |
| 154 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 160 __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 155 __ cmp(scratch, Operand(JS_OBJECT_TYPE)); | 161 __ cmpi(scratch, Operand(JS_OBJECT_TYPE)); |
| 156 __ b(lt, slow); | 162 __ blt(slow); |
| 157 } | 163 } |
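The new DCHECK is the same immediate-width story: andi. takes a 16-bit unsigned immediate, so the combined access-check/interceptor mask must stay small (the check uses 0x8000, comfortably inside the encodable range). A sketch with stand-in bit positions:

```cpp
#include <cstdint>

constexpr int kIsAccessCheckNeededBit = 1;  // stand-in bit position

// True when the map's bit field demands the slow path.
inline bool NeedsSlowPath(uint8_t map_bit_field, int interceptor_bit) {
  uint32_t mask = (1u << kIsAccessCheckNeededBit) | (1u << interceptor_bit);
  return (map_bit_field & mask) != 0;  // mask must fit andi.'s 16-bit immediate
}
```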
| 158 | 164 |
| 159 | 165 |
| 160 // Loads an indexed element from a fast case array. | 166 // Loads an indexed element from a fast case array. |
| 161 // If not_fast_array is NULL, doesn't perform the elements map check. | 167 // If not_fast_array is NULL, doesn't perform the elements map check. |
| 162 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, | 168 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, |
| 163 Register key, Register elements, | 169 Register key, Register elements, |
| 164 Register scratch1, Register scratch2, | 170 Register scratch1, Register scratch2, |
| 165 Register result, Label* not_fast_array, | 171 Register result, Label* not_fast_array, |
| 166 Label* out_of_range) { | 172 Label* out_of_range) { |
| (...skipping 12 matching lines...) |
| 179 // Unchanged on bailout so 'receiver' and 'key' can be safely | 185 // Unchanged on bailout so 'receiver' and 'key' can be safely |
| 180 // used by further computation. | 186 // used by further computation. |
| 181 // | 187 // |
| 182 // Scratch registers: | 188 // Scratch registers: |
| 183 // | 189 // |
| 184 // scratch1 - used to hold elements map and elements length. | 190 // scratch1 - used to hold elements map and elements length. |
| 185 // Holds the elements map if not_fast_array branch is taken. | 191 // Holds the elements map if not_fast_array branch is taken. |
| 186 // | 192 // |
| 187 // scratch2 - used to hold the loaded value. | 193 // scratch2 - used to hold the loaded value. |
| 188 | 194 |
| 189 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 195 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 190 if (not_fast_array != NULL) { | 196 if (not_fast_array != NULL) { |
| 191 // Check that the object is in fast mode and writable. | 197 // Check that the object is in fast mode and writable. |
| 192 __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); | 198 __ LoadP(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 193 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 199 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
| 194 __ cmp(scratch1, ip); | 200 __ cmp(scratch1, ip); |
| 195 __ b(ne, not_fast_array); | 201 __ bne(not_fast_array); |
| 196 } else { | 202 } else { |
| 197 __ AssertFastElements(elements); | 203 __ AssertFastElements(elements); |
| 198 } | 204 } |
| 199 // Check that the key (index) is within bounds. | 205 // Check that the key (index) is within bounds. |
| 200 __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 206 __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 201 __ cmp(key, Operand(scratch1)); | 207 __ cmpl(key, scratch1); |
| 202 __ b(hs, out_of_range); | 208 __ bge(out_of_range); |
| 203 // Fast case: Do the load. | 209 // Fast case: Do the load. |
| 204 __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 210 __ addi(scratch1, elements, |
| 205 __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key)); | 211 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| | 212 // The key is a smi. |
| | 213 __ SmiToPtrArrayOffset(scratch2, key); |
| | 214 __ LoadPX(scratch2, MemOperand(scratch2, scratch1)); |
| 206 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 215 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 207 __ cmp(scratch2, ip); | 216 __ cmp(scratch2, ip); |
| 208 // In case the loaded value is the_hole we have to consult GetProperty | 217 // In case the loaded value is the_hole we have to consult GetProperty |
| 209 // to ensure the prototype chain is searched. | 218 // to ensure the prototype chain is searched. |
| 210 __ b(eq, out_of_range); | 219 __ beq(out_of_range); |
| 211 __ mov(result, scratch2); | 220 __ mr(result, scratch2); |
| 212 } | 221 } |
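ARM's MemOperand::PointerAddressFromSmiKey folds the smi-to-offset scaling into the addressing mode; PPC has no shifted-index addressing, so the port computes the byte offset explicitly with SmiToPtrArrayOffset and loads through an indexed form (LoadPX). A sketch of the scaling, assuming 64-bit smis (payload in the upper 32 bits) and 8-byte pointers:

```cpp
#include <cstdint>

constexpr int kSmiShift = 32;        // assumed 64-bit smi layout
constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers

// Smi-tagged index -> byte offset into a pointer-sized element array.
inline intptr_t SmiToPtrArrayOffsetSketch(intptr_t smi_key) {
  return smi_key >> (kSmiShift - kPointerSizeLog2);
}
// Example: index 5, tagged as (5LL << 32), yields 5 * 8 == 40.
```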
| 213 | 222 |
| 214 | 223 |
| 215 // Checks whether a key is an array index string or a unique name. | 224 // Checks whether a key is an array index string or a unique name. |
| 216 // Falls through if a key is a unique name. | 225 // Falls through if a key is a unique name. |
| 217 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, | 226 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, |
| 218 Register map, Register hash, | 227 Register map, Register hash, |
| 219 Label* index_string, Label* not_unique) { | 228 Label* index_string, Label* not_unique) { |
| 220 // The key is not a smi. | 229 // The key is not a smi. |
| 221 Label unique; | 230 Label unique; |
| 222 // Is it a name? | 231 // Is it a name? |
| 223 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); | 232 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); |
| 224 __ b(hi, not_unique); | 233 __ bgt(not_unique); |
| 225 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); | 234 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); |
| 226 __ b(eq, &unique); | 235 __ beq(&unique); |
| 227 | 236 |
| 228 // Is the string an array index, with cached numeric value? | 237 // Is the string an array index, with cached numeric value? |
| 229 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 238 __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
| 230 __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask)); | 239 __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask)); |
| 231 __ b(eq, index_string); | 240 __ and_(r0, hash, r8, SetRC); |
| | 241 __ beq(index_string, cr0); |
| 232 | 242 |
| 233 // Is the string internalized? We know it's a string, so a single | 243 // Is the string internalized? We know it's a string, so a single |
| 234 // bit test is enough. | 244 // bit test is enough. |
| 235 // map: key map | 245 // map: key map |
| 236 __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 246 __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 237 STATIC_ASSERT(kInternalizedTag == 0); | 247 STATIC_ASSERT(kInternalizedTag == 0); |
| 238 __ tst(hash, Operand(kIsNotInternalizedMask)); | 248 __ andi(r0, hash, Operand(kIsNotInternalizedMask)); |
| 239 __ b(ne, not_unique); | 249 __ bne(not_unique, cr0); |
| 240 | 250 |
| 241 __ bind(&unique); | 251 __ bind(&unique); |
| 242 } | 252 } |
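Both bit tests in this helper follow the pattern above; the cached-array-index mask will not fit andi.'s immediate, so it is loaded into r8 first, while the internalized-tag test fits and stays an andi.. Restated with stand-in mask values:

```cpp
#include <cstdint>

constexpr uint32_t kContainsCachedArrayIndexMaskSk = 1u << 30;  // stand-in
constexpr uint32_t kIsNotInternalizedMaskSk = 1u << 6;          // stand-in

// A clear bit means the hash field caches an array index.
inline bool HasCachedArrayIndex(uint32_t hash_field) {
  return (hash_field & kContainsCachedArrayIndexMaskSk) == 0;
}
// kInternalizedTag == 0, so a clear bit means the string is internalized.
inline bool IsInternalized(uint8_t instance_type) {
  return (instance_type & kIsNotInternalizedMaskSk) == 0;
}
```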
| 243 | 253 |
| 244 | 254 |
| 245 void LoadIC::GenerateNormal(MacroAssembler* masm) { | 255 void LoadIC::GenerateNormal(MacroAssembler* masm) { |
| 246 Register dictionary = r0; | 256 Register dictionary = r3; |
| 247 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); | 257 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); |
| 248 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); | 258 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); |
| 249 | 259 |
| 250 Label slow; | 260 Label slow; |
| 251 | 261 |
| 252 __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), | 262 __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), |
| 253 JSObject::kPropertiesOffset)); | 263 JSObject::kPropertiesOffset)); |
| 254 GenerateDictionaryLoad(masm, &slow, dictionary, | 264 GenerateDictionaryLoad(masm, &slow, dictionary, |
| 255 LoadDescriptor::NameRegister(), r0, r3, r4); | 265 LoadDescriptor::NameRegister(), r3, r6, r7); |
| 256 __ Ret(); | 266 __ Ret(); |
| 257 | 267 |
| 258 // Dictionary load failed, go slow (but don't miss). | 268 // Dictionary load failed, go slow (but don't miss). |
| 259 __ bind(&slow); | 269 __ bind(&slow); |
| 260 GenerateRuntimeGetProperty(masm); | 270 GenerateRuntimeGetProperty(masm); |
| 261 } | 271 } |
| 262 | 272 |
| 263 | 273 |
| 264 // A register that isn't one of the parameters to the load ic. | 274 // A register that isn't one of the parameters to the load ic. |
| 265 static const Register LoadIC_TempRegister() { return r3; } | 275 static const Register LoadIC_TempRegister() { return r6; } |
| 266 | 276 |
| 267 | 277 |
| 268 void LoadIC::GenerateMiss(MacroAssembler* masm) { | 278 void LoadIC::GenerateMiss(MacroAssembler* masm) { |
| 269 // The return address is in lr. | 279 // The return address is in lr. |
| 270 Isolate* isolate = masm->isolate(); | 280 Isolate* isolate = masm->isolate(); |
| 271 | 281 |
| 272 __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4); | 282 __ IncrementCounter(isolate->counters()->load_miss(), 1, r6, r7); |
| 273 | 283 |
| 274 __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); | 284 __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); |
| 275 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); | 285 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); |
| 276 | 286 |
| 277 // Perform tail call to the entry. | 287 // Perform tail call to the entry. |
| 278 ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); | 288 ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); |
| 279 __ TailCallExternalReference(ref, 2, 1); | 289 __ TailCallExternalReference(ref, 2, 1); |
| 280 } | 290 } |
| 281 | 291 |
| 282 | 292 |
| 283 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 293 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
| 284 // The return address is in lr. | 294 // The return address is in lr. |
| 285 | 295 |
| 286 __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); | 296 __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); |
| 287 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); | 297 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); |
| 288 | 298 |
| 289 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); | 299 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); |
| 290 } | 300 } |
| 291 | 301 |
| 292 | 302 |
| 293 static MemOperand GenerateMappedArgumentsLookup( | 303 static MemOperand GenerateMappedArgumentsLookup( |
| 294 MacroAssembler* masm, Register object, Register key, Register scratch1, | 304 MacroAssembler* masm, Register object, Register key, Register scratch1, |
| 295 Register scratch2, Register scratch3, Label* unmapped_case, | 305 Register scratch2, Register scratch3, Label* unmapped_case, |
| 296 Label* slow_case) { | 306 Label* slow_case) { |
| 297 Heap* heap = masm->isolate()->heap(); | 307 Heap* heap = masm->isolate()->heap(); |
| 298 | 308 |
| 299 // Check that the receiver is a JSObject. Because of the map check | 309 // Check that the receiver is a JSObject. Because of the map check |
| 300 // later, we do not need to check for interceptors or whether it | 310 // later, we do not need to check for interceptors or whether it |
| 301 // requires access checks. | 311 // requires access checks. |
| 302 __ JumpIfSmi(object, slow_case); | 312 __ JumpIfSmi(object, slow_case); |
| 303 // Check that the object is some kind of JSObject. | 313 // Check that the object is some kind of JSObject. |
| 304 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE); | 314 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE); |
| 305 __ b(lt, slow_case); | 315 __ blt(slow_case); |
| 306 | 316 |
| 307 // Check that the key is a positive smi. | 317 // Check that the key is a positive smi. |
| 308 __ tst(key, Operand(0x80000001)); | 318 __ mov(scratch1, Operand(0x80000001)); |
| 309 __ b(ne, slow_case); | 319 __ and_(r0, key, scratch1, SetRC); |
| | 320 __ bne(slow_case, cr0); |
| 310 | 321 |
| 311 // Load the elements into scratch1 and check its map. | 322 // Load the elements into scratch1 and check its map. |
| 312 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); | 323 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); |
| 313 __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); | 324 __ LoadP(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); |
| 314 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK); | 325 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK); |
| 315 | 326 |
| 316 // Check if element is in the range of mapped arguments. If not, jump | 327 // Check if element is in the range of mapped arguments. If not, jump |
| 317 // to the unmapped lookup with the parameter map in scratch1. | 328 // to the unmapped lookup with the parameter map in scratch1. |
| 318 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); | 329 __ LoadP(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); |
| 319 __ sub(scratch2, scratch2, Operand(Smi::FromInt(2))); | 330 __ SubSmiLiteral(scratch2, scratch2, Smi::FromInt(2), r0); |
| 320 __ cmp(key, Operand(scratch2)); | 331 __ cmpl(key, scratch2); |
| 321 __ b(cs, unmapped_case); | 332 __ bge(unmapped_case); |
| 322 | 333 |
| 323 // Load element index and check whether it is the hole. | 334 // Load element index and check whether it is the hole. |
| 324 const int kOffset = | 335 const int kOffset = |
| 325 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; | 336 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; |
| 326 | 337 |
| 327 __ mov(scratch3, Operand(kPointerSize >> 1)); | 338 __ SmiToPtrArrayOffset(scratch3, key); |
| 328 __ mul(scratch3, key, scratch3); | 339 __ addi(scratch3, scratch3, Operand(kOffset)); |
| 329 __ add(scratch3, scratch3, Operand(kOffset)); | |
| 330 | 340 |
| 331 __ ldr(scratch2, MemOperand(scratch1, scratch3)); | 341 __ LoadPX(scratch2, MemOperand(scratch1, scratch3)); |
| 332 __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); | 342 __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); |
| 333 __ cmp(scratch2, scratch3); | 343 __ cmp(scratch2, scratch3); |
| 334 __ b(eq, unmapped_case); | 344 __ beq(unmapped_case); |
| 335 | 345 |
| 336 // Load value from context and return it. We can reuse scratch1 because | 346 // Load value from context and return it. We can reuse scratch1 because |
| 337 // we do not jump to the unmapped lookup (which requires the parameter | 347 // we do not jump to the unmapped lookup (which requires the parameter |
| 338 // map in scratch1). | 348 // map in scratch1). |
| 339 __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 349 __ LoadP(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
| 340 __ mov(scratch3, Operand(kPointerSize >> 1)); | 350 __ SmiToPtrArrayOffset(scratch3, scratch2); |
| 341 __ mul(scratch3, scratch2, scratch3); | 351 __ addi(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag)); |
| 342 __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag)); | |
| 343 return MemOperand(scratch1, scratch3); | 352 return MemOperand(scratch1, scratch3); |
| 344 } | 353 } |
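The offsets in this lookup encode the sloppy-arguments layout: parameter-map slot 0 holds the context, slot 1 the arguments backing store, and slot 2+i the context index for parameter i (or the hole). A sketch of the arithmetic behind kOffset, assuming 8-byte pointers and a two-word (map + length) FixedArray header:

```cpp
constexpr int kPtrSize = 8;                      // assumed
constexpr int kFixedArrayHeader = 2 * kPtrSize;  // map + length, assumed
constexpr int kHeapObjTag = 1;

// For i == 0 this matches kOffset above; each parameter adds one slot.
constexpr int MappedSlotOffset(int i) {
  return kFixedArrayHeader + (2 + i) * kPtrSize - kHeapObjTag;
}
```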
| 345 | 354 |
| 346 | 355 |
| 347 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, | 356 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, |
| 348 Register key, | 357 Register key, |
| 349 Register parameter_map, | 358 Register parameter_map, |
| 350 Register scratch, | 359 Register scratch, |
| 351 Label* slow_case) { | 360 Label* slow_case) { |
| 352 // Element is in arguments backing store, which is referenced by the | 361 // Element is in arguments backing store, which is referenced by the |
| 353 // second element of the parameter_map. The parameter_map register | 362 // second element of the parameter_map. The parameter_map register |
| 354 // must be loaded with the parameter map of the arguments object and is | 363 // must be loaded with the parameter map of the arguments object and is |
| 355 // overwritten. | 364 // overwritten. |
| 356 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; | 365 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; |
| 357 Register backing_store = parameter_map; | 366 Register backing_store = parameter_map; |
| 358 __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); | 367 __ LoadP(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); |
| 359 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); | 368 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); |
| 360 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, | 369 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, |
| 361 DONT_DO_SMI_CHECK); | 370 DONT_DO_SMI_CHECK); |
| 362 __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); | 371 __ LoadP(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); |
| 363 __ cmp(key, Operand(scratch)); | 372 __ cmpl(key, scratch); |
| 364 __ b(cs, slow_case); | 373 __ bge(slow_case); |
| 365 __ mov(scratch, Operand(kPointerSize >> 1)); | 374 __ SmiToPtrArrayOffset(scratch, key); |
| 366 __ mul(scratch, key, scratch); | 375 __ addi(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 367 __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 368 return MemOperand(backing_store, scratch); | 376 return MemOperand(backing_store, scratch); |
| 369 } | 377 } |
| 370 | 378 |
| 371 | 379 |
| 372 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { | 380 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { |
| 373 Register receiver = StoreDescriptor::ReceiverRegister(); | 381 Register receiver = StoreDescriptor::ReceiverRegister(); |
| 374 Register key = StoreDescriptor::NameRegister(); | 382 Register key = StoreDescriptor::NameRegister(); |
| 375 Register value = StoreDescriptor::ValueRegister(); | 383 Register value = StoreDescriptor::ValueRegister(); |
| 376 DCHECK(receiver.is(r1)); | 384 DCHECK(receiver.is(r4)); |
| 377 DCHECK(key.is(r2)); | 385 DCHECK(key.is(r5)); |
| 378 DCHECK(value.is(r0)); | 386 DCHECK(value.is(r3)); |
| 379 | 387 |
| 380 Label slow, notin; | 388 Label slow, notin; |
| 381 MemOperand mapped_location = GenerateMappedArgumentsLookup( | 389 MemOperand mapped_location = GenerateMappedArgumentsLookup( |
| 382 masm, receiver, key, r3, r4, r5, ¬in, &slow); | 390 masm, receiver, key, r6, r7, r8, ¬in, &slow); |
| 383 __ str(value, mapped_location); | 391 Register mapped_base = mapped_location.ra(); |
| 384 __ add(r6, r3, r5); | 392 Register mapped_offset = mapped_location.rb(); |
| 385 __ mov(r9, value); | 393 __ StorePX(value, mapped_location); |
| 386 __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); | 394 __ add(r9, mapped_base, mapped_offset); |
| | 395 __ mr(r11, value); |
| | 396 __ RecordWrite(mapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs); |
| 387 __ Ret(); | 397 __ Ret(); |
| 388 __ bind(¬in); | 398 __ bind(¬in); |
| 389 // The unmapped lookup expects that the parameter map is in r3. | 399 // The unmapped lookup expects that the parameter map is in r6. |
| 390 MemOperand unmapped_location = | 400 MemOperand unmapped_location = |
| 391 GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow); | 401 GenerateUnmappedArgumentsLookup(masm, key, r6, r7, &slow); |
| 392 __ str(value, unmapped_location); | 402 Register unmapped_base = unmapped_location.ra(); |
| 393 __ add(r6, r3, r4); | 403 Register unmapped_offset = unmapped_location.rb(); |
| 394 __ mov(r9, value); | 404 __ StorePX(value, unmapped_location); |
| 395 __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); | 405 __ add(r9, unmapped_base, unmapped_offset); |
| | 406 __ mr(r11, value); |
| | 407 __ RecordWrite(unmapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs); |
| 396 __ Ret(); | 408 __ Ret(); |
| 397 __ bind(&slow); | 409 __ bind(&slow); |
| 398 GenerateMiss(masm); | 410 GenerateMiss(masm); |
| 399 } | 411 } |
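The ra()/rb() plumbing is new to this port: the ARM version re-derived the store address by adding the registers it had just used, while here the lookup helpers return a register+register MemOperand, so the write-barrier address is rebuilt from the operand's base and index before RecordWrite, and the store itself becomes an indexed StorePX. An illustrative stand-in for the two-register operand (not v8's class):

```cpp
// Sketch only: a PPC-style reg+reg memory operand.
struct RegRegOperand {
  int base, index;
  int ra() const { return base; }   // base register number
  int rb() const { return index; }  // index register number
};
// Slot address for the write barrier = ra + rb, recomputed after StorePX.
```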
| 400 | 412 |
| 401 | 413 |
| 402 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { | 414 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { |
| 403 // The return address is in lr. | 415 // The return address is in lr. |
| 404 Isolate* isolate = masm->isolate(); | 416 Isolate* isolate = masm->isolate(); |
| 405 | 417 |
| 406 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4); | 418 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r6, r7); |
| 407 | 419 |
| 408 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); | 420 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); |
| 409 | 421 |
| 410 // Perform tail call to the entry. | 422 // Perform tail call to the entry. |
| 411 ExternalReference ref = | 423 ExternalReference ref = |
| 412 ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); | 424 ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); |
| 413 | 425 |
| 414 __ TailCallExternalReference(ref, 2, 1); | 426 __ TailCallExternalReference(ref, 2, 1); |
| 415 } | 427 } |
| 416 | 428 |
| 417 | 429 |
| 418 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 430 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
| 419 // The return address is in lr. | 431 // The return address is in lr. |
| 420 | 432 |
| 421 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); | 433 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); |
| 422 | 434 |
| 423 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); | 435 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); |
| 424 } | 436 } |
| 425 | 437 |
| 426 | 438 |
| 427 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { | 439 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { |
| 428 // The return address is in lr. | 440 // The return address is in lr. |
| 429 Label slow, check_name, index_smi, index_name, property_array_property; | 441 Label slow, check_name, index_smi, index_name, property_array_property; |
| 430 Label probe_dictionary, check_number_dictionary; | 442 Label probe_dictionary, check_number_dictionary; |
| 431 | 443 |
| 432 Register key = LoadDescriptor::NameRegister(); | 444 Register key = LoadDescriptor::NameRegister(); |
| 433 Register receiver = LoadDescriptor::ReceiverRegister(); | 445 Register receiver = LoadDescriptor::ReceiverRegister(); |
| 434 DCHECK(key.is(r2)); | 446 DCHECK(key.is(r5)); |
| 435 DCHECK(receiver.is(r1)); | 447 DCHECK(receiver.is(r4)); |
| 436 | 448 |
| 437 Isolate* isolate = masm->isolate(); | 449 Isolate* isolate = masm->isolate(); |
| 438 | 450 |
| 439 // Check that the key is a smi. | 451 // Check that the key is a smi. |
| 440 __ JumpIfNotSmi(key, &check_name); | 452 __ JumpIfNotSmi(key, &check_name); |
| 441 __ bind(&index_smi); | 453 __ bind(&index_smi); |
| 442 // Now the key is known to be a smi. This place is also jumped to from below | 454 // Now the key is known to be a smi. This place is also jumped to from below |
| 443 // where a numeric string is converted to a smi. | 455 // where a numeric string is converted to a smi. |
| 444 | 456 |
| 445 GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3, | 457 GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6, |
| 446 Map::kHasIndexedInterceptor, &slow); | 458 Map::kHasIndexedInterceptor, &slow); |
| 447 | 459 |
| 448 // Check the receiver's map to see if it has fast elements. | 460 // Check the receiver's map to see if it has fast elements. |
| 449 __ CheckFastElements(r0, r3, &check_number_dictionary); | 461 __ CheckFastElements(r3, r6, &check_number_dictionary); |
| 450 | 462 |
| 451 GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow); | 463 GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, NULL, &slow); |
| 452 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3); | 464 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6); |
| 453 __ Ret(); | 465 __ Ret(); |
| 454 | 466 |
| 455 __ bind(&check_number_dictionary); | 467 __ bind(&check_number_dictionary); |
| 456 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 468 __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 457 __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset)); | 469 __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset)); |
| 458 | 470 |
| 459 // Check whether the elements is a number dictionary. | 471 // Check whether the elements is a number dictionary. |
| 460 // r3: elements map | 472 // r6: elements map |
| 461 // r4: elements | 473 // r7: elements |
| 462 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 474 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
| 463 __ cmp(r3, ip); | 475 __ cmp(r6, ip); |
| 464 __ b(ne, &slow); | 476 __ bne(&slow); |
| 465 __ SmiUntag(r0, key); | 477 __ SmiUntag(r3, key); |
| 466 __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5); | 478 __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8); |
| 467 __ Ret(); | 479 __ Ret(); |
| 468 | 480 |
| 469 // Slow case, key and receiver still in r2 and r1. | 481 // Slow case, key and receiver still in r3 and r4. |
| 470 __ bind(&slow); | 482 __ bind(&slow); |
| 471 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4, | 483 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7, |
| 472 r3); | 484 r6); |
| 473 GenerateRuntimeGetProperty(masm); | 485 GenerateRuntimeGetProperty(masm); |
| 474 | 486 |
| 475 __ bind(&check_name); | 487 __ bind(&check_name); |
| 476 GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow); | 488 GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow); |
| 477 | 489 |
| 478 GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3, | 490 GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6, |
| 479 Map::kHasNamedInterceptor, &slow); | 491 Map::kHasNamedInterceptor, &slow); |
| 480 | 492 |
| 481 // If the receiver is a fast-case object, check the keyed lookup | 493 // If the receiver is a fast-case object, check the keyed lookup |
| 482 // cache. Otherwise probe the dictionary. | 494 // cache. Otherwise probe the dictionary. |
| 483 __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 495 __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 484 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); | 496 __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset)); |
| 485 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 497 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
| 486 __ cmp(r4, ip); | 498 __ cmp(r7, ip); |
| 487 __ b(eq, &probe_dictionary); | 499 __ beq(&probe_dictionary); |
| 488 | 500 |
| 489 // Load the map of the receiver, compute the keyed lookup cache hash | 501 // Load the map of the receiver, compute the keyed lookup cache hash |
| 490 // based on 32 bits of the map pointer and the name hash. | 502 // based on 32 bits of the map pointer and the name hash. |
| 491 __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 503 __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 492 __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift)); | 504 __ srawi(r6, r3, KeyedLookupCache::kMapHashShift); |
| 493 __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset)); | 505 __ lwz(r7, FieldMemOperand(key, Name::kHashFieldOffset)); |
| 494 __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift)); | 506 __ srawi(r7, r7, Name::kHashShift); |
| | 507 __ xor_(r6, r6, r7); |
| 495 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; | 508 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; |
| 496 __ And(r3, r3, Operand(mask)); | 509 __ mov(r7, Operand(mask)); |
| | 510 __ and_(r6, r6, r7, LeaveRC); |
| 497 | 511 |
| 498 // Load the key (consisting of map and unique name) from the cache and | 512 // Load the key (consisting of map and unique name) from the cache and |
| 499 // check for match. | 513 // check for match. |
| 500 Label load_in_object_property; | 514 Label load_in_object_property; |
| 501 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; | 515 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; |
| 502 Label hit_on_nth_entry[kEntriesPerBucket]; | 516 Label hit_on_nth_entry[kEntriesPerBucket]; |
| 503 ExternalReference cache_keys = | 517 ExternalReference cache_keys = |
| 504 ExternalReference::keyed_lookup_cache_keys(isolate); | 518 ExternalReference::keyed_lookup_cache_keys(isolate); |
| 505 | 519 |
| 506 __ mov(r4, Operand(cache_keys)); | 520 __ mov(r7, Operand(cache_keys)); |
| 507 __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1)); | 521 __ mr(r0, r5); |
| | 522 __ ShiftLeftImm(r5, r6, Operand(kPointerSizeLog2 + 1)); |
| | 523 __ add(r7, r7, r5); |
| | 524 __ mr(r5, r0); |
| 508 | 525 |
| 509 for (int i = 0; i < kEntriesPerBucket - 1; i++) { | 526 for (int i = 0; i < kEntriesPerBucket - 1; i++) { |
| 510 Label try_next_entry; | 527 Label try_next_entry; |
| 511 // Load map and move r4 to next entry. | 528 // Load map and move r7 to next entry. |
| 512 __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex)); | 529 __ LoadP(r8, MemOperand(r7)); |
| 513 __ cmp(r0, r5); | 530 __ addi(r7, r7, Operand(kPointerSize * 2)); |
| 514 __ b(ne, &try_next_entry); | 531 __ cmp(r3, r8); |
| 515 __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name | 532 __ bne(&try_next_entry); |
| 516 __ cmp(key, r5); | 533 __ LoadP(r8, MemOperand(r7, -kPointerSize)); // Load name |
| 517 __ b(eq, &hit_on_nth_entry[i]); | 534 __ cmp(key, r8); |
| | 535 __ beq(&hit_on_nth_entry[i]); |
| 518 __ bind(&try_next_entry); | 536 __ bind(&try_next_entry); |
| 519 } | 537 } |
| 520 | 538 |
| 521 // Last entry: Load map and move r4 to name. | 539 // Last entry: Load map and move r7 to name. |
| 522 __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); | 540 __ LoadP(r8, MemOperand(r7)); |
| 523 __ cmp(r0, r5); | 541 __ addi(r7, r7, Operand(kPointerSize)); |
| 524 __ b(ne, &slow); | 542 __ cmp(r3, r8); |
| 525 __ ldr(r5, MemOperand(r4)); | 543 __ bne(&slow); |
| 526 __ cmp(key, r5); | 544 __ LoadP(r8, MemOperand(r7)); |
| 527 __ b(ne, &slow); | 545 __ cmp(key, r8); |
| | 546 __ bne(&slow); |
| 528 | 547 |
| 529 // Get field offset. | 548 // Get field offset. |
| 530 // r0 : receiver's map | 549 // r3 : receiver's map |
| 531 // r3 : lookup cache index | 550 // r6 : lookup cache index |
| 532 ExternalReference cache_field_offsets = | 551 ExternalReference cache_field_offsets = |
| 533 ExternalReference::keyed_lookup_cache_field_offsets(isolate); | 552 ExternalReference::keyed_lookup_cache_field_offsets(isolate); |
| 534 | 553 |
| 535 // Hit on nth entry. | 554 // Hit on nth entry. |
| 536 for (int i = kEntriesPerBucket - 1; i >= 0; i--) { | 555 for (int i = kEntriesPerBucket - 1; i >= 0; i--) { |
| 537 __ bind(&hit_on_nth_entry[i]); | 556 __ bind(&hit_on_nth_entry[i]); |
| 538 __ mov(r4, Operand(cache_field_offsets)); | 557 __ mov(r7, Operand(cache_field_offsets)); |
| 539 if (i != 0) { | 558 if (i != 0) { |
| 540 __ add(r3, r3, Operand(i)); | 559 __ addi(r6, r6, Operand(i)); |
| 541 } | 560 } |
| 542 __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); | 561 __ ShiftLeftImm(r8, r6, Operand(2)); |
| 543 __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset)); | 562 __ lwzx(r8, MemOperand(r8, r7)); |
| 544 __ sub(r5, r5, r6, SetCC); | 563 __ lbz(r9, FieldMemOperand(r3, Map::kInObjectPropertiesOffset)); |
| 545 __ b(ge, &property_array_property); | 564 __ sub(r8, r8, r9); |
| | 565 __ cmpi(r8, Operand::Zero()); |
| | 566 __ bge(&property_array_property); |
| 546 if (i != 0) { | 567 if (i != 0) { |
| 547 __ jmp(&load_in_object_property); | 568 __ b(&load_in_object_property); |
| 548 } | 569 } |
| 549 } | 570 } |
| 550 | 571 |
| 551 // Load in-object property. | 572 // Load in-object property. |
| 552 __ bind(&load_in_object_property); | 573 __ bind(&load_in_object_property); |
| 553 __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset)); | 574 __ lbz(r9, FieldMemOperand(r3, Map::kInstanceSizeOffset)); |
| 554 __ add(r6, r6, r5); // Index from start of object. | 575 __ add(r9, r9, r8); // Index from start of object. |
| 555 __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. | 576 __ subi(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. |
| 556 __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2)); | 577 __ ShiftLeftImm(r3, r9, Operand(kPointerSizeLog2)); |
| | 578 __ LoadPX(r3, MemOperand(r3, receiver)); |
| 557 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, | 579 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, |
| 558 r4, r3); | 580 r7, r6); |
| 559 __ Ret(); | 581 __ Ret(); |
| 560 | 582 |
| 561 // Load property array property. | 583 // Load property array property. |
| 562 __ bind(&property_array_property); | 584 __ bind(&property_array_property); |
| 563 __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 585 __ LoadP(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 564 __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 586 __ addi(receiver, receiver, |
| 565 __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2)); | 587 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| | 588 __ ShiftLeftImm(r3, r8, Operand(kPointerSizeLog2)); |
| | 589 __ LoadPX(r3, MemOperand(r3, receiver)); |
| 566 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, | 590 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, |
| 567 r4, r3); | 591 r7, r6); |
| 568 __ Ret(); | 592 __ Ret(); |
| 569 | 593 |
| 570 // Do a quick inline probe of the receiver's dictionary, if it | 594 // Do a quick inline probe of the receiver's dictionary, if it |
| 571 // exists. | 595 // exists. |
| 572 __ bind(&probe_dictionary); | 596 __ bind(&probe_dictionary); |
| 573 // r3: elements | 597 // r6: elements |
| 574 __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 598 __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 575 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 599 __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); |
| 576 GenerateGlobalInstanceTypeCheck(masm, r0, &slow); | 600 GenerateGlobalInstanceTypeCheck(masm, r3, &slow); |
| 577 // Load the property to r0. | 601 // Load the property to r3. |
| 578 GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4); | 602 GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7); |
| 579 __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4, | 603 __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r7, |
| 580 r3); | 604 r6); |
| 581 __ Ret(); | 605 __ Ret(); |
| 582 | 606 |
| 583 __ bind(&index_name); | 607 __ bind(&index_name); |
| 584 __ IndexFromHash(r3, key); | 608 __ IndexFromHash(r6, key); |
| 585 // Now jump to the place where smi keys are handled. | 609 // Now jump to the place where smi keys are handled. |
| 586 __ jmp(&index_smi); | 610 __ b(&index_smi); |
| 587 } | 611 } |
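The keyed lookup cache hash survives the port intact, just spelled out: srawi replaces the inline ASR operands, and the capacity mask is materialized in a register before the and_. The computation, as a sketch with stand-in shift amounts and mask:

```cpp
#include <cstdint>

constexpr int kMapHashShiftSk = 5;  // stand-in values
constexpr int kNameHashShiftSk = 2;
constexpr uint32_t kIndexMaskSk = 0x3f;

// Cache bucket index from the map word and the name's hash field.
inline uint32_t LookupCacheIndex(uint32_t map_bits, uint32_t hash_field) {
  return ((map_bits >> kMapHashShiftSk) ^ (hash_field >> kNameHashShiftSk)) &
         kIndexMaskSk;
}
```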
| 588 | 612 |
| 589 | 613 |
| 590 void KeyedLoadIC::GenerateString(MacroAssembler* masm) { | 614 void KeyedLoadIC::GenerateString(MacroAssembler* masm) { |
| 591 // Return address is in lr. | 615 // Return address is in lr. |
| 592 Label miss; | 616 Label miss; |
| 593 | 617 |
| 594 Register receiver = LoadDescriptor::ReceiverRegister(); | 618 Register receiver = LoadDescriptor::ReceiverRegister(); |
| 595 Register index = LoadDescriptor::NameRegister(); | 619 Register index = LoadDescriptor::NameRegister(); |
| 596 Register scratch = r3; | 620 Register scratch = r6; |
| 597 Register result = r0; | 621 Register result = r3; |
| 598 DCHECK(!scratch.is(receiver) && !scratch.is(index)); | 622 DCHECK(!scratch.is(receiver) && !scratch.is(index)); |
| 599 | 623 |
| 600 StringCharAtGenerator char_at_generator(receiver, index, scratch, result, | 624 StringCharAtGenerator char_at_generator(receiver, index, scratch, result, |
| 601 &miss, // When not a string. | 625 &miss, // When not a string. |
| 602 &miss, // When not a number. | 626 &miss, // When not a number. |
| 603 &miss, // When index out of range. | 627 &miss, // When index out of range. |
| 604 STRING_INDEX_IS_ARRAY_INDEX); | 628 STRING_INDEX_IS_ARRAY_INDEX); |
| 605 char_at_generator.GenerateFast(masm); | 629 char_at_generator.GenerateFast(masm); |
| 606 __ Ret(); | 630 __ Ret(); |
| 607 | 631 |
| (...skipping 20 matching lines...) |
| 628 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, | 652 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, |
| 629 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, | 653 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, |
| 630 Register value, Register key, Register receiver, Register receiver_map, | 654 Register value, Register key, Register receiver, Register receiver_map, |
| 631 Register elements_map, Register elements) { | 655 Register elements_map, Register elements) { |
| 632 Label transition_smi_elements; | 656 Label transition_smi_elements; |
| 633 Label finish_object_store, non_double_value, transition_double_elements; | 657 Label finish_object_store, non_double_value, transition_double_elements; |
| 634 Label fast_double_without_map_check; | 658 Label fast_double_without_map_check; |
| 635 | 659 |
| 636 // Fast case: Do the store, could be either Object or double. | 660 // Fast case: Do the store, could be either Object or double. |
| 637 __ bind(fast_object); | 661 __ bind(fast_object); |
| 638 Register scratch_value = r4; | 662 Register scratch_value = r7; |
| 639 Register address = r5; | 663 Register address = r8; |
| 640 if (check_map == kCheckMap) { | 664 if (check_map == kCheckMap) { |
| 641 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 665 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 642 __ cmp(elements_map, | 666 __ mov(scratch_value, |
| 643 Operand(masm->isolate()->factory()->fixed_array_map())); | 667 Operand(masm->isolate()->factory()->fixed_array_map())); |
| 644 __ b(ne, fast_double); | 668 __ cmp(elements_map, scratch_value); |
| | 669 __ bne(fast_double); |
| 645 } | 670 } |
| 646 | 671 |
| 647 // HOLECHECK: guards "A[i] = V" | 672 // HOLECHECK: guards "A[i] = V" |
| 648 // We have to go to the runtime if the current value is the hole because | 673 // We have to go to the runtime if the current value is the hole because |
| 649 // there may be a callback on the element | 674 // there may be a callback on the element |
| 650 Label holecheck_passed1; | 675 Label holecheck_passed1; |
| 651 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 676 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 652 __ ldr(scratch_value, | 677 __ SmiToPtrArrayOffset(scratch_value, key); |
| 653 MemOperand::PointerAddressFromSmiKey(address, key, PreIndex)); | 678 __ LoadPX(scratch_value, MemOperand(address, scratch_value)); |
| 654 __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value())); | 679 __ Cmpi(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()), |
| 655 __ b(ne, &holecheck_passed1); | 680 r0); |
| | 681 __ bne(&holecheck_passed1); |
| 656 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, | 682 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, |
| 657 slow); | 683 slow); |
| 658 | 684 |
| 659 __ bind(&holecheck_passed1); | 685 __ bind(&holecheck_passed1); |
| 660 | 686 |
| 661 // Smi stores don't require further checks. | 687 // Smi stores don't require further checks. |
| 662 Label non_smi_value; | 688 Label non_smi_value; |
| 663 __ JumpIfNotSmi(value, &non_smi_value); | 689 __ JumpIfNotSmi(value, &non_smi_value); |
| 664 | 690 |
| 665 if (increment_length == kIncrementLength) { | 691 if (increment_length == kIncrementLength) { |
| 666 // Add 1 to receiver->length. | 692 // Add 1 to receiver->length. |
| 667 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 693 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
| 668 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 694 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| | 695 r0); |
| 669 } | 696 } |
| 670 // It's irrelevant whether array is smi-only or not when writing a smi. | 697 // It's irrelevant whether array is smi-only or not when writing a smi. |
| 671 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 698 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 672 __ str(value, MemOperand::PointerAddressFromSmiKey(address, key)); | 699 __ SmiToPtrArrayOffset(scratch_value, key); |
| | 700 __ StorePX(value, MemOperand(address, scratch_value)); |
| 673 __ Ret(); | 701 __ Ret(); |
| 674 | 702 |
| 675 __ bind(&non_smi_value); | 703 __ bind(&non_smi_value); |
| 676 // Escape to elements kind transition case. | 704 // Escape to elements kind transition case. |
| 677 __ CheckFastObjectElements(receiver_map, scratch_value, | 705 __ CheckFastObjectElements(receiver_map, scratch_value, |
| 678 &transition_smi_elements); | 706 &transition_smi_elements); |
| 679 | 707 |
| 680 // Fast elements array, store the value to the elements backing store. | 708 // Fast elements array, store the value to the elements backing store. |
| 681 __ bind(&finish_object_store); | 709 __ bind(&finish_object_store); |
| 682 if (increment_length == kIncrementLength) { | 710 if (increment_length == kIncrementLength) { |
| 683 // Add 1 to receiver->length. | 711 // Add 1 to receiver->length. |
| 684 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 712 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
| 685 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 713 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| | 714 r0); |
| 686 } | 715 } |
| 687 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 716 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 688 __ add(address, address, Operand::PointerOffsetFromSmiKey(key)); | 717 __ SmiToPtrArrayOffset(scratch_value, key); |
| 689 __ str(value, MemOperand(address)); | 718 __ StorePUX(value, MemOperand(address, scratch_value)); |
| 690 // Update write barrier for the elements array address. | 719 // Update write barrier for the elements array address. |
| 691 __ mov(scratch_value, value); // Preserve the value which is returned. | 720 __ mr(scratch_value, value); // Preserve the value which is returned. |
| 692 __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved, | 721 __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved, |
| 693 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 722 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| 694 __ Ret(); | 723 __ Ret(); |
| 695 | 724 |
| 696 __ bind(fast_double); | 725 __ bind(fast_double); |
| 697 if (check_map == kCheckMap) { | 726 if (check_map == kCheckMap) { |
| 698 // Check for fast double array case. If this fails, call through to the | 727 // Check for fast double array case. If this fails, call through to the |
| 699 // runtime. | 728 // runtime. |
| 700 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); | 729 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); |
| 701 __ b(ne, slow); | 730 __ bne(slow); |
| 702 } | 731 } |
| 703 | 732 |
| 704 // HOLECHECK: guards "A[i] double hole?" | 733 // HOLECHECK: guards "A[i] double hole?" |
| 705 // We have to see if the double version of the hole is present. If so | 734 // We have to see if the double version of the hole is present. If so |
| 706 // go to the runtime. | 735 // go to the runtime. |
| 707 __ add(address, elements, | 736 __ addi(address, elements, |
| 708 Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) - | 737 Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset - |
| 709 kHeapObjectTag)); | 738 kHeapObjectTag))); |
| 710 __ ldr(scratch_value, | 739 __ SmiToDoubleArrayOffset(scratch_value, key); |
| 711 MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex)); | 740 __ lwzx(scratch_value, MemOperand(address, scratch_value)); |
| 712 __ cmp(scratch_value, Operand(kHoleNanUpper32)); | 741 __ Cmpi(scratch_value, Operand(kHoleNanUpper32), r0); |
| 713 __ b(ne, &fast_double_without_map_check); | 742 __ bne(&fast_double_without_map_check); |
| 714 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, | 743 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, |
| 715 slow); | 744 slow); |
| 716 | 745 |
| 717 __ bind(&fast_double_without_map_check); | 746 __ bind(&fast_double_without_map_check); |
| 718 __ StoreNumberToDoubleElements(value, key, elements, r3, d0, | 747 __ StoreNumberToDoubleElements(value, key, elements, r6, d0, |
| 719 &transition_double_elements); | 748 &transition_double_elements); |
| 720 if (increment_length == kIncrementLength) { | 749 if (increment_length == kIncrementLength) { |
| 721 // Add 1 to receiver->length. | 750 // Add 1 to receiver->length. |
| 722 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 751 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
| 723 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 752 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| | 753 r0); |
| 724 } | 754 } |
| 725 __ Ret(); | 755 __ Ret(); |
| 726 | 756 |
| 727 __ bind(&transition_smi_elements); | 757 __ bind(&transition_smi_elements); |
| 728 // Transition the array appropriately depending on the value type. | 758 // Transition the array appropriately depending on the value type. |
| 729 __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset)); | 759 __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 730 __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); | 760 __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex); |
| 731 __ b(ne, &non_double_value); | 761 __ bne(&non_double_value); |
| 732 | 762 |
| 733 // Value is a double. Transition FAST_SMI_ELEMENTS -> | 763 // Value is a double. Transition FAST_SMI_ELEMENTS -> |
| 734 // FAST_DOUBLE_ELEMENTS and complete the store. | 764 // FAST_DOUBLE_ELEMENTS and complete the store. |
| 735 __ LoadTransitionedArrayMapConditional( | 765 __ LoadTransitionedArrayMapConditional( |
| 736 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow); | 766 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r7, slow); |
| 737 AllocationSiteMode mode = | 767 AllocationSiteMode mode = |
| 738 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); | 768 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); |
| 739 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, | 769 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, |
| 740 receiver_map, mode, slow); | 770 receiver_map, mode, slow); |
| 741 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 771 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 742 __ jmp(&fast_double_without_map_check); | 772 __ b(&fast_double_without_map_check); |
| 743 | 773 |
| 744 __ bind(&non_double_value); | 774 __ bind(&non_double_value); |
| 745 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS | 775 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS |
| 746 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, | 776 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, |
| 747 receiver_map, r4, slow); | 777 receiver_map, r7, slow); |
| 748 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); | 778 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); |
| 749 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | 779 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
| 750 masm, receiver, key, value, receiver_map, mode, slow); | 780 masm, receiver, key, value, receiver_map, mode, slow); |
| 751 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 781 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 752 __ jmp(&finish_object_store); | 782 __ b(&finish_object_store); |
| 753 | 783 |
| 754 __ bind(&transition_double_elements); | 784 __ bind(&transition_double_elements); |
| 755 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a | 785 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a |
| 756 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and | 786 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and |
| 757 // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS. | 787 // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS. |
| 758 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, | 788 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, |
| 759 receiver_map, r4, slow); | 789 receiver_map, r7, slow); |
| 760 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); | 790 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); |
| 761 ElementsTransitionGenerator::GenerateDoubleToObject( | 791 ElementsTransitionGenerator::GenerateDoubleToObject( |
| 762 masm, receiver, key, value, receiver_map, mode, slow); | 792 masm, receiver, key, value, receiver_map, mode, slow); |
| 763 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 793 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 764 __ jmp(&finish_object_store); | 794 __ b(&finish_object_store); |
| 765 } | 795 } |
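The three transition blocks above implement a one-way widening lattice over elements kinds: stores never narrow a backing store, only generalize it. A plain-C++ restatement of the decision they encode (illustrative only; the enumerator names mirror V8's ElementsKind):

    enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

    // Given the current kind and a coarse classification of the incoming
    // value, pick the kind the backing store must transition to.
    ElementsKind TransitionFor(ElementsKind current, bool value_is_smi,
                               bool value_is_heap_number) {
      if (current == FAST_SMI_ELEMENTS && !value_is_smi) {
        // A heap number widens to unboxed doubles; any other heap object
        // forces the generic tagged representation.
        return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
      }
      if (current == FAST_DOUBLE_ELEMENTS && !value_is_smi &&
          !value_is_heap_number) {
        return FAST_ELEMENTS;  // a non-number object cannot be stored unboxed
      }
      return current;  // the store is representable in the current kind
    }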
| 766 | 796 |
| 767 | 797 |
| 768 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, | 798 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, |
| 769 StrictMode strict_mode) { | 799 StrictMode strict_mode) { |
| 770 // ---------- S t a t e -------------- | 800 // ---------- S t a t e -------------- |
| 771 // -- r0 : value | 801 // -- r3 : value |
| 772 // -- r1 : key | 802 // -- r4 : key |
| 773 // -- r2 : receiver | 803 // -- r5 : receiver |
| 774 // -- lr : return address | 804 // -- lr : return address |
| 775 // ----------------------------------- | 805 // ----------------------------------- |
| 776 Label slow, fast_object, fast_object_grow; | 806 Label slow, fast_object, fast_object_grow; |
| 777 Label fast_double, fast_double_grow; | 807 Label fast_double, fast_double_grow; |
| 778 Label array, extra, check_if_double_array; | 808 Label array, extra, check_if_double_array; |
| 779 | 809 |
| 780 // Register usage. | 810 // Register usage. |
| 781 Register value = StoreDescriptor::ValueRegister(); | 811 Register value = StoreDescriptor::ValueRegister(); |
| 782 Register key = StoreDescriptor::NameRegister(); | 812 Register key = StoreDescriptor::NameRegister(); |
| 783 Register receiver = StoreDescriptor::ReceiverRegister(); | 813 Register receiver = StoreDescriptor::ReceiverRegister(); |
| 784 DCHECK(receiver.is(r1)); | 814 DCHECK(receiver.is(r4)); |
| 785 DCHECK(key.is(r2)); | 815 DCHECK(key.is(r5)); |
| 786 DCHECK(value.is(r0)); | 816 DCHECK(value.is(r3)); |
| 787 Register receiver_map = r3; | 817 Register receiver_map = r6; |
| 788 Register elements_map = r6; | 818 Register elements_map = r9; |
| 789 Register elements = r9; // Elements array of the receiver. | 819 Register elements = r10; // Elements array of the receiver. |
| 790 // r4 and r5 are used as general scratch registers. | 820 // r7 and r8 are used as general scratch registers. |
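A likely reason for the uniform register shift throughout this port: the PPC ELF ABI reserves r0 (which reads as zero in some addressing modes), r1 (the stack pointer), and r2 (the TOC pointer), so the first freely usable GPR is r3. Every ARM assignment therefore moves up by three: value/key/receiver go from r0/r1/r2 to r3/r4/r5, and the scratch set follows.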
| 791 | 821 |
| 792 // Check that the key is a smi. | 822 // Check that the key is a smi. |
| 793 __ JumpIfNotSmi(key, &slow); | 823 __ JumpIfNotSmi(key, &slow); |
| 794 // Check that the object isn't a smi. | 824 // Check that the object isn't a smi. |
| 795 __ JumpIfSmi(receiver, &slow); | 825 __ JumpIfSmi(receiver, &slow); |
| 796 // Get the map of the object. | 826 // Get the map of the object. |
| 797 __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 827 __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 798 // Check that the receiver does not require access checks and is not observed. | 828 // Check that the receiver does not require access checks and is not observed. |
| 799 // The generic stub does not perform map checks or handle observed objects. | 829 // The generic stub does not perform map checks or handle observed objects. |
| 800 __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); | 830 __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); |
| 801 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); | 831 __ andi(r0, ip, |
| 802 __ b(ne, &slow); | 832 Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); |
| 833 __ bne(&slow, cr0); |
| 803 // Check if the object is a JS array or not. | 834 // Check if the object is a JS array or not. |
| 804 __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); | 835 __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); |
| 805 __ cmp(r4, Operand(JS_ARRAY_TYPE)); | 836 __ cmpi(r7, Operand(JS_ARRAY_TYPE)); |
| 806 __ b(eq, &array); | 837 __ beq(&array); |
| 807 // Check that the object is some kind of JSObject. | 838 // Check that the object is some kind of JSObject. |
| 808 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); | 839 __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE)); |
| 809 __ b(lt, &slow); | 840 __ blt(&slow); |
| 810 | 841 |
| 811 // Object case: Check key against length in the elements array. | 842 // Object case: Check key against length in the elements array. |
| 812 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 843 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 813 // Check array bounds. Both the key and the length of FixedArray are smis. | 844 // Check array bounds. Both the key and the length of FixedArray are smis. |
| 814 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 845 __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 815 __ cmp(key, Operand(ip)); | 846 __ cmpl(key, ip); |
| 816 __ b(lo, &fast_object); | 847 __ blt(&fast_object); |
| 817 | 848 |
| 818 // Slow case, handle jump to runtime. | 849 // Slow case, handle jump to runtime. |
| 819 __ bind(&slow); | 850 __ bind(&slow); |
| 820 // Entry registers are intact. | 851 // Entry registers are intact. |
| 821 // r0: value. | 852 // r3: value. |
| 822 // r1: key. | 853 // r4: key. |
| 823 // r2: receiver. | 854 // r5: receiver. |
| 824 PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode); | 855 PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode); |
| 825 | 856 |
| 826 // Extra capacity case: Check if there is extra capacity to | 857 // Extra capacity case: Check if there is extra capacity to |
| 827 // perform the store and update the length. Used for adding one | 858 // perform the store and update the length. Used for adding one |
| 828 // element to the array by writing to array[array.length]. | 859 // element to the array by writing to array[array.length]. |
| 829 __ bind(&extra); | 860 __ bind(&extra); |
| 830 // Condition code from comparing key and array length is still available. | 861 // Condition code from comparing key and array length is still available. |
| 831 __ b(ne, &slow); // Only support writing to array[array.length]. | 862 __ bne(&slow); // Only support writing to array[array.length]. |
| 832 // Check for room in the elements backing store. | 863 // Check for room in the elements backing store. |
| 833 // Both the key and the length of FixedArray are smis. | 864 // Both the key and the length of FixedArray are smis. |
| 834 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 865 __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 835 __ cmp(key, Operand(ip)); | 866 __ cmpl(key, ip); |
| 836 __ b(hs, &slow); | 867 __ bge(&slow); |
| 837 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 868 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 838 __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map())); | 869 __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map())); |
| 839 __ b(ne, &check_if_double_array); | 870 __ cmp(elements_map, ip); // PPC: ip can be re-used here |
| 840 __ jmp(&fast_object_grow); | 871 __ bne(&check_if_double_array); |
| 872 __ b(&fast_object_grow); |
| 841 | 873 |
| 842 __ bind(&check_if_double_array); | 874 __ bind(&check_if_double_array); |
| 843 __ cmp(elements_map, | 875 __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map())); |
| 844 Operand(masm->isolate()->factory()->fixed_double_array_map())); | 876 __ cmp(elements_map, ip); // PPC: another ip re-use |
| 845 __ b(ne, &slow); | 877 __ bne(&slow); |
| 846 __ jmp(&fast_double_grow); | 878 __ b(&fast_double_grow); |
| 847 | 879 |
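To make the grow path explicit: the branch at &extra only falls through when the store is an exact append, and the capacity check above rejects anything the current backing store cannot hold. The same condition in plain C++, as a sketch rather than V8 code:

    #include <cstdint>

    // key, length and capacity are the untagged values of the smis the
    // stub compares in registers.
    bool CanGrowInPlace(intptr_t key, intptr_t length, intptr_t capacity) {
      // key != length would create a hole, so only array[array.length] = v
      // grows in place; everything else goes to the runtime.
      return key == length && key < capacity;
    }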
| 848 // Array case: Get the length and the elements array from the JS | 880 // Array case: Get the length and the elements array from the JS |
| 849 // array. Check that the array is in fast mode (and writable); if it | 881 // array. Check that the array is in fast mode (and writable); if it |
| 850 // is, the length is always a smi. | 882 // is, the length is always a smi. |
| 851 __ bind(&array); | 883 __ bind(&array); |
| 852 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 884 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 853 | 885 |
| 854 // Check the key against the length in the array. | 886 // Check the key against the length in the array. |
| 855 __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 887 __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 856 __ cmp(key, Operand(ip)); | 888 __ cmpl(key, ip); |
| 857 __ b(hs, &extra); | 889 __ bge(&extra); |
| 858 | 890 |
| 859 KeyedStoreGenerateGenericHelper( | 891 KeyedStoreGenerateGenericHelper( |
| 860 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, | 892 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, |
| 861 value, key, receiver, receiver_map, elements_map, elements); | 893 value, key, receiver, receiver_map, elements_map, elements); |
| 862 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, | 894 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, |
| 863 &slow, kDontCheckMap, kIncrementLength, value, | 895 &slow, kDontCheckMap, kIncrementLength, value, |
| 864 key, receiver, receiver_map, elements_map, | 896 key, receiver, receiver_map, elements_map, |
| 865 elements); | 897 elements); |
| 866 } | 898 } |
| 867 | 899 |
| 868 | 900 |
| 869 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | 901 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { |
| 870 Register receiver = StoreDescriptor::ReceiverRegister(); | 902 Register receiver = StoreDescriptor::ReceiverRegister(); |
| 871 Register name = StoreDescriptor::NameRegister(); | 903 Register name = StoreDescriptor::NameRegister(); |
| 872 DCHECK(receiver.is(r1)); | 904 DCHECK(receiver.is(r4)); |
| 873 DCHECK(name.is(r2)); | 905 DCHECK(name.is(r5)); |
| 874 DCHECK(StoreDescriptor::ValueRegister().is(r0)); | 906 DCHECK(StoreDescriptor::ValueRegister().is(r3)); |
| 875 | 907 |
| 876 // The receiver and name are already in registers; probe the stub cache. | 908 // The receiver and name are already in registers; probe the stub cache. |
| 877 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( | 909 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( |
| 878 Code::ComputeHandlerFlags(Code::STORE_IC)); | 910 Code::ComputeHandlerFlags(Code::STORE_IC)); |
| 879 | 911 |
| 880 masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver, | 912 masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver, |
| 881 name, r3, r4, r5, r6); | 913 name, r6, r7, r8, r9); |
| 882 | 914 |
| 883 // Cache miss: Jump to runtime. | 915 // Cache miss: Jump to runtime. |
| 884 GenerateMiss(masm); | 916 GenerateMiss(masm); |
| 885 } | 917 } |
| 886 | 918 |
| 887 | 919 |
| 888 void StoreIC::GenerateMiss(MacroAssembler* masm) { | 920 void StoreIC::GenerateMiss(MacroAssembler* masm) { |
| 889 __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), | 921 __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), |
| 890 StoreDescriptor::ValueRegister()); | 922 StoreDescriptor::ValueRegister()); |
| 891 | 923 |
| 892 // Perform tail call to the entry. | 924 // Perform tail call to the entry. |
| 893 ExternalReference ref = | 925 ExternalReference ref = |
| 894 ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); | 926 ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); |
| 895 __ TailCallExternalReference(ref, 3, 1); | 927 __ TailCallExternalReference(ref, 3, 1); |
| 896 } | 928 } |
| 897 | 929 |
| 898 | 930 |
| 899 void StoreIC::GenerateNormal(MacroAssembler* masm) { | 931 void StoreIC::GenerateNormal(MacroAssembler* masm) { |
| 900 Label miss; | 932 Label miss; |
| 901 Register receiver = StoreDescriptor::ReceiverRegister(); | 933 Register receiver = StoreDescriptor::ReceiverRegister(); |
| 902 Register name = StoreDescriptor::NameRegister(); | 934 Register name = StoreDescriptor::NameRegister(); |
| 903 Register value = StoreDescriptor::ValueRegister(); | 935 Register value = StoreDescriptor::ValueRegister(); |
| 904 Register dictionary = r3; | 936 Register dictionary = r6; |
| 905 DCHECK(receiver.is(r1)); | 937 DCHECK(receiver.is(r4)); |
| 906 DCHECK(name.is(r2)); | 938 DCHECK(name.is(r5)); |
| 907 DCHECK(value.is(r0)); | 939 DCHECK(value.is(r3)); |
| 908 | 940 |
| 909 __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 941 __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 910 | 942 |
| 911 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5); | 943 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r7, r8); |
| 912 Counters* counters = masm->isolate()->counters(); | 944 Counters* counters = masm->isolate()->counters(); |
| 913 __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5); | 945 __ IncrementCounter(counters->store_normal_hit(), 1, r7, r8); |
| 914 __ Ret(); | 946 __ Ret(); |
| 915 | 947 |
| 916 __ bind(&miss); | 948 __ bind(&miss); |
| 917 __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5); | 949 __ IncrementCounter(counters->store_normal_miss(), 1, r7, r8); |
| 918 GenerateMiss(masm); | 950 GenerateMiss(masm); |
| 919 } | 951 } |
| 920 | 952 |
| 921 | 953 |
| 922 #undef __ | 954 #undef __ |
| 923 | 955 |
| 924 | 956 |
| 925 Condition CompareIC::ComputeCondition(Token::Value op) { | 957 Condition CompareIC::ComputeCondition(Token::Value op) { |
| 926 switch (op) { | 958 switch (op) { |
| 927 case Token::EQ_STRICT: | 959 case Token::EQ_STRICT: |
| (...skipping 19 matching lines...) |
| 947 Address cmp_instruction_address = | 979 Address cmp_instruction_address = |
| 948 Assembler::return_address_from_call_start(address); | 980 Assembler::return_address_from_call_start(address); |
| 949 | 981 |
| 950 // If the instruction following the call is not a cmp rx, #yyy, nothing | 982 // If the instruction following the call is not a cmp rx, #yyy, nothing |
| 951 // was inlined. | 983 // was inlined. |
| 952 Instr instr = Assembler::instr_at(cmp_instruction_address); | 984 Instr instr = Assembler::instr_at(cmp_instruction_address); |
| 953 return Assembler::IsCmpImmediate(instr); | 985 return Assembler::IsCmpImmediate(instr); |
| 954 } | 986 } |
| 955 | 987 |
| 956 | 988 |
| 989 // |
| 990 // This code is paired with the JumpPatchSite class in full-codegen-ppc.cc |
| 991 // |
| 957 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { | 992 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { |
| 958 Address cmp_instruction_address = | 993 Address cmp_instruction_address = |
| 959 Assembler::return_address_from_call_start(address); | 994 Assembler::return_address_from_call_start(address); |
| 960 | 995 |
| 961 // If the instruction following the call is not a cmp rx, #yyy, nothing | 996 // If the instruction following the call is not a cmp rx, #yyy, nothing |
| 962 // was inlined. | 997 // was inlined. |
| 963 Instr instr = Assembler::instr_at(cmp_instruction_address); | 998 Instr instr = Assembler::instr_at(cmp_instruction_address); |
| 964 if (!Assembler::IsCmpImmediate(instr)) { | 999 if (!Assembler::IsCmpImmediate(instr)) { |
| 965 return; | 1000 return; |
| 966 } | 1001 } |
| 967 | 1002 |
| 968 // The delta to the start of the map check instruction and the | 1003 // The delta to the start of the map check instruction and the |
| 969 // condition code used at the patched jump. | 1004 // condition code used at the patched jump. |
| 970 int delta = Assembler::GetCmpImmediateRawImmediate(instr); | 1005 int delta = Assembler::GetCmpImmediateRawImmediate(instr); |
| 971 delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask; | 1006 delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff16Mask; |
| 972 // If the delta is 0, the instruction is cmp r0, #0, which also signals that | 1007 // If the delta is 0, the instruction is cmp r0, #0, which also signals that |
| 973 // nothing was inlined. | 1008 // nothing was inlined. |
| 974 if (delta == 0) { | 1009 if (delta == 0) { |
| 975 return; | 1010 return; |
| 976 } | 1011 } |
| 977 | 1012 |
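Put differently, the marker cmpi after the IC call does double duty: its 16-bit immediate and its RA register code together encode the distance back to the inlined smi check. A sketch of the recovery arithmetic, with assumed constant values (V8's kOff16Mask and Instruction::kInstrSize are the authoritative definitions):

    #include <cstdint>

    const int kInstrSize = 4;       // every PPC instruction is 4 bytes
    const int kOff16Mask = 0xFFFF;  // assumed 16-bit D-field mask

    // Mirrors the arithmetic above: delta counts instructions back from
    // the recording cmpi to the start of the inlined check.
    uint8_t* PatchAddress(uint8_t* cmp_instruction_address, int raw_imm,
                          int ra_code) {
      int delta = raw_imm + ra_code * kOff16Mask;
      return cmp_instruction_address - delta * kInstrSize;
    }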
| 978 if (FLAG_trace_ic) { | 1013 if (FLAG_trace_ic) { |
| 979 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address, | 1014 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address, |
| 980 cmp_instruction_address, delta); | 1015 cmp_instruction_address, delta); |
| 981 } | 1016 } |
| 982 | 1017 |
| 983 Address patch_address = | 1018 Address patch_address = |
| 984 cmp_instruction_address - delta * Instruction::kInstrSize; | 1019 cmp_instruction_address - delta * Instruction::kInstrSize; |
| 985 Instr instr_at_patch = Assembler::instr_at(patch_address); | 1020 Instr instr_at_patch = Assembler::instr_at(patch_address); |
| 986 Instr branch_instr = | 1021 Instr branch_instr = |
| 987 Assembler::instr_at(patch_address + Instruction::kInstrSize); | 1022 Assembler::instr_at(patch_address + Instruction::kInstrSize); |
| 988 // This is patching a conditional "jump if not smi/jump if smi" site. | 1023 // This is patching a conditional "jump if not smi/jump if smi" site. |
| 989 // The check is enabled by changing from | 1024 // The check is enabled by changing from |
| 990 // cmp rx, rx | 1025 // cmp cr0, rx, rx |
| 991 // b eq/ne, <target> | |
| 992 // to | 1026 // to |
| 993 // tst rx, #kSmiTagMask | 1027 // rlwinm(r0, value, 0, 31, 31, SetRC); |
| 994 // b ne/eq, <target> | 1028 // bc(label, BT/BF, 2) |
| 995 // and vice-versa to be disabled again. | 1029 // and vice-versa to be disabled again. |
| 996 CodePatcher patcher(patch_address, 2); | 1030 CodePatcher patcher(patch_address, 2); |
| 997 Register reg = Assembler::GetRn(instr_at_patch); | 1031 Register reg = Assembler::GetRA(instr_at_patch); |
| 998 if (check == ENABLE_INLINED_SMI_CHECK) { | 1032 if (check == ENABLE_INLINED_SMI_CHECK) { |
| 999 DCHECK(Assembler::IsCmpRegister(instr_at_patch)); | 1033 DCHECK(Assembler::IsCmpRegister(instr_at_patch)); |
| 1000 DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(), | 1034 DCHECK_EQ(Assembler::GetRA(instr_at_patch).code(), |
| 1001 Assembler::GetRm(instr_at_patch).code()); | 1035 Assembler::GetRB(instr_at_patch).code()); |
| 1002 patcher.masm()->tst(reg, Operand(kSmiTagMask)); | 1036 patcher.masm()->TestIfSmi(reg, r0); |
| 1003 } else { | 1037 } else { |
| 1004 DCHECK(check == DISABLE_INLINED_SMI_CHECK); | 1038 DCHECK(check == DISABLE_INLINED_SMI_CHECK); |
| 1005 DCHECK(Assembler::IsTstImmediate(instr_at_patch)); | 1039 #if V8_TARGET_ARCH_PPC64 |
| 1006 patcher.masm()->cmp(reg, reg); | 1040 DCHECK(Assembler::IsRldicl(instr_at_patch)); |
| 1041 #else |
| 1042 DCHECK(Assembler::IsRlwinm(instr_at_patch)); |
| 1043 #endif |
| 1044 patcher.masm()->cmp(reg, reg, cr0); |
| 1007 } | 1045 } |
| 1008 DCHECK(Assembler::IsBranch(branch_instr)); | 1046 DCHECK(Assembler::IsBranch(branch_instr)); |
| 1047 |
| 1048 // Invert the logic of the branch |
| 1009 if (Assembler::GetCondition(branch_instr) == eq) { | 1049 if (Assembler::GetCondition(branch_instr) == eq) { |
| 1010 patcher.EmitCondition(ne); | 1050 patcher.EmitCondition(ne); |
| 1011 } else { | 1051 } else { |
| 1012 DCHECK(Assembler::GetCondition(branch_instr) == ne); | 1052 DCHECK(Assembler::GetCondition(branch_instr) == ne); |
| 1013 patcher.EmitCondition(eq); | 1053 patcher.EmitCondition(eq); |
| 1014 } | 1054 } |
| 1015 } | 1055 } |
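The patched-in TestIfSmi relies on V8's pointer tagging, in which a smi carries a clear low bit. A minimal sketch of the predicate the rlwinm computes, assuming the usual kSmiTag == 0 / kSmiTagMask == 1 scheme:

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // low tag bit; 0 marks a smi

    // rlwinm r0, value, 0, 31, 31, SetRC isolates bit 0 and updates cr0,
    // the register equivalent of (value & kSmiTagMask) with a flags update.
    bool IsSmi(intptr_t tagged_value) {
      return (tagged_value & kSmiTagMask) == 0;
    }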
| 1016 } | 1056 } |
| 1017 } // namespace v8::internal | 1057 } // namespace v8::internal |
| 1018 | 1058 |
| 1019 #endif // V8_TARGET_ARCH_ARM | 1059 #endif // V8_TARGET_ARCH_PPC |