| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // |
| 3 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 4 // |
| 2 // Use of this source code is governed by a BSD-style license that can be | 5 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 6 // found in the LICENSE file. |
| 4 | 7 |
| 5 #include "src/v8.h" | 8 #include "src/v8.h" |
| 6 | 9 |
| 7 #if V8_TARGET_ARCH_ARM | 10 #if V8_TARGET_ARCH_PPC |
| 8 | 11 |
| 9 #include "src/arm/assembler-arm.h" | 12 #include "src/ppc/assembler-ppc.h" |
| 13 |
| 10 #include "src/code-stubs.h" | 14 #include "src/code-stubs.h" |
| 11 #include "src/codegen.h" | 15 #include "src/codegen.h" |
| 12 #include "src/disasm.h" | 16 #include "src/disasm.h" |
| 13 #include "src/ic-inl.h" | 17 #include "src/ic-inl.h" |
| 14 #include "src/runtime.h" | 18 #include "src/runtime.h" |
| 15 #include "src/stub-cache.h" | 19 #include "src/stub-cache.h" |
| 16 | 20 |
| 17 namespace v8 { | 21 namespace v8 { |
| 18 namespace internal { | 22 namespace internal { |
| 19 | 23 |
| 20 | 24 |
| 21 // ---------------------------------------------------------------------------- | 25 // ---------------------------------------------------------------------------- |
| 22 // Static IC stub generators. | 26 // Static IC stub generators. |
| 23 // | 27 // |
| 24 | 28 |
| 25 #define __ ACCESS_MASM(masm) | 29 #define __ ACCESS_MASM(masm) |
| 26 | 30 |
| 27 | 31 |
| 28 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, | 32 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, |
| 29 Register type, | |
| 30 Label* global_object) { | 33 Label* global_object) { |
| 31 // Register usage: | 34 // Register usage: |
| 32 // type: holds the receiver instance type on entry. | 35 // type: holds the receiver instance type on entry. |
| 33 __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE)); | 36 __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE)); |
| 34 __ b(eq, global_object); | 37 __ beq(global_object); |
| 35 __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE)); | 38 __ cmpi(type, Operand(JS_BUILTINS_OBJECT_TYPE)); |
| 36 __ b(eq, global_object); | 39 __ beq(global_object); |
| 37 __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE)); | 40 __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE)); |
| 38 __ b(eq, global_object); | 41 __ beq(global_object); |
| 39 } | 42 } |
| 40 | 43 |
| 41 | 44 |
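Note: a recurring pattern throughout this port — ARM folds the condition into the branch (`cmp` then `b(eq, ...)`), while PPC compares with `cmpi` and uses a dedicated conditional-branch mnemonic (`beq`). Functionally the stub above is just a three-way instance-type test; a plain C++ sketch with illustrative enum values (the real ones live in src/objects.h):

```cpp
// Illustrative instance-type values; the real enum is in src/objects.h.
enum InstanceType {
  JS_GLOBAL_PROXY_TYPE = 0x98,
  JS_GLOBAL_OBJECT_TYPE = 0x9b,
  JS_BUILTINS_OBJECT_TYPE = 0x9c,
};

// What GenerateGlobalInstanceTypeCheck emits: branch to global_object if
// the receiver's instance type matches any of the three global kinds.
bool IsGlobalObjectType(InstanceType type) {
  return type == JS_GLOBAL_OBJECT_TYPE || type == JS_BUILTINS_OBJECT_TYPE ||
         type == JS_GLOBAL_PROXY_TYPE;
}
```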
| 42 // Helper function used from LoadIC GenerateNormal. | 45 // Helper function used from LoadIC GenerateNormal. |
| 43 // | 46 // |
| 44 // elements: Property dictionary. It is not clobbered if a jump to the miss | 47 // elements: Property dictionary. It is not clobbered if a jump to the miss |
| 45 // label is done. | 48 // label is done. |
| 46 // name: Property name. It is not clobbered if a jump to the miss label is | 49 // name: Property name. It is not clobbered if a jump to the miss label is |
| 47 //       done. | 50 //       done. |
| 48 // result: Register for the result. It is only updated if a jump to the miss | 51 // result: Register for the result. It is only updated if a jump to the miss |
| 49 // label is not done. Can be the same as elements or name clobbering | 52 // label is not done. Can be the same as elements or name clobbering |
| 50 // one of these in the case of not jumping to the miss label. | 53 // one of these in the case of not jumping to the miss label. |
| 51 // The two scratch registers need to be different from elements, name and | 54 // The two scratch registers need to be different from elements, name and |
| 52 // result. | 55 // result. |
| 53 // The generated code assumes that the receiver has slow properties, | 56 // The generated code assumes that the receiver has slow properties, |
| 54 // is not a global object and does not have interceptors. | 57 // is not a global object and does not have interceptors. |
| 55 static void GenerateDictionaryLoad(MacroAssembler* masm, | 58 static void GenerateDictionaryLoad(MacroAssembler* masm, Label* miss, |
| 56 Label* miss, | 59 Register elements, Register name, |
| 57 Register elements, | 60 Register result, Register scratch1, |
| 58 Register name, | |
| 59 Register result, | |
| 60 Register scratch1, | |
| 61 Register scratch2) { | 61 Register scratch2) { |
| 62 // Main use of the scratch registers. | 62 // Main use of the scratch registers. |
| 63 // scratch1: Used as temporary and to hold the capacity of the property | 63 // scratch1: Used as temporary and to hold the capacity of the property |
| 64 // dictionary. | 64 // dictionary. |
| 65 // scratch2: Used as temporary. | 65 // scratch2: Used as temporary. |
| 66 Label done; | 66 Label done; |
| 67 | 67 |
| 68 // Probe the dictionary. | 68 // Probe the dictionary. |
| 69 NameDictionaryLookupStub::GeneratePositiveLookup(masm, | 69 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, |
| 70 miss, | 70 name, scratch1, scratch2); |
| 71 &done, | |
| 72 elements, | |
| 73 name, | |
| 74 scratch1, | |
| 75 scratch2); | |
| 76 | 71 |
| 77 // If probing finds an entry check that the value is a normal | 72 // If probing finds an entry check that the value is a normal |
| 78 // property. | 73 // property. |
| 79 __ bind(&done); // scratch2 == elements + 4 * index | 74 __ bind(&done); // scratch2 == elements + 4 * index |
| 80 const int kElementsStartOffset = NameDictionary::kHeaderSize + | 75 const int kElementsStartOffset = |
| 76 NameDictionary::kHeaderSize + |
| 81 NameDictionary::kElementsStartIndex * kPointerSize; | 77 NameDictionary::kElementsStartIndex * kPointerSize; |
| 82 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; | 78 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; |
| 83 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); | 79 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); |
| 84 __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize)); | 80 __ mr(r0, scratch2); |
| 85 __ b(ne, miss); | 81 __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask)); |
| 82 __ and_(scratch2, scratch1, scratch2, SetRC); |
| 83 __ bne(miss, cr0); |
| 84 __ mr(scratch2, r0); |
| 86 | 85 |
| 87 // Get the value at the masked, scaled index and return. | 86 // Get the value at the masked, scaled index and return. |
| 88 __ ldr(result, | 87 __ LoadP(result, |
| 89 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); | 88 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); |
| 90 } | 89 } |
| 91 | 90 |
| 92 | 91 |
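Note: PPC cannot test against a large immediate directly, so the port parks scratch2 in r0, loads the type-field mask as a smi literal, and ANDs with SetRC so `bne(miss, cr0)` can branch on the condition register. The test itself is unchanged from ARM: the details word is smi-tagged, and the load succeeds only when its type field is zero (a normal property). A rough model, assuming the 64-bit smi layout with the payload in the high 32 bits:

```cpp
#include <cstdint>

// Assumed PPC64 smi encoding: the 32-bit payload sits in the high word.
constexpr uint64_t SmiFromInt(int32_t value) {
  return static_cast<uint64_t>(static_cast<uint32_t>(value)) << 32;
}

constexpr int kTypeFieldMask = 0x7;  // assumed PropertyDetails::TypeField::kMask

// The stub misses unless the masked type bits of the (smi-tagged) details
// word are all zero, i.e. the entry is a NORMAL property.
bool IsNormalProperty(uint64_t tagged_details) {
  return (tagged_details & SmiFromInt(kTypeFieldMask)) == 0;
}
```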
| 93 // Helper function used from StoreIC::GenerateNormal. | 92 // Helper function used from StoreIC::GenerateNormal. |
| 94 // | 93 // |
| 95 // elements: Property dictionary. It is not clobbered if a jump to the miss | 94 // elements: Property dictionary. It is not clobbered if a jump to the miss |
| 96 // label is done. | 95 // label is done. |
| 97 // name: Property name. It is not clobbered if a jump to the miss label is | 96 // name: Property name. It is not clobbered if a jump to the miss label is |
| 98 //       done. | 97 //       done. |
| 99 // value: The value to store. | 98 // value: The value to store. |
| 100 // The two scratch registers need to be different from elements, name and | 99 // The two scratch registers need to be different from elements, name and |
| 101 // result. | 100 // result. |
| 102 // The generated code assumes that the receiver has slow properties, | 101 // The generated code assumes that the receiver has slow properties, |
| 103 // is not a global object and does not have interceptors. | 102 // is not a global object and does not have interceptors. |
| 104 static void GenerateDictionaryStore(MacroAssembler* masm, | 103 static void GenerateDictionaryStore(MacroAssembler* masm, Label* miss, |
| 105 Label* miss, | 104 Register elements, Register name, |
| 106 Register elements, | 105 Register value, Register scratch1, |
| 107 Register name, | |
| 108 Register value, | |
| 109 Register scratch1, | |
| 110 Register scratch2) { | 106 Register scratch2) { |
| 111 // Main use of the scratch registers. | 107 // Main use of the scratch registers. |
| 112 // scratch1: Used as temporary and to hold the capacity of the property | 108 // scratch1: Used as temporary and to hold the capacity of the property |
| 113 // dictionary. | 109 // dictionary. |
| 114 // scratch2: Used as temporary. | 110 // scratch2: Used as temporary. |
| 115 Label done; | 111 Label done; |
| 116 | 112 |
| 117 // Probe the dictionary. | 113 // Probe the dictionary. |
| 118 NameDictionaryLookupStub::GeneratePositiveLookup(masm, | 114 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, |
| 119 miss, | 115 name, scratch1, scratch2); |
| 120 &done, | |
| 121 elements, | |
| 122 name, | |
| 123 scratch1, | |
| 124 scratch2); | |
| 125 | 116 |
| 126 // If probing finds an entry in the dictionary check that the value | 117 // If probing finds an entry in the dictionary check that the value |
| 127 // is a normal property that is not read only. | 118 // is a normal property that is not read only. |
| 128 __ bind(&done); // scratch2 == elements + 4 * index | 119 __ bind(&done); // scratch2 == elements + 4 * index |
| 129 const int kElementsStartOffset = NameDictionary::kHeaderSize + | 120 const int kElementsStartOffset = |
| 121 NameDictionary::kHeaderSize + |
| 130 NameDictionary::kElementsStartIndex * kPointerSize; | 122 NameDictionary::kElementsStartIndex * kPointerSize; |
| 131 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; | 123 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; |
| 132 const int kTypeAndReadOnlyMask = | 124 int kTypeAndReadOnlyMask = |
| 133 (PropertyDetails::TypeField::kMask | | 125 PropertyDetails::TypeField::kMask | |
| 134 PropertyDetails::AttributesField::encode(READ_ONLY)) << kSmiTagSize; | 126 PropertyDetails::AttributesField::encode(READ_ONLY); |
| 135 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); | 127 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); |
| 136 __ tst(scratch1, Operand(kTypeAndReadOnlyMask)); | 128 __ mr(r0, scratch2); |
| 137 __ b(ne, miss); | 129 __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask)); |
| 130 __ and_(scratch2, scratch1, scratch2, SetRC); |
| 131 __ bne(miss, cr0); |
| 132 __ mr(scratch2, r0); |
| 138 | 133 |
| 139 // Store the value at the masked, scaled index and return. | 134 // Store the value at the masked, scaled index and return. |
| 140 const int kValueOffset = kElementsStartOffset + kPointerSize; | 135 const int kValueOffset = kElementsStartOffset + kPointerSize; |
| 141 __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); | 136 __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); |
| 142 __ str(value, MemOperand(scratch2)); | 137 __ StoreP(value, MemOperand(scratch2)); |
| 143 | 138 |
| 144 // Update the write barrier. Make sure not to clobber the value. | 139 // Update the write barrier. Make sure not to clobber the value. |
| 145 __ mov(scratch1, value); | 140 __ mr(scratch1, value); |
| 146 __ RecordWrite( | 141 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, |
| 147 elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs); | 142 kDontSaveFPRegs); |
| 148 } | 143 } |
| 149 | 144 |
| 150 | 145 |
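Note: both dictionary helpers lean on the same NameDictionary entry layout. After the probe, scratch2 points at the probed entry (elements plus the scaled index), and the key, value, and details slots sit at fixed offsets from there. A sketch of the offset arithmetic with assumed constants (the real values come from NameDictionary):

```cpp
#include <cstdio>

constexpr int kPointerSize = 8;                // PPC64
constexpr int kHeaderSize = 3 * kPointerSize;  // assumed hash-table header
constexpr int kElementsStartIndex = 3;         // assumed

// Each entry is three consecutive pointers: [key, value, details].
constexpr int kElementsStartOffset =
    kHeaderSize + kElementsStartIndex * kPointerSize;
constexpr int kValueOffset = kElementsStartOffset + 1 * kPointerSize;
constexpr int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;

int main() {
  // scratch2 holds elements + scaled entry index; value and details are
  // then loaded at these fixed positive offsets (minus the heap tag).
  std::printf("value @ +%d, details @ +%d from the probed entry\n",
              kValueOffset, kDetailsOffset);
}
```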
| 151 // Checks the receiver for special cases (value type, slow case bits). | 146 // Checks the receiver for special cases (value type, slow case bits). |
| 152 // Falls through for regular JS object. | 147 // Falls through for regular JS object. |
| 153 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, | 148 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, |
| 154 Register receiver, | 149 Register receiver, Register map, |
| 155 Register map, | |
| 156 Register scratch, | 150 Register scratch, |
| 157 int interceptor_bit, | 151 int interceptor_bit, Label* slow) { |
| 158 Label* slow) { | |
| 159 // Check that the object isn't a smi. | 152 // Check that the object isn't a smi. |
| 160 __ JumpIfSmi(receiver, slow); | 153 __ JumpIfSmi(receiver, slow); |
| 161 // Get the map of the receiver. | 154 // Get the map of the receiver. |
| 162 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 155 __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 163 // Check bit field. | 156 // Check bit field. |
| 164 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); | 157 __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 165 __ tst(scratch, | 158 DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000); |
| 166 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); | 159 __ andi(r0, scratch, |
| 167 __ b(ne, slow); | 160 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); |
| 161 __ bne(slow, cr0); |
| 168 // Check that the object is some kind of JS object EXCEPT JS Value type. | 162 // Check that the object is some kind of JS object EXCEPT JS Value type. |
| 169 // In the case that the object is a value-wrapper object, | 163 // In the case that the object is a value-wrapper object, |
| 170 // we enter the runtime system to make sure that indexing into string | 164 // we enter the runtime system to make sure that indexing into string |
| 171 // objects works as intended. | 165 // objects works as intended. |
| 172 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); | 166 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); |
| 173 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 167 __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 174 __ cmp(scratch, Operand(JS_OBJECT_TYPE)); | 168 __ cmpi(scratch, Operand(JS_OBJECT_TYPE)); |
| 175 __ b(lt, slow); | 169 __ blt(slow); |
| 176 } | 170 } |
| 177 | 171 |
| 178 | 172 |
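Note: the new DCHECK exists because `andi.` on PPC encodes only a 16-bit unsigned immediate; the port asserts (conservatively, below 0x8000) that the combined bit-field mask fits before using the immediate form. A compile-time version of the same guard, with assumed bit positions:

```cpp
#include <cstdint>

// Assumed bit positions; the real ones are defined on Map's bit field.
constexpr int kIsAccessCheckNeeded = 1;
constexpr int kHasIndexedInterceptor = 3;

constexpr uint32_t kMask =
    (1u << kIsAccessCheckNeeded) | (1u << kHasIndexedInterceptor);

// Mirrors the stub's DCHECK: the mask must fit andi.'s immediate field.
static_assert(kMask < 0x8000, "mask too wide for a single andi.");
```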
| 179 // Loads an indexed element from a fast case array. | 173 // Loads an indexed element from a fast case array. |
| 180 // If not_fast_array is NULL, doesn't perform the elements map check. | 174 // If not_fast_array is NULL, doesn't perform the elements map check. |
| 181 static void GenerateFastArrayLoad(MacroAssembler* masm, | 175 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, |
| 182 Register receiver, | 176 Register key, Register elements, |
| 183 Register key, | 177 Register scratch1, Register scratch2, |
| 184 Register elements, | 178 Register result, Label* not_fast_array, |
| 185 Register scratch1, | |
| 186 Register scratch2, | |
| 187 Register result, | |
| 188 Label* not_fast_array, | |
| 189 Label* out_of_range) { | 179 Label* out_of_range) { |
| 190 // Register use: | 180 // Register use: |
| 191 // | 181 // |
| 192 // receiver - holds the receiver on entry. | 182 // receiver - holds the receiver on entry. |
| 193 // Unchanged unless 'result' is the same register. | 183 // Unchanged unless 'result' is the same register. |
| 194 // | 184 // |
| 195 // key - holds the smi key on entry. | 185 // key - holds the smi key on entry. |
| 196 // Unchanged unless 'result' is the same register. | 186 // Unchanged unless 'result' is the same register. |
| 197 // | 187 // |
| 198 // elements - holds the elements of the receiver on exit. | 188 // elements - holds the elements of the receiver on exit. |
| 199 // | 189 // |
| 200 // result - holds the result on exit if the load succeeded. | 190 // result - holds the result on exit if the load succeeded. |
| 201 // Allowed to be the same as 'receiver' or 'key'. | 191 // Allowed to be the same as 'receiver' or 'key'. |
| 202 // Unchanged on bailout so 'receiver' and 'key' can be safely | 192 // Unchanged on bailout so 'receiver' and 'key' can be safely |
| 203 // used by further computation. | 193 // used by further computation. |
| 204 // | 194 // |
| 205 // Scratch registers: | 195 // Scratch registers: |
| 206 // | 196 // |
| 207 // scratch1 - used to hold elements map and elements length. | 197 // scratch1 - used to hold elements map and elements length. |
| 208 // Holds the elements map if not_fast_array branch is taken. | 198 // Holds the elements map if not_fast_array branch is taken. |
| 209 // | 199 // |
| 210 // scratch2 - used to hold the loaded value. | 200 // scratch2 - used to hold the loaded value. |
| 211 | 201 |
| 212 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 202 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 213 if (not_fast_array != NULL) { | 203 if (not_fast_array != NULL) { |
| 214 // Check that the object is in fast mode and writable. | 204 // Check that the object is in fast mode and writable. |
| 215 __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); | 205 __ LoadP(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 216 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 206 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
| 217 __ cmp(scratch1, ip); | 207 __ cmp(scratch1, ip); |
| 218 __ b(ne, not_fast_array); | 208 __ bne(not_fast_array); |
| 219 } else { | 209 } else { |
| 220 __ AssertFastElements(elements); | 210 __ AssertFastElements(elements); |
| 221 } | 211 } |
| 222 // Check that the key (index) is within bounds. | 212 // Check that the key (index) is within bounds. |
| 223 __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 213 __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 224 __ cmp(key, Operand(scratch1)); | 214 __ cmpl(key, scratch1); |
| 225 __ b(hs, out_of_range); | 215 __ bge(out_of_range); |
| 226 // Fast case: Do the load. | 216 // Fast case: Do the load. |
| 227 __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 217 __ addi(scratch1, elements, |
| 228 __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key)); | 218 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 219 // The key is a smi. |
| 220 __ SmiToPtrArrayOffset(scratch2, key); |
| 221 __ LoadPX(scratch2, MemOperand(scratch2, scratch1)); |
| 229 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 222 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 230 __ cmp(scratch2, ip); | 223 __ cmp(scratch2, ip); |
| 231 // In case the loaded value is the_hole we have to consult GetProperty | 224 // In case the loaded value is the_hole we have to consult GetProperty |
| 232 // to ensure the prototype chain is searched. | 225 // to ensure the prototype chain is searched. |
| 233 __ b(eq, out_of_range); | 226 __ beq(out_of_range); |
| 234 __ mov(result, scratch2); | 227 __ mr(result, scratch2); |
| 235 } | 228 } |
| 236 | 229 |
| 237 | 230 |
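Note: ARM's `MemOperand::PointerAddressFromSmiKey` scales the smi key inside the addressing mode; PPC has no such mode, so the port computes a byte offset explicitly with `SmiToPtrArrayOffset` and loads through the indexed form `LoadPX`. Under the assumed 64-bit smi layout, the conversion collapses to one arithmetic shift:

```cpp
#include <cstdint>

// Assumed PPC64 smi layout: payload in the high 32 bits of the word.
constexpr int kSmiShift = 32;
constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers

// Sketch of SmiToPtrArrayOffset: tagged smi index -> byte offset into a
// pointer array, i.e. untag (>> 32) then scale (<< 3), fused as one shift.
int64_t SmiToPtrArrayOffset(int64_t tagged_key) {
  return tagged_key >> (kSmiShift - kPointerSizeLog2);
}
// Example: smi 5 is 5LL << 32; the offset comes out as 5 * 8 = 40 bytes.
```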
| 238 // Checks whether a key is an array index string or a unique name. | 231 // Checks whether a key is an array index string or a unique name. |
| 239 // Falls through if a key is a unique name. | 232 // Falls through if a key is a unique name. |
| 240 static void GenerateKeyNameCheck(MacroAssembler* masm, | 233 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, |
| 241 Register key, | 234 Register map, Register hash, |
| 242 Register map, | 235 Label* index_string, Label* not_unique) { |
| 243 Register hash, | |
| 244 Label* index_string, | |
| 245 Label* not_unique) { | |
| 246 // The key is not a smi. | 236 // The key is not a smi. |
| 247 Label unique; | 237 Label unique; |
| 248 // Is it a name? | 238 // Is it a name? |
| 249 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); | 239 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); |
| 250 __ b(hi, not_unique); | 240 __ bgt(not_unique); |
| 251 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); | 241 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); |
| 252 __ b(eq, &unique); | 242 __ beq(&unique); |
| 253 | 243 |
| 254 // Is the string an array index, with cached numeric value? | 244 // Is the string an array index, with cached numeric value? |
| 255 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 245 __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
| 256 __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask)); | 246 __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask)); |
| 257 __ b(eq, index_string); | 247 __ and_(r0, hash, r8, SetRC); |
| 248 __ beq(index_string, cr0); |
| 258 | 249 |
| 259 // Is the string internalized? We know it's a string, so a single | 250 // Is the string internalized? We know it's a string, so a single |
| 260 // bit test is enough. | 251 // bit test is enough. |
| 261 // map: key map | 252 // map: key map |
| 262 __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 253 __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 263 STATIC_ASSERT(kInternalizedTag == 0); | 254 STATIC_ASSERT(kInternalizedTag == 0); |
| 264 __ tst(hash, Operand(kIsNotInternalizedMask)); | 255 __ andi(r0, hash, Operand(kIsNotInternalizedMask)); |
| 265 __ b(ne, not_unique); | 256 __ bne(not_unique, cr0); |
| 266 | 257 |
| 267 __ bind(&unique); | 258 __ bind(&unique); |
| 268 } | 259 } |
| 269 | 260 |
| 270 | 261 |
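Note: the complementary case to the 16-bit `andi.` limit shows up here. `Name::kContainsCachedArrayIndexMask` is too wide for an immediate, so the port materializes it in r8 and uses the register form `and_` with SetRC, branching on cr0. A sketch of that decision, with an assumed mask value:

```cpp
#include <cstdint>

// Assumed value; the real mask is defined on Name in src/objects.h.
constexpr uint32_t kContainsCachedArrayIndexMask = 0xff000002u;

// andi. only encodes a 16-bit unsigned immediate, so wider masks must be
// moved into a register first (mov + and_ with SetRC in the port).
constexpr bool FitsAndiImmediate(uint32_t mask) { return mask <= 0xffffu; }
static_assert(!FitsAndiImmediate(kContainsCachedArrayIndexMask),
              "this mask needs the register form");
```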
| 271 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { | 262 void LoadIC::GenerateMegamorphic(MacroAssembler* masm) { |
| 272 // The return address is in lr. | 263 // The return address is in lr. |
| 273 Register receiver = ReceiverRegister(); | 264 Register receiver = ReceiverRegister(); |
| 274 Register name = NameRegister(); | 265 Register name = NameRegister(); |
| 275 DCHECK(receiver.is(r1)); | 266 DCHECK(receiver.is(r4)); |
| 276 DCHECK(name.is(r2)); | 267 DCHECK(name.is(r5)); |
| 277 | 268 |
| 278 // Probe the stub cache. | 269 // Probe the stub cache. |
| 279 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( | 270 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( |
| 280 Code::ComputeHandlerFlags(Code::LOAD_IC)); | 271 Code::ComputeHandlerFlags(Code::LOAD_IC)); |
| 281 masm->isolate()->stub_cache()->GenerateProbe( | 272 masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, r6, |
| 282 masm, flags, receiver, name, r3, r4, r5, r6); | 273 r7, r8, r9); |
| 283 | 274 |
| 284 // Cache miss: Jump to runtime. | 275 // Cache miss: Jump to runtime. |
| 285 GenerateMiss(masm); | 276 GenerateMiss(masm); |
| 286 } | 277 } |
| 287 | 278 |
| 288 | 279 |
| 289 void LoadIC::GenerateNormal(MacroAssembler* masm) { | 280 void LoadIC::GenerateNormal(MacroAssembler* masm) { |
| 290 Register dictionary = r0; | 281 Register dictionary = r3; |
| 291 DCHECK(!dictionary.is(ReceiverRegister())); | 282 DCHECK(!dictionary.is(ReceiverRegister())); |
| 292 DCHECK(!dictionary.is(NameRegister())); | 283 DCHECK(!dictionary.is(NameRegister())); |
| 293 | 284 |
| 294 Label slow; | 285 Label slow; |
| 295 | 286 |
| 296 __ ldr(dictionary, | 287 __ LoadP(dictionary, |
| 297 FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); | 288 FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset)); |
| 298 GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r0, r3, r4); | 289 GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), r3, r6, r7); |
| 299 __ Ret(); | 290 __ Ret(); |
| 300 | 291 |
| 301 // Dictionary load failed, go slow (but don't miss). | 292 // Dictionary load failed, go slow (but don't miss). |
| 302 __ bind(&slow); | 293 __ bind(&slow); |
| 303 GenerateRuntimeGetProperty(masm); | 294 GenerateRuntimeGetProperty(masm); |
| 304 } | 295 } |
| 305 | 296 |
| 306 | 297 |
| 307 // A register that isn't one of the parameters to the load ic. | 298 // A register that isn't one of the parameters to the load ic. |
| 308 static const Register LoadIC_TempRegister() { return r3; } | 299 static const Register LoadIC_TempRegister() { return r6; } |
| 309 | 300 |
| 310 | 301 |
| 311 void LoadIC::GenerateMiss(MacroAssembler* masm) { | 302 void LoadIC::GenerateMiss(MacroAssembler* masm) { |
| 312 // The return address is in lr. | 303 // The return address is in lr. |
| 313 Isolate* isolate = masm->isolate(); | 304 Isolate* isolate = masm->isolate(); |
| 314 | 305 |
| 315 __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4); | 306 __ IncrementCounter(isolate->counters()->load_miss(), 1, r6, r7); |
| 316 | 307 |
| 317 __ mov(LoadIC_TempRegister(), ReceiverRegister()); | 308 __ mr(LoadIC_TempRegister(), ReceiverRegister()); |
| 318 __ Push(LoadIC_TempRegister(), NameRegister()); | 309 __ Push(LoadIC_TempRegister(), NameRegister()); |
| 319 | 310 |
| 320 // Perform tail call to the entry. | 311 // Perform tail call to the entry. |
| 321 ExternalReference ref = | 312 ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); |
| 322 ExternalReference(IC_Utility(kLoadIC_Miss), isolate); | |
| 323 __ TailCallExternalReference(ref, 2, 1); | 313 __ TailCallExternalReference(ref, 2, 1); |
| 324 } | 314 } |
| 325 | 315 |
| 326 | 316 |
| 327 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 317 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
| 328 // The return address is in lr. | 318 // The return address is in lr. |
| 329 | 319 |
| 330 __ mov(LoadIC_TempRegister(), ReceiverRegister()); | 320 __ mr(LoadIC_TempRegister(), ReceiverRegister()); |
| 331 __ Push(LoadIC_TempRegister(), NameRegister()); | 321 __ Push(LoadIC_TempRegister(), NameRegister()); |
| 332 | 322 |
| 333 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); | 323 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); |
| 334 } | 324 } |
| 335 | 325 |
| 336 | 326 |
| 337 static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm, | 327 static MemOperand GenerateMappedArgumentsLookup( |
| 338 Register object, | 328 MacroAssembler* masm, Register object, Register key, Register scratch1, |
| 339 Register key, | 329 Register scratch2, Register scratch3, Label* unmapped_case, |
| 340 Register scratch1, | 330 Label* slow_case) { |
| 341 Register scratch2, | |
| 342 Register scratch3, | |
| 343 Label* unmapped_case, | |
| 344 Label* slow_case) { | |
| 345 Heap* heap = masm->isolate()->heap(); | 331 Heap* heap = masm->isolate()->heap(); |
| 346 | 332 |
| 347 // Check that the receiver is a JSObject. Because of the map check | 333 // Check that the receiver is a JSObject. Because of the map check |
| 348 // later, we do not need to check for interceptors or whether it | 334 // later, we do not need to check for interceptors or whether it |
| 349 // requires access checks. | 335 // requires access checks. |
| 350 __ JumpIfSmi(object, slow_case); | 336 __ JumpIfSmi(object, slow_case); |
| 351 // Check that the object is some kind of JSObject. | 337 // Check that the object is some kind of JSObject. |
| 352 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE); | 338 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE); |
| 353 __ b(lt, slow_case); | 339 __ blt(slow_case); |
| 354 | 340 |
| 355 // Check that the key is a positive smi. | 341 // Check that the key is a positive smi. |
| 356 __ tst(key, Operand(0x80000001)); | 342 __ mov(scratch1, Operand(0x80000001)); |
| 357 __ b(ne, slow_case); | 343 __ and_(r0, key, scratch1, SetRC); |
| 344 __ bne(slow_case, cr0); |
| 358 | 345 |
| 359 // Load the elements into scratch1 and check its map. | 346 // Load the elements into scratch1 and check its map. |
| 360 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); | 347 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); |
| 361 __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); | 348 __ LoadP(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); |
| 362 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK); | 349 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK); |
| 363 | 350 |
| 364 // Check if element is in the range of mapped arguments. If not, jump | 351 // Check if element is in the range of mapped arguments. If not, jump |
| 365 // to the unmapped lookup with the parameter map in scratch1. | 352 // to the unmapped lookup with the parameter map in scratch1. |
| 366 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); | 353 __ LoadP(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); |
| 367 __ sub(scratch2, scratch2, Operand(Smi::FromInt(2))); | 354 __ SubSmiLiteral(scratch2, scratch2, Smi::FromInt(2), r0); |
| 368 __ cmp(key, Operand(scratch2)); | 355 __ cmpl(key, scratch2); |
| 369 __ b(cs, unmapped_case); | 356 __ bge(unmapped_case); |
| 370 | 357 |
| 371 // Load element index and check whether it is the hole. | 358 // Load element index and check whether it is the hole. |
| 372 const int kOffset = | 359 const int kOffset = |
| 373 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; | 360 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; |
| 374 | 361 |
| 375 __ mov(scratch3, Operand(kPointerSize >> 1)); | 362 __ SmiToPtrArrayOffset(scratch3, key); |
| 376 __ mul(scratch3, key, scratch3); | 363 __ addi(scratch3, scratch3, Operand(kOffset)); |
| 377 __ add(scratch3, scratch3, Operand(kOffset)); | |
| 378 | 364 |
| 379 __ ldr(scratch2, MemOperand(scratch1, scratch3)); | 365 __ LoadPX(scratch2, MemOperand(scratch1, scratch3)); |
| 380 __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); | 366 __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); |
| 381 __ cmp(scratch2, scratch3); | 367 __ cmp(scratch2, scratch3); |
| 382 __ b(eq, unmapped_case); | 368 __ beq(unmapped_case); |
| 383 | 369 |
| 384 // Load value from context and return it. We can reuse scratch1 because | 370 // Load value from context and return it. We can reuse scratch1 because |
| 385 // we do not jump to the unmapped lookup (which requires the parameter | 371 // we do not jump to the unmapped lookup (which requires the parameter |
| 386 // map in scratch1). | 372 // map in scratch1). |
| 387 __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 373 __ LoadP(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
| 388 __ mov(scratch3, Operand(kPointerSize >> 1)); | 374 __ SmiToPtrArrayOffset(scratch3, scratch2); |
| 389 __ mul(scratch3, scratch2, scratch3); | 375 __ addi(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag)); |
| 390 __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag)); | |
| 391 return MemOperand(scratch1, scratch3); | 376 return MemOperand(scratch1, scratch3); |
| 392 } | 377 } |
| 393 | 378 |
| 394 | 379 |
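Note: the bounds check against length minus two and the `kOffset` of header plus two pointers both come from the sloppy-arguments layout: the parameter map's first two slots hold the context and the backing store, and per-argument entries start after them. A high-level model of the lookup (not V8 code):

```cpp
#include <cstddef>
#include <optional>
#include <vector>

// Model: slot i of the parameter map either names the context slot that
// aliases argument i, or is "the hole" (nullopt), meaning the argument
// lives in the unmapped backing store instead.
std::optional<size_t> MappedContextIndex(
    const std::vector<std::optional<size_t>>& parameter_map, size_t key) {
  // The real map stores [context, backing_store, idx0, idx1, ...], hence
  // the stub's bounds check against length - 2 and its +2-pointer offset.
  if (key >= parameter_map.size()) return std::nullopt;  // unmapped case
  return parameter_map[key];  // nullopt here models the-hole
}
```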
| 395 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, | 380 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, |
| 396 Register key, | 381 Register key, |
| 397 Register parameter_map, | 382 Register parameter_map, |
| 398 Register scratch, | 383 Register scratch, |
| 399 Label* slow_case) { | 384 Label* slow_case) { |
| 400 // Element is in arguments backing store, which is referenced by the | 385 // Element is in arguments backing store, which is referenced by the |
| 401 // second element of the parameter_map. The parameter_map register | 386 // second element of the parameter_map. The parameter_map register |
| 402 // must be loaded with the parameter map of the arguments object and is | 387 // must be loaded with the parameter map of the arguments object and is |
| 403 // overwritten. | 388 // overwritten. |
| 404 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; | 389 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; |
| 405 Register backing_store = parameter_map; | 390 Register backing_store = parameter_map; |
| 406 __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); | 391 __ LoadP(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); |
| 407 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); | 392 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); |
| 408 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, | 393 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, |
| 409 DONT_DO_SMI_CHECK); | 394 DONT_DO_SMI_CHECK); |
| 410 __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); | 395 __ LoadP(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); |
| 411 __ cmp(key, Operand(scratch)); | 396 __ cmpl(key, scratch); |
| 412 __ b(cs, slow_case); | 397 __ bge(slow_case); |
| 413 __ mov(scratch, Operand(kPointerSize >> 1)); | 398 __ SmiToPtrArrayOffset(scratch, key); |
| 414 __ mul(scratch, key, scratch); | 399 __ addi(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 415 __ add(scratch, | |
| 416 scratch, | |
| 417 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
| 418 return MemOperand(backing_store, scratch); | 400 return MemOperand(backing_store, scratch); |
| 419 } | 401 } |
| 420 | 402 |
| 421 | 403 |
| 422 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { | 404 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { |
| 423 // The return address is in lr. | 405 // The return address is in lr. |
| 424 Register receiver = ReceiverRegister(); | 406 Register receiver = ReceiverRegister(); |
| 425 Register key = NameRegister(); | 407 Register key = NameRegister(); |
| 426 DCHECK(receiver.is(r1)); | 408 DCHECK(receiver.is(r4)); |
| 427 DCHECK(key.is(r2)); | 409 DCHECK(key.is(r5)); |
| 428 | 410 |
| 429 Label slow, notin; | 411 Label slow, notin; |
| 430 MemOperand mapped_location = | 412 MemOperand mapped_location = GenerateMappedArgumentsLookup( |
| 431 GenerateMappedArgumentsLookup( | 413 masm, receiver, key, r3, r6, r7, ¬in, &slow); |
| 432 masm, receiver, key, r0, r3, r4, ¬in, &slow); | 414 __ LoadPX(r3, mapped_location); |
| 433 __ ldr(r0, mapped_location); | |
| 434 __ Ret(); | 415 __ Ret(); |
| 435 __ bind(¬in); | 416 __ bind(¬in); |
| 436 // The unmapped lookup expects that the parameter map is in r0. | 417 // The unmapped lookup expects that the parameter map is in r3. |
| 437 MemOperand unmapped_location = | 418 MemOperand unmapped_location = |
| 438 GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow); | 419 GenerateUnmappedArgumentsLookup(masm, key, r3, r6, &slow); |
| 439 __ ldr(r0, unmapped_location); | 420 __ LoadPX(r3, unmapped_location); |
| 440 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); | 421 __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); |
| 441 __ cmp(r0, r3); | 422 __ cmp(r3, r6); |
| 442 __ b(eq, &slow); | 423 __ beq(&slow); |
| 443 __ Ret(); | 424 __ Ret(); |
| 444 __ bind(&slow); | 425 __ bind(&slow); |
| 445 GenerateMiss(masm); | 426 GenerateMiss(masm); |
| 446 } | 427 } |
| 447 | 428 |
| 448 | 429 |
| 449 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { | 430 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { |
| 450 Register receiver = ReceiverRegister(); | 431 Register receiver = ReceiverRegister(); |
| 451 Register key = NameRegister(); | 432 Register key = NameRegister(); |
| 452 Register value = ValueRegister(); | 433 Register value = ValueRegister(); |
| 453 DCHECK(receiver.is(r1)); | 434 DCHECK(receiver.is(r4)); |
| 454 DCHECK(key.is(r2)); | 435 DCHECK(key.is(r5)); |
| 455 DCHECK(value.is(r0)); | 436 DCHECK(value.is(r3)); |
| 456 | 437 |
| 457 Label slow, notin; | 438 Label slow, notin; |
| 458 MemOperand mapped_location = GenerateMappedArgumentsLookup( | 439 MemOperand mapped_location = GenerateMappedArgumentsLookup( |
| 459 masm, receiver, key, r3, r4, r5, ¬in, &slow); | 440 masm, receiver, key, r6, r7, r8, ¬in, &slow); |
| 460 __ str(value, mapped_location); | 441 Register mapped_base = mapped_location.ra(); |
| 461 __ add(r6, r3, r5); | 442 Register mapped_offset = mapped_location.rb(); |
| 462 __ mov(r9, value); | 443 __ StorePX(value, mapped_location); |
| 463 __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); | 444 __ add(r9, mapped_base, mapped_offset); |
| 445 __ mr(r11, value); |
| 446 __ RecordWrite(mapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs); |
| 464 __ Ret(); | 447 __ Ret(); |
| 465 __ bind(¬in); | 448 __ bind(¬in); |
| 466 // The unmapped lookup expects that the parameter map is in r3. | 449 // The unmapped lookup expects that the parameter map is in r6. |
| 467 MemOperand unmapped_location = | 450 MemOperand unmapped_location = |
| 468 GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow); | 451 GenerateUnmappedArgumentsLookup(masm, key, r6, r7, &slow); |
| 469 __ str(value, unmapped_location); | 452 Register unmapped_base = unmapped_location.ra(); |
| 470 __ add(r6, r3, r4); | 453 Register unmapped_offset = unmapped_location.rb(); |
| 471 __ mov(r9, value); | 454 __ StorePX(value, unmapped_location); |
| 472 __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); | 455 __ add(r9, unmapped_base, unmapped_offset); |
| 456 __ mr(r11, value); |
| 457 __ RecordWrite(unmapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs); |
| 473 __ Ret(); | 458 __ Ret(); |
| 474 __ bind(&slow); | 459 __ bind(&slow); |
| 475 GenerateMiss(masm); | 460 GenerateMiss(masm); |
| 476 } | 461 } |
| 477 | 462 |
| 478 | 463 |
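Note: the returned MemOperand on PPC is a base-plus-index register pair (its `ra()` and `rb()`), not a fully formed address, so the store goes through `StorePX` and the write barrier then needs the absolute slot address, recomputed with a plain add. The arithmetic is simply:

```cpp
#include <cstdint>

// Sketch only: RecordWrite is handed the absolute address of the slot that
// StorePX wrote through, i.e. the MemOperand's base plus its index register.
uintptr_t StoredSlotAddress(uintptr_t base /* ra() */,
                            uintptr_t index /* rb() */) {
  return base + index;
}
```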
| 479 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { | 464 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { |
| 480 // The return address is in lr. | 465 // The return address is in lr. |
| 481 Isolate* isolate = masm->isolate(); | 466 Isolate* isolate = masm->isolate(); |
| 482 | 467 |
| 483 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4); | 468 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r6, r7); |
| 484 | 469 |
| 485 __ Push(ReceiverRegister(), NameRegister()); | 470 __ Push(ReceiverRegister(), NameRegister()); |
| 486 | 471 |
| 487 // Perform tail call to the entry. | 472 // Perform tail call to the entry. |
| 488 ExternalReference ref = | 473 ExternalReference ref = |
| 489 ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); | 474 ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); |
| 490 | 475 |
| 491 __ TailCallExternalReference(ref, 2, 1); | 476 __ TailCallExternalReference(ref, 2, 1); |
| 492 } | 477 } |
| 493 | 478 |
| 494 | 479 |
| 495 // IC register specifications | 480 // IC register specifications |
| 496 const Register LoadIC::ReceiverRegister() { return r1; } | 481 const Register LoadIC::ReceiverRegister() { return r4; } |
| 497 const Register LoadIC::NameRegister() { return r2; } | 482 const Register LoadIC::NameRegister() { return r5; } |
| 498 | 483 |
| 499 | 484 |
| 500 const Register LoadIC::SlotRegister() { | 485 const Register LoadIC::SlotRegister() { |
| 501 DCHECK(FLAG_vector_ics); | 486 DCHECK(FLAG_vector_ics); |
| 502 return r0; | 487 return r3; |
| 503 } | 488 } |
| 504 | 489 |
| 505 | 490 |
| 506 const Register LoadIC::VectorRegister() { | 491 const Register LoadIC::VectorRegister() { |
| 507 DCHECK(FLAG_vector_ics); | 492 DCHECK(FLAG_vector_ics); |
| 508 return r3; | 493 return r6; |
| 509 } | 494 } |
| 510 | 495 |
| 511 | 496 |
| 512 const Register StoreIC::ReceiverRegister() { return r1; } | 497 const Register StoreIC::ReceiverRegister() { return r4; } |
| 513 const Register StoreIC::NameRegister() { return r2; } | 498 const Register StoreIC::NameRegister() { return r5; } |
| 514 const Register StoreIC::ValueRegister() { return r0; } | 499 const Register StoreIC::ValueRegister() { return r3; } |
| 515 | 500 |
| 516 | 501 |
| 517 const Register KeyedStoreIC::MapRegister() { | 502 const Register KeyedStoreIC::MapRegister() { return r6; } |
| 518 return r3; | |
| 519 } | |
| 520 | 503 |
| 521 | 504 |
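Note: across this file the register renaming is systematic: ARM r0 through r6 become PPC r3 through r9, since in the PPC ELF ABI r0 is special-cased in addressing modes and r3 is the first argument/return register; r0 and r11 serve as extra scratch in the port. As a rule of thumb for reading the diff (observed pattern, not an official table):

```cpp
// ARM rN generally maps to PPC r(N+3) in this port:
// r0->r3, r1->r4, r2->r5, r3->r6, r4->r7, r5->r8, r6->r9.
int ArmToPpcReg(int arm_reg) { return arm_reg + 3; }
```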
| 522 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 505 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
| 523 // The return address is in lr. | 506 // The return address is in lr. |
| 524 | 507 |
| 525 __ Push(ReceiverRegister(), NameRegister()); | 508 __ Push(ReceiverRegister(), NameRegister()); |
| 526 | 509 |
| 527 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); | 510 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); |
| 528 } | 511 } |
| 529 | 512 |
| 530 | 513 |
| 531 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { | 514 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { |
| 532 // The return address is in lr. | 515 // The return address is in lr. |
| 533 Label slow, check_name, index_smi, index_name, property_array_property; | 516 Label slow, check_name, index_smi, index_name, property_array_property; |
| 534 Label probe_dictionary, check_number_dictionary; | 517 Label probe_dictionary, check_number_dictionary; |
| 535 | 518 |
| 536 Register key = NameRegister(); | 519 Register key = NameRegister(); |
| 537 Register receiver = ReceiverRegister(); | 520 Register receiver = ReceiverRegister(); |
| 538 DCHECK(key.is(r2)); | 521 DCHECK(key.is(r5)); |
| 539 DCHECK(receiver.is(r1)); | 522 DCHECK(receiver.is(r4)); |
| 540 | 523 |
| 541 Isolate* isolate = masm->isolate(); | 524 Isolate* isolate = masm->isolate(); |
| 542 | 525 |
| 543 // Check that the key is a smi. | 526 // Check that the key is a smi. |
| 544 __ JumpIfNotSmi(key, &check_name); | 527 __ JumpIfNotSmi(key, &check_name); |
| 545 __ bind(&index_smi); | 528 __ bind(&index_smi); |
| 546 // Now the key is known to be a smi. This place is also jumped to from below | 529 // Now the key is known to be a smi. This place is also jumped to from below |
| 547 // where a numeric string is converted to a smi. | 530 // where a numeric string is converted to a smi. |
| 548 | 531 |
| 549 GenerateKeyedLoadReceiverCheck( | 532 GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6, |
| 550 masm, receiver, r0, r3, Map::kHasIndexedInterceptor, &slow); | 533 Map::kHasIndexedInterceptor, &slow); |
| 551 | 534 |
| 552 // Check the receiver's map to see if it has fast elements. | 535 // Check the receiver's map to see if it has fast elements. |
| 553 __ CheckFastElements(r0, r3, &check_number_dictionary); | 536 __ CheckFastElements(r3, r6, &check_number_dictionary); |
| 554 | 537 |
| 555 GenerateFastArrayLoad( | 538 GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, NULL, &slow); |
| 556 masm, receiver, key, r0, r3, r4, r0, NULL, &slow); | 539 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6); |
| 557 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3); | |
| 558 __ Ret(); | 540 __ Ret(); |
| 559 | 541 |
| 560 __ bind(&check_number_dictionary); | 542 __ bind(&check_number_dictionary); |
| 561 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 543 __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 562 __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset)); | 544 __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset)); |
| 563 | 545 |
| 564 // Check whether the elements is a number dictionary. | 546 // Check whether the elements is a number dictionary. |
| 565 // r3: elements map | 547 // r6: elements map |
| 566 // r4: elements | 548 // r7: elements |
| 567 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 549 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
| 568 __ cmp(r3, ip); | 550 __ cmp(r6, ip); |
| 569 __ b(ne, &slow); | 551 __ bne(&slow); |
| 570 __ SmiUntag(r0, key); | 552 __ SmiUntag(r3, key); |
| 571 __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5); | 553 __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8); |
| 572 __ Ret(); | 554 __ Ret(); |
| 573 | 555 |
| 574 // Slow case, key and receiver still in r2 and r1. | 556 // Slow case, key and receiver still in r3 and r4. |
| 575 __ bind(&slow); | 557 __ bind(&slow); |
| 576 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), | 558 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7, |
| 577 1, r4, r3); | 559 r6); |
| 578 GenerateRuntimeGetProperty(masm); | 560 GenerateRuntimeGetProperty(masm); |
| 579 | 561 |
| 580 __ bind(&check_name); | 562 __ bind(&check_name); |
| 581 GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow); | 563 GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow); |
| 582 | 564 |
| 583 GenerateKeyedLoadReceiverCheck( | 565 GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6, |
| 584 masm, receiver, r0, r3, Map::kHasNamedInterceptor, &slow); | 566 Map::kHasNamedInterceptor, &slow); |
| 585 | 567 |
| 586 // If the receiver is a fast-case object, check the keyed lookup | 568 // If the receiver is a fast-case object, check the keyed lookup |
| 587 // cache. Otherwise probe the dictionary. | 569 // cache. Otherwise probe the dictionary. |
| 588 __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 570 __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 589 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); | 571 __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset)); |
| 590 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 572 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
| 591 __ cmp(r4, ip); | 573 __ cmp(r7, ip); |
| 592 __ b(eq, &probe_dictionary); | 574 __ beq(&probe_dictionary); |
| 593 | 575 |
| 594 // Load the map of the receiver, compute the keyed lookup cache hash | 576 // Load the map of the receiver, compute the keyed lookup cache hash |
| 595 // based on 32 bits of the map pointer and the name hash. | 577 // based on 32 bits of the map pointer and the name hash. |
| 596 __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 578 __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 597 __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift)); | 579 __ srawi(r6, r3, KeyedLookupCache::kMapHashShift); |
| 598 __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset)); | 580 __ lwz(r7, FieldMemOperand(key, Name::kHashFieldOffset)); |
| 599 __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift)); | 581 __ srawi(r7, r7, Name::kHashShift); |
| 582 __ xor_(r6, r6, r7); |
| 600 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; | 583 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; |
| 601 __ And(r3, r3, Operand(mask)); | 584 __ mov(r7, Operand(mask)); |
| 585 __ and_(r6, r6, r7, LeaveRC); |
| 602 | 586 |
| 603 // Load the key (consisting of map and unique name) from the cache and | 587 // Load the key (consisting of map and unique name) from the cache and |
| 604 // check for match. | 588 // check for match. |
| 605 Label load_in_object_property; | 589 Label load_in_object_property; |
| 606 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; | 590 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; |
| 607 Label hit_on_nth_entry[kEntriesPerBucket]; | 591 Label hit_on_nth_entry[kEntriesPerBucket]; |
| 608 ExternalReference cache_keys = | 592 ExternalReference cache_keys = |
| 609 ExternalReference::keyed_lookup_cache_keys(isolate); | 593 ExternalReference::keyed_lookup_cache_keys(isolate); |
| 610 | 594 |
| 611 __ mov(r4, Operand(cache_keys)); | 595 __ mov(r7, Operand(cache_keys)); |
| 612 __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1)); | 596 __ mr(r0, r5); |
| 597 __ ShiftLeftImm(r5, r6, Operand(kPointerSizeLog2 + 1)); |
| 598 __ add(r7, r7, r5); |
| 599 __ mr(r5, r0); |
| 613 | 600 |
| 614 for (int i = 0; i < kEntriesPerBucket - 1; i++) { | 601 for (int i = 0; i < kEntriesPerBucket - 1; i++) { |
| 615 Label try_next_entry; | 602 Label try_next_entry; |
| 616 // Load map and move r4 to next entry. | 603 // Load map and move r7 to next entry. |
| 617 __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex)); | 604 __ LoadP(r8, MemOperand(r7)); |
| 618 __ cmp(r0, r5); | 605 __ addi(r7, r7, Operand(kPointerSize * 2)); |
| 619 __ b(ne, &try_next_entry); | 606 __ cmp(r3, r8); |
| 620 __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name | 607 __ bne(&try_next_entry); |
| 621 __ cmp(key, r5); | 608 __ LoadP(r8, MemOperand(r7, -kPointerSize)); // Load name |
| 622 __ b(eq, &hit_on_nth_entry[i]); | 609 __ cmp(key, r8); |
| 610 __ beq(&hit_on_nth_entry[i]); |
| 623 __ bind(&try_next_entry); | 611 __ bind(&try_next_entry); |
| 624 } | 612 } |
| 625 | 613 |
| 626 // Last entry: Load map and move r4 to name. | 614 // Last entry: Load map and move r7 to name. |
| 627 __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); | 615 __ LoadP(r8, MemOperand(r7)); |
| 628 __ cmp(r0, r5); | 616 __ addi(r7, r7, Operand(kPointerSize)); |
| 629 __ b(ne, &slow); | 617 __ cmp(r3, r8); |
| 630 __ ldr(r5, MemOperand(r4)); | 618 __ bne(&slow); |
| 631 __ cmp(key, r5); | 619 __ LoadP(r8, MemOperand(r7)); |
| 632 __ b(ne, &slow); | 620 __ cmp(key, r8); |
| 621 __ bne(&slow); |
| 633 | 622 |
| 634 // Get field offset. | 623 // Get field offset. |
| 635 // r0 : receiver's map | 624 // r3 : receiver's map |
| 636 // r3 : lookup cache index | 625 // r6 : lookup cache index |
| 637 ExternalReference cache_field_offsets = | 626 ExternalReference cache_field_offsets = |
| 638 ExternalReference::keyed_lookup_cache_field_offsets(isolate); | 627 ExternalReference::keyed_lookup_cache_field_offsets(isolate); |
| 639 | 628 |
| 640 // Hit on nth entry. | 629 // Hit on nth entry. |
| 641 for (int i = kEntriesPerBucket - 1; i >= 0; i--) { | 630 for (int i = kEntriesPerBucket - 1; i >= 0; i--) { |
| 642 __ bind(&hit_on_nth_entry[i]); | 631 __ bind(&hit_on_nth_entry[i]); |
| 643 __ mov(r4, Operand(cache_field_offsets)); | 632 __ mov(r7, Operand(cache_field_offsets)); |
| 644 if (i != 0) { | 633 if (i != 0) { |
| 645 __ add(r3, r3, Operand(i)); | 634 __ addi(r6, r6, Operand(i)); |
| 646 } | 635 } |
| 647 __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); | 636 __ ShiftLeftImm(r8, r6, Operand(2)); |
| 648 __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset)); | 637 __ lwzx(r8, MemOperand(r8, r7)); |
| 649 __ sub(r5, r5, r6, SetCC); | 638 __ lbz(r9, FieldMemOperand(r3, Map::kInObjectPropertiesOffset)); |
| 650 __ b(ge, &property_array_property); | 639 __ sub(r8, r8, r9); |
| 640 __ cmpi(r8, Operand::Zero()); |
| 641 __ bge(&property_array_property); |
| 651 if (i != 0) { | 642 if (i != 0) { |
| 652 __ jmp(&load_in_object_property); | 643 __ b(&load_in_object_property); |
| 653 } | 644 } |
| 654 } | 645 } |
| 655 | 646 |
| 656 // Load in-object property. | 647 // Load in-object property. |
| 657 __ bind(&load_in_object_property); | 648 __ bind(&load_in_object_property); |
| 658 __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset)); | 649 __ lbz(r9, FieldMemOperand(r3, Map::kInstanceSizeOffset)); |
| 659 __ add(r6, r6, r5); // Index from start of object. | 650 __ add(r9, r9, r8); // Index from start of object. |
| 660 __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. | 651 __ subi(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. |
| 661 __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2)); | 652 __ ShiftLeftImm(r3, r9, Operand(kPointerSizeLog2)); |
| 662 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), | 653 __ LoadPX(r3, MemOperand(r3, receiver)); |
| 663 1, r4, r3); | 654 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, |
| 655 r7, r6); |
| 664 __ Ret(); | 656 __ Ret(); |
| 665 | 657 |
| 666 // Load property array property. | 658 // Load property array property. |
| 667 __ bind(&property_array_property); | 659 __ bind(&property_array_property); |
| 668 __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 660 __ LoadP(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 669 __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 661 __ addi(receiver, receiver, |
| 670 __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2)); | 662 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 671 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), | 663 __ ShiftLeftImm(r3, r8, Operand(kPointerSizeLog2)); |
| 672 1, r4, r3); | 664 __ LoadPX(r3, MemOperand(r3, receiver)); |
| 665 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, |
| 666 r7, r6); |
| 673 __ Ret(); | 667 __ Ret(); |
| 674 | 668 |
| 675 // Do a quick inline probe of the receiver's dictionary, if it | 669 // Do a quick inline probe of the receiver's dictionary, if it |
| 676 // exists. | 670 // exists. |
| 677 __ bind(&probe_dictionary); | 671 __ bind(&probe_dictionary); |
| 678 // r3: elements | 672 // r6: elements |
| 679 __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 673 __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 680 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 674 __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); |
| 681 GenerateGlobalInstanceTypeCheck(masm, r0, &slow); | 675 GenerateGlobalInstanceTypeCheck(masm, r3, &slow); |
| 682 // Load the property to r0. | 676 // Load the property to r3. |
| 683 GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4); | 677 GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7); |
| 684 __ IncrementCounter( | 678 __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r7, |
| 685 isolate->counters()->keyed_load_generic_symbol(), 1, r4, r3); | 679 r6); |
| 686 __ Ret(); | 680 __ Ret(); |
| 687 | 681 |
| 688 __ bind(&index_name); | 682 __ bind(&index_name); |
| 689 __ IndexFromHash(r3, key); | 683 __ IndexFromHash(r6, key); |
| 690 // Now jump to the place where smi keys are handled. | 684 // Now jump to the place where smi keys are handled. |
| 691 __ jmp(&index_smi); | 685 __ b(&index_smi); |
| 692 } | 686 } |
| 693 | 687 |
| 694 | 688 |
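Note: the lookup-cache probe hashes the receiver's map pointer with the name's hash field; on PPC this becomes explicit `srawi`/`xor_`/`and_` steps instead of ARM's shifted operands. The computed bucket index is, in effect, the function below (shift and mask values are assumed, not the real KeyedLookupCache constants):

```cpp
#include <cstdint>

constexpr int kMapHashShift = 5;        // assumed
constexpr int kNameHashShift = 2;       // assumed
constexpr uint32_t kBucketMask = 0x3f;  // assumed capacity/hash mask

// Bucket index = low bits of (map >> shift) XOR (hash_field >> shift).
uint32_t KeyedLookupBucket(uint32_t map_bits, uint32_t hash_field) {
  return ((map_bits >> kMapHashShift) ^ (hash_field >> kNameHashShift)) &
         kBucketMask;
}
```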
| 695 void KeyedLoadIC::GenerateString(MacroAssembler* masm) { | 689 void KeyedLoadIC::GenerateString(MacroAssembler* masm) { |
| 696 // Return address is in lr. | 690 // Return address is in lr. |
| 697 Label miss; | 691 Label miss; |
| 698 | 692 |
| 699 Register receiver = ReceiverRegister(); | 693 Register receiver = ReceiverRegister(); |
| 700 Register index = NameRegister(); | 694 Register index = NameRegister(); |
| 701 Register scratch = r3; | 695 Register scratch = r6; |
| 702 Register result = r0; | 696 Register result = r3; |
| 703 DCHECK(!scratch.is(receiver) && !scratch.is(index)); | 697 DCHECK(!scratch.is(receiver) && !scratch.is(index)); |
| 704 | 698 |
| 705 StringCharAtGenerator char_at_generator(receiver, | 699 StringCharAtGenerator char_at_generator(receiver, index, scratch, result, |
| 706 index, | |
| 707 scratch, | |
| 708 result, | |
| 709 &miss, // When not a string. | 700 &miss, // When not a string. |
| 710 &miss, // When not a number. | 701 &miss, // When not a number. |
| 711 &miss, // When index out of range. | 702 &miss, // When index out of range. |
| 712 STRING_INDEX_IS_ARRAY_INDEX); | 703 STRING_INDEX_IS_ARRAY_INDEX); |
| 713 char_at_generator.GenerateFast(masm); | 704 char_at_generator.GenerateFast(masm); |
| 714 __ Ret(); | 705 __ Ret(); |
| 715 | 706 |
| 716 StubRuntimeCallHelper call_helper; | 707 StubRuntimeCallHelper call_helper; |
| 717 char_at_generator.GenerateSlow(masm, call_helper); | 708 char_at_generator.GenerateSlow(masm, call_helper); |
| 718 | 709 |
| 719 __ bind(&miss); | 710 __ bind(&miss); |
| 720 GenerateMiss(masm); | 711 GenerateMiss(masm); |
| 721 } | 712 } |
| 722 | 713 |
| 723 | 714 |
| 724 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { | 715 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { |
| 725 // Return address is in lr. | 716 // Return address is in lr. |
| 726 Label slow; | 717 Label slow; |
| 727 | 718 |
| 728 Register receiver = ReceiverRegister(); | 719 Register receiver = ReceiverRegister(); |
| 729 Register key = NameRegister(); | 720 Register key = NameRegister(); |
| 730 Register scratch1 = r3; | 721 Register scratch1 = r6; |
| 731 Register scratch2 = r4; | 722 Register scratch2 = r7; |
| 732 DCHECK(!scratch1.is(receiver) && !scratch1.is(key)); | 723 DCHECK(!scratch1.is(receiver) && !scratch1.is(key)); |
| 733 DCHECK(!scratch2.is(receiver) && !scratch2.is(key)); | 724 DCHECK(!scratch2.is(receiver) && !scratch2.is(key)); |
| 734 | 725 |
| 735 // Check that the receiver isn't a smi. | 726 // Check that the receiver isn't a smi. |
| 736 __ JumpIfSmi(receiver, &slow); | 727 __ JumpIfSmi(receiver, &slow); |
| 737 | 728 |
| 738 // Check that the key is an array index, that is Uint32. | 729 // Check that the key is an array index, that is Uint32. |
| 739 __ NonNegativeSmiTst(key); | 730 __ TestIfPositiveSmi(key, r0); |
| 740 __ b(ne, &slow); | 731 __ bne(&slow, cr0); |
| 741 | 732 |
| 742 // Get the map of the receiver. | 733 // Get the map of the receiver. |
| 743 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 734 __ LoadP(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 744 | 735 |
| 745 // Check that it has indexed interceptor and access checks | 736 // Check that it has indexed interceptor and access checks |
| 746 // are not enabled for this object. | 737 // are not enabled for this object. |
| 747 __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); | 738 __ lbz(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); |
| 748 __ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask)); | 739 __ andi(scratch2, scratch2, Operand(kSlowCaseBitFieldMask)); |
| 749 __ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor)); | 740 __ cmpi(scratch2, Operand(1 << Map::kHasIndexedInterceptor)); |
| 750 __ b(ne, &slow); | 741 __ bne(&slow); |
| 751 | 742 |
| 752 // Everything is fine, call runtime. | 743 // Everything is fine, call runtime. |
| 753 __ Push(receiver, key); // Receiver, key. | 744 __ Push(receiver, key); // Receiver, key. |
| 754 | 745 |
| 755 // Perform tail call to the entry. | 746 // Perform tail call to the entry. |
| 756 __ TailCallExternalReference( | 747 __ TailCallExternalReference( |
| 757 ExternalReference(IC_Utility(kLoadElementWithInterceptor), | 748 ExternalReference(IC_Utility(kLoadElementWithInterceptor), |
| 758 masm->isolate()), | 749 masm->isolate()), |
| 759 2, 1); | 750 2, 1); |
| 760 | 751 |
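
The bit-field test above admits exactly one configuration: the indexed-interceptor bit set and the access-check bit clear. A sketch of that predicate follows; Map::kHasIndexedInterceptor and kIsAccessCheckNeeded are real V8 constants, but the bit positions used here are illustrative assumptions.

#include <cstdint>

const uint8_t kIsAccessCheckNeeded = 1 << 3;    // hypothetical position
const uint8_t kHasIndexedInterceptor = 1 << 4;  // hypothetical position
const uint8_t kSlowCaseBitFieldMask =
    kIsAccessCheckNeeded | kHasIndexedInterceptor;

// The fast path is taken only when masking leaves exactly the
// interceptor bit, i.e. interceptor present and no access checks.
bool TakesInterceptorFastPath(uint8_t bit_field) {
  return (bit_field & kSlowCaseBitFieldMask) == kHasIndexedInterceptor;
}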
| (...skipping 34 matching lines...) |
| 795 ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate()); | 786 ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate()); |
| 796 __ TailCallExternalReference(ref, 3, 1); | 787 __ TailCallExternalReference(ref, 3, 1); |
| 797 } | 788 } |
| 798 | 789 |
| 799 | 790 |
| 800 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, | 791 void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, |
| 801 StrictMode strict_mode) { | 792 StrictMode strict_mode) { |
| 802 // Push receiver, key and value for runtime call. | 793 // Push receiver, key and value for runtime call. |
| 803 __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); | 794 __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); |
| 804 | 795 |
| 805 __ mov(r0, Operand(Smi::FromInt(strict_mode))); // Strict mode. | 796 __ LoadSmiLiteral(r3, Smi::FromInt(strict_mode)); // Strict mode. |
| 806 __ Push(r0); | 797 __ Push(r3); |
| 807 | 798 |
| 808 __ TailCallRuntime(Runtime::kSetProperty, 4, 1); | 799 __ TailCallRuntime(Runtime::kSetProperty, 4, 1); |
| 809 } | 800 } |
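
LoadSmiLiteral boxes the strict-mode flag as a smi before it is pushed. A sketch of smi tagging under the usual V8 scheme (tag bit 0, payload shifted above it; the shift is 1 on 32-bit targets and 32 on 64-bit targets such as PPC64). The explicit smi_shift parameter is an assumption for illustration; V8 derives it from the target word size.

#include <cstdint>

// Tag a small integer as a smi: shift the payload above the tag bits,
// leaving the low (tag) bits zero.
intptr_t TagSmi(int value, int smi_shift) {
  return static_cast<intptr_t>(value) << smi_shift;
}

int UntagSmi(intptr_t smi, int smi_shift) {
  return static_cast<int>(smi >> smi_shift);  // arithmetic shift restores it
}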
| 810 | 801 |
| 811 | 802 |
| 812 static void KeyedStoreGenerateGenericHelper( | 803 static void KeyedStoreGenerateGenericHelper( |
| 813 MacroAssembler* masm, | 804 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, |
| 814 Label* fast_object, | 805 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, |
| 815 Label* fast_double, | 806 Register value, Register key, Register receiver, Register receiver_map, |
| 816 Label* slow, | 807 Register elements_map, Register elements) { |
| 817 KeyedStoreCheckMap check_map, | |
| 818 KeyedStoreIncrementLength increment_length, | |
| 819 Register value, | |
| 820 Register key, | |
| 821 Register receiver, | |
| 822 Register receiver_map, | |
| 823 Register elements_map, | |
| 824 Register elements) { | |
| 825 Label transition_smi_elements; | 808 Label transition_smi_elements; |
| 826 Label finish_object_store, non_double_value, transition_double_elements; | 809 Label finish_object_store, non_double_value, transition_double_elements; |
| 827 Label fast_double_without_map_check; | 810 Label fast_double_without_map_check; |
| 828 | 811 |
| 829 // Fast case: Do the store, could be either Object or double. | 812 // Fast case: Do the store, could be either Object or double. |
| 830 __ bind(fast_object); | 813 __ bind(fast_object); |
| 831 Register scratch_value = r4; | 814 Register scratch_value = r7; |
| 832 Register address = r5; | 815 Register address = r8; |
| 833 if (check_map == kCheckMap) { | 816 if (check_map == kCheckMap) { |
| 834 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 817 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 835 __ cmp(elements_map, | 818 __ mov(scratch_value, |
| 836 Operand(masm->isolate()->factory()->fixed_array_map())); | 819 Operand(masm->isolate()->factory()->fixed_array_map())); |
| 837 __ b(ne, fast_double); | 820 __ cmp(elements_map, scratch_value); |
| 821 __ bne(fast_double); |
| 838 } | 822 } |
| 839 | 823 |
| 840 // HOLECHECK: guards "A[i] = V" | 824 // HOLECHECK: guards "A[i] = V" |
| 841 // We have to go to the runtime if the current value is the hole because | 825 // We have to go to the runtime if the current value is the hole because |
| 842 // there may be a callback on the element | 826 // there may be a callback on the element |
| 843 Label holecheck_passed1; | 827 Label holecheck_passed1; |
| 844 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 828 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 845 __ ldr(scratch_value, | 829 __ SmiToPtrArrayOffset(scratch_value, key); |
| 846 MemOperand::PointerAddressFromSmiKey(address, key, PreIndex)); | 830 __ LoadPX(scratch_value, MemOperand(address, scratch_value)); |
| 847 __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value())); | 831 __ Cmpi(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()), |
| 848 __ b(ne, &holecheck_passed1); | 832 r0); |
| 833 __ bne(&holecheck_passed1); |
| 849 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, | 834 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, |
| 850 slow); | 835 slow); |
| 851 | 836 |
| 852 __ bind(&holecheck_passed1); | 837 __ bind(&holecheck_passed1); |
| 853 | 838 |
| 854 // Smi stores don't require further checks. | 839 // Smi stores don't require further checks. |
| 855 Label non_smi_value; | 840 Label non_smi_value; |
| 856 __ JumpIfNotSmi(value, &non_smi_value); | 841 __ JumpIfNotSmi(value, &non_smi_value); |
| 857 | 842 |
| 858 if (increment_length == kIncrementLength) { | 843 if (increment_length == kIncrementLength) { |
| 859 // Add 1 to receiver->length. | 844 // Add 1 to receiver->length. |
| 860 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 845 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
| 861 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 846 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| 847 r0); |
| 862 } | 848 } |
| 863 // It's irrelevant whether array is smi-only or not when writing a smi. | 849 // It's irrelevant whether array is smi-only or not when writing a smi. |
| 864 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 850 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 865 __ str(value, MemOperand::PointerAddressFromSmiKey(address, key)); | 851 __ SmiToPtrArrayOffset(scratch_value, key); |
| 852 __ StorePX(value, MemOperand(address, scratch_value)); |
| 866 __ Ret(); | 853 __ Ret(); |
| 867 | 854 |
| 868 __ bind(&non_smi_value); | 855 __ bind(&non_smi_value); |
| 869 // Escape to elements kind transition case. | 856 // Escape to elements kind transition case. |
| 870 __ CheckFastObjectElements(receiver_map, scratch_value, | 857 __ CheckFastObjectElements(receiver_map, scratch_value, |
| 871 &transition_smi_elements); | 858 &transition_smi_elements); |
| 872 | 859 |
| 873 // Fast elements array, store the value to the elements backing store. | 860 // Fast elements array, store the value to the elements backing store. |
| 874 __ bind(&finish_object_store); | 861 __ bind(&finish_object_store); |
| 875 if (increment_length == kIncrementLength) { | 862 if (increment_length == kIncrementLength) { |
| 876 // Add 1 to receiver->length. | 863 // Add 1 to receiver->length. |
| 877 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 864 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
| 878 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 865 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| 866 r0); |
| 879 } | 867 } |
| 880 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 868 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 881 __ add(address, address, Operand::PointerOffsetFromSmiKey(key)); | 869 __ SmiToPtrArrayOffset(scratch_value, key); |
| 882 __ str(value, MemOperand(address)); | 870 __ StorePUX(value, MemOperand(address, scratch_value)); |
| 883 // Update write barrier for the elements array address. | 871 // Update write barrier for the elements array address. |
| 884 __ mov(scratch_value, value); // Preserve the value which is returned. | 872 __ mr(scratch_value, value); // Preserve the value which is returned. |
| 885 __ RecordWrite(elements, | 873 __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved, |
| 886 address, | 874 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
| 887 scratch_value, | |
| 888 kLRHasNotBeenSaved, | |
| 889 kDontSaveFPRegs, | |
| 890 EMIT_REMEMBERED_SET, | |
| 891 OMIT_SMI_CHECK); | |
| 892 __ Ret(); | 875 __ Ret(); |
| 893 | 876 |
| 894 __ bind(fast_double); | 877 __ bind(fast_double); |
| 895 if (check_map == kCheckMap) { | 878 if (check_map == kCheckMap) { |
| 896 // Check for fast double array case. If this fails, call through to the | 879 // Check for fast double array case. If this fails, call through to the |
| 897 // runtime. | 880 // runtime. |
| 898 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); | 881 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); |
| 899 __ b(ne, slow); | 882 __ bne(slow); |
| 900 } | 883 } |
| 901 | 884 |
| 902 // HOLECHECK: guards "A[i] double hole?" | 885 // HOLECHECK: guards "A[i] double hole?" |
| 903 // We have to see if the double version of the hole is present. If so | 886 // We have to see if the double version of the hole is present. If so |
| 904 // go to the runtime. | 887 // go to the runtime. |
| 905 __ add(address, elements, | 888 __ addi(address, elements, |
| 906 Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) | 889 Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset - |
| 907 - kHeapObjectTag)); | 890 kHeapObjectTag))); |
| 908 __ ldr(scratch_value, | 891 __ SmiToDoubleArrayOffset(scratch_value, key); |
| 909 MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex)); | 892 __ lwzx(scratch_value, MemOperand(address, scratch_value)); |
| 910 __ cmp(scratch_value, Operand(kHoleNanUpper32)); | 893 __ Cmpi(scratch_value, Operand(kHoleNanUpper32), r0); |
| 911 __ b(ne, &fast_double_without_map_check); | 894 __ bne(&fast_double_without_map_check); |
| 912 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, | 895 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, |
| 913 slow); | 896 slow); |
| 914 | 897 |
| 915 __ bind(&fast_double_without_map_check); | 898 __ bind(&fast_double_without_map_check); |
| 916 __ StoreNumberToDoubleElements(value, key, elements, r3, d0, | 899 __ StoreNumberToDoubleElements(value, key, elements, r6, d0, |
| 917 &transition_double_elements); | 900 &transition_double_elements); |
| 918 if (increment_length == kIncrementLength) { | 901 if (increment_length == kIncrementLength) { |
| 919 // Add 1 to receiver->length. | 902 // Add 1 to receiver->length. |
| 920 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 903 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
| 921 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 904 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| 905 r0); |
| 922 } | 906 } |
| 923 __ Ret(); | 907 __ Ret(); |
| 924 | 908 |
| 925 __ bind(&transition_smi_elements); | 909 __ bind(&transition_smi_elements); |
| 926 // Transition the array appropriately depending on the value type. | 910 // Transition the array appropriately depending on the value type. |
| 927 __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset)); | 911 __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 928 __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); | 912 __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex); |
| 929 __ b(ne, &non_double_value); | 913 __ bne(&non_double_value); |
| 930 | 914 |
| 931 // Value is a double. Transition FAST_SMI_ELEMENTS -> | 915 // Value is a double. Transition FAST_SMI_ELEMENTS -> |
| 932 // FAST_DOUBLE_ELEMENTS and complete the store. | 916 // FAST_DOUBLE_ELEMENTS and complete the store. |
| 933 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, | 917 __ LoadTransitionedArrayMapConditional( |
| 934 FAST_DOUBLE_ELEMENTS, | 918 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r7, slow); |
| 935 receiver_map, | 919 AllocationSiteMode mode = |
| 936 r4, | 920 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); |
| 937 slow); | 921 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, |
| 938 AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, | 922 receiver_map, mode, slow); |
| 939 FAST_DOUBLE_ELEMENTS); | 923 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 940 ElementsTransitionGenerator::GenerateSmiToDouble( | 924 __ b(&fast_double_without_map_check); |
| 941 masm, receiver, key, value, receiver_map, mode, slow); | |
| 942 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
| 943 __ jmp(&fast_double_without_map_check); | |
| 944 | 925 |
| 945 __ bind(&non_double_value); | 926 __ bind(&non_double_value); |
| 946 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS | 927 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS |
| 947 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, | 928 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, |
| 948 FAST_ELEMENTS, | 929 receiver_map, r7, slow); |
| 949 receiver_map, | |
| 950 r4, | |
| 951 slow); | |
| 952 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); | 930 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); |
| 953 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | 931 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
| 954 masm, receiver, key, value, receiver_map, mode, slow); | 932 masm, receiver, key, value, receiver_map, mode, slow); |
| 955 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 933 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 956 __ jmp(&finish_object_store); | 934 __ b(&finish_object_store); |
| 957 | 935 |
| 958 __ bind(&transition_double_elements); | 936 __ bind(&transition_double_elements); |
| 959 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a | 937 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a |
| 960 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and | 938 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and |
| 961 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS | 939 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS |
| 962 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, | 940 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, |
| 963 FAST_ELEMENTS, | 941 receiver_map, r7, slow); |
| 964 receiver_map, | |
| 965 r4, | |
| 966 slow); | |
| 967 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); | 942 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); |
| 968 ElementsTransitionGenerator::GenerateDoubleToObject( | 943 ElementsTransitionGenerator::GenerateDoubleToObject( |
| 969 masm, receiver, key, value, receiver_map, mode, slow); | 944 masm, receiver, key, value, receiver_map, mode, slow); |
| 970 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 945 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 971 __ jmp(&finish_object_store); | 946 __ b(&finish_object_store); |
| 972 } | 947 } |
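
The three transition blocks above implement a one-way lattice of elements kinds. Restated as ordinary C++ below; the enumerator names match the source, and the boolean arguments stand in for the map checks the stub performs.

enum ElementsKind { FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS };

// Which representation must the backing store move to for this value?
ElementsKind TargetKind(ElementsKind current, bool value_is_smi,
                        bool value_is_heap_number) {
  if (value_is_smi) return current;  // a smi fits in every representation
  if (current == FAST_SMI_ELEMENTS) {
    // transition_smi_elements: a double gets an unboxed double array,
    // any other heap object forces fully boxed FAST_ELEMENTS.
    return value_is_heap_number ? FAST_DOUBLE_ELEMENTS : FAST_ELEMENTS;
  }
  if (current == FAST_DOUBLE_ELEMENTS && !value_is_heap_number) {
    return FAST_ELEMENTS;  // transition_double_elements
  }
  return current;  // store proceeds in the current representation
}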
| 973 | 948 |
| 974 | 949 |
| 975 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, | 950 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, |
| 976 StrictMode strict_mode) { | 951 StrictMode strict_mode) { |
| 977 // ---------- S t a t e -------------- | 952 // ---------- S t a t e -------------- |
| 978 // -- r0 : value | 953 // -- r3 : value |
| 979 // -- r1 : key | 954 // -- r4 : key |
| 980 // -- r2 : receiver | 955 // -- r5 : receiver |
| 981 // -- lr : return address | 956 // -- lr : return address |
| 982 // ----------------------------------- | 957 // ----------------------------------- |
| 983 Label slow, fast_object, fast_object_grow; | 958 Label slow, fast_object, fast_object_grow; |
| 984 Label fast_double, fast_double_grow; | 959 Label fast_double, fast_double_grow; |
| 985 Label array, extra, check_if_double_array; | 960 Label array, extra, check_if_double_array; |
| 986 | 961 |
| 987 // Register usage. | 962 // Register usage. |
| 988 Register value = ValueRegister(); | 963 Register value = ValueRegister(); |
| 989 Register key = NameRegister(); | 964 Register key = NameRegister(); |
| 990 Register receiver = ReceiverRegister(); | 965 Register receiver = ReceiverRegister(); |
| 991 DCHECK(receiver.is(r1)); | 966 DCHECK(receiver.is(r4)); |
| 992 DCHECK(key.is(r2)); | 967 DCHECK(key.is(r5)); |
| 993 DCHECK(value.is(r0)); | 968 DCHECK(value.is(r3)); |
| 994 Register receiver_map = r3; | 969 Register receiver_map = r6; |
| 995 Register elements_map = r6; | 970 Register elements_map = r9; |
| 996 Register elements = r9; // Elements array of the receiver. | 971 Register elements = r10; // Elements array of the receiver. |
| 997 // r4 and r5 are used as general scratch registers. | 972 // r7 and r8 are used as general scratch registers. |
| 998 | 973 |
| 999 // Check that the key is a smi. | 974 // Check that the key is a smi. |
| 1000 __ JumpIfNotSmi(key, &slow); | 975 __ JumpIfNotSmi(key, &slow); |
| 1001 // Check that the object isn't a smi. | 976 // Check that the object isn't a smi. |
| 1002 __ JumpIfSmi(receiver, &slow); | 977 __ JumpIfSmi(receiver, &slow); |
| 1003 // Get the map of the object. | 978 // Get the map of the object. |
| 1004 __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 979 __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 1005 // Check that the receiver does not require access checks and is not observed. | 980 // Check that the receiver does not require access checks and is not observed. |
| 1006 // The generic stub does not perform map checks or handle observed objects. | 981 // The generic stub does not perform map checks or handle observed objects. |
| 1007 __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); | 982 __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); |
| 1008 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); | 983 __ andi(r0, ip, |
| 1009 __ b(ne, &slow); | 984 Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); |
| 985 __ bne(&slow, cr0); |
| 1010 // Check if the object is a JS array or not. | 986 // Check if the object is a JS array or not. |
| 1011 __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); | 987 __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); |
| 1012 __ cmp(r4, Operand(JS_ARRAY_TYPE)); | 988 __ cmpi(r7, Operand(JS_ARRAY_TYPE)); |
| 1013 __ b(eq, &array); | 989 __ beq(&array); |
| 1014 // Check that the object is some kind of JSObject. | 990 // Check that the object is some kind of JSObject. |
| 1015 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); | 991 __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE)); |
| 1016 __ b(lt, &slow); | 992 __ blt(&slow); |
| 1017 | 993 |
| 1018 // Object case: Check key against length in the elements array. | 994 // Object case: Check key against length in the elements array. |
| 1019 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 995 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 1020 // Check array bounds. Both the key and the length of FixedArray are smis. | 996 // Check array bounds. Both the key and the length of FixedArray are smis. |
| 1021 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 997 __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 1022 __ cmp(key, Operand(ip)); | 998 __ cmpl(key, ip); |
| 1023 __ b(lo, &fast_object); | 999 __ blt(&fast_object); |
| 1024 | 1000 |
| 1025 // Slow case, handle jump to runtime. | 1001 // Slow case, handle jump to runtime. |
| 1026 __ bind(&slow); | 1002 __ bind(&slow); |
| 1027 // Entry registers are intact. | 1003 // Entry registers are intact. |
| 1028 // r0: value. | 1004 // r3: value. |
| 1029 // r1: key. | 1005 // r4: key. |
| 1030 // r2: receiver. | 1006 // r5: receiver. |
| 1031 GenerateRuntimeSetProperty(masm, strict_mode); | 1007 GenerateRuntimeSetProperty(masm, strict_mode); |
| 1032 | 1008 |
| 1033 // Extra capacity case: Check if there is extra capacity to | 1009 // Extra capacity case: Check if there is extra capacity to |
| 1034 // perform the store and update the length. Used for adding one | 1010 // perform the store and update the length. Used for adding one |
| 1035 // element to the array by writing to array[array.length]. | 1011 // element to the array by writing to array[array.length]. |
| 1036 __ bind(&extra); | 1012 __ bind(&extra); |
| 1037 // Condition code from comparing key and array length is still available. | 1013 // Condition code from comparing key and array length is still available. |
| 1038 __ b(ne, &slow); // Only support writing to array[array.length]. | 1014 __ bne(&slow); // Only support writing to array[array.length]. |
| 1039 // Check for room in the elements backing store. | 1015 // Check for room in the elements backing store. |
| 1040 // Both the key and the length of FixedArray are smis. | 1016 // Both the key and the length of FixedArray are smis. |
| 1041 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 1017 __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 1042 __ cmp(key, Operand(ip)); | 1018 __ cmpl(key, ip); |
| 1043 __ b(hs, &slow); | 1019 __ bge(&slow); |
| 1044 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 1020 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
| 1045 __ cmp(elements_map, | 1021 __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map())); |
| 1046 Operand(masm->isolate()->factory()->fixed_array_map())); | 1022 __ cmp(elements_map, ip); // PPC: ip is safe to re-use; it was loaded just above |
| 1047 __ b(ne, &check_if_double_array); | 1023 __ bne(&check_if_double_array); |
| 1048 __ jmp(&fast_object_grow); | 1024 __ b(&fast_object_grow); |
| 1049 | 1025 |
| 1050 __ bind(&check_if_double_array); | 1026 __ bind(&check_if_double_array); |
| 1051 __ cmp(elements_map, | 1027 __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map())); |
| 1052 Operand(masm->isolate()->factory()->fixed_double_array_map())); | 1028 __ cmp(elements_map, ip); // PPC: ip re-used again; loaded just above |
| 1053 __ b(ne, &slow); | 1029 __ bne(&slow); |
| 1054 __ jmp(&fast_double_grow); | 1030 __ b(&fast_double_grow); |
| 1055 | 1031 |
| 1056 // Array case: Get the length and the elements array from the JS | 1032 // Array case: Get the length and the elements array from the JS |
| 1057 // array. Check that the array is in fast mode (and writable); if it | 1033 // array. Check that the array is in fast mode (and writable); if it |
| 1058 // is, the length is always a smi. | 1034 // is, the length is always a smi. |
| 1059 __ bind(&array); | 1035 __ bind(&array); |
| 1060 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 1036 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 1061 | 1037 |
| 1062 // Check the key against the length in the array. | 1038 // Check the key against the length in the array. |
| 1063 __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1039 __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1064 __ cmp(key, Operand(ip)); | 1040 __ cmpl(key, ip); |
| 1065 __ b(hs, &extra); | 1041 __ bge(&extra); |
| 1066 | 1042 |
| 1067 KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double, | 1043 KeyedStoreGenerateGenericHelper( |
| 1068 &slow, kCheckMap, kDontIncrementLength, | 1044 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, |
| 1069 value, key, receiver, receiver_map, | 1045 value, key, receiver, receiver_map, elements_map, elements); |
| 1070 elements_map, elements); | |
| 1071 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, | 1046 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, |
| 1072 &slow, kDontCheckMap, kIncrementLength, | 1047 &slow, kDontCheckMap, kIncrementLength, value, |
| 1073 value, key, receiver, receiver_map, | 1048 key, receiver, receiver_map, elements_map, |
| 1074 elements_map, elements); | 1049 elements); |
| 1075 } | 1050 } |
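
For the JS-array case, the control flow above boils down to a three-way classification of the key against the array length and the backing-store capacity. A compact restatement (values untagged here, whereas the stub compares smis directly):

#include <cstdint>

enum StorePath { kFastStore, kGrowStore, kRuntime };

StorePath ClassifyArrayStore(uint32_t key, uint32_t length,
                             uint32_t capacity) {
  if (key < length) return kFastStore;   // in bounds: fast object/double path
  if (key != length) return kRuntime;    // would create a hole: &slow
  if (key >= capacity) return kRuntime;  // no spare room in elements: &slow
  return kGrowStore;                     // append at array.length: &extra
}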
| 1076 | 1051 |
| 1077 | 1052 |
| 1078 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | 1053 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { |
| 1079 Register receiver = ReceiverRegister(); | 1054 Register receiver = ReceiverRegister(); |
| 1080 Register name = NameRegister(); | 1055 Register name = NameRegister(); |
| 1081 DCHECK(receiver.is(r1)); | 1056 DCHECK(receiver.is(r4)); |
| 1082 DCHECK(name.is(r2)); | 1057 DCHECK(name.is(r5)); |
| 1083 DCHECK(ValueRegister().is(r0)); | 1058 DCHECK(ValueRegister().is(r3)); |
| 1084 | 1059 |
| 1085 // Get the receiver from the stack and probe the stub cache. | 1060 // Get the receiver from the stack and probe the stub cache. |
| 1086 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( | 1061 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( |
| 1087 Code::ComputeHandlerFlags(Code::STORE_IC)); | 1062 Code::ComputeHandlerFlags(Code::STORE_IC)); |
| 1088 | 1063 |
| 1089 masm->isolate()->stub_cache()->GenerateProbe( | 1064 masm->isolate()->stub_cache()->GenerateProbe(masm, flags, receiver, name, r6, |
| 1090 masm, flags, receiver, name, r3, r4, r5, r6); | 1065 r7, r8, r9); |
| 1091 | 1066 |
| 1092 // Cache miss: Jump to runtime. | 1067 // Cache miss: Jump to runtime. |
| 1093 GenerateMiss(masm); | 1068 GenerateMiss(masm); |
| 1094 } | 1069 } |
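
GenerateProbe hashes the (receiver map, name) pair into the isolate's stub cache and jumps straight to a cached handler on a hit; the four registers passed in are scratch. The idea in miniature, with an illustrative direct-mapped table and hash (V8's real cache uses primary and secondary tables and a different hash function):

#include <cstddef>
#include <cstdint>

struct StubCacheEntry {
  const void* map;
  const void* name;
  const void* handler;  // code to jump to on a hit
};

// Recompute the slot from (map, name) and verify both keys before
// trusting the cached handler; a mismatch means a cache miss.
const void* Probe(const StubCacheEntry* table, size_t table_size,
                  const void* map, const void* name) {
  size_t slot = (reinterpret_cast<uintptr_t>(map) ^
                 reinterpret_cast<uintptr_t>(name)) % table_size;
  const StubCacheEntry& e = table[slot];
  return (e.map == map && e.name == name) ? e.handler : nullptr;
}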
| 1095 | 1070 |
| 1096 | 1071 |
| 1097 void StoreIC::GenerateMiss(MacroAssembler* masm) { | 1072 void StoreIC::GenerateMiss(MacroAssembler* masm) { |
| 1098 __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); | 1073 __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); |
| 1099 | 1074 |
| 1100 // Perform tail call to the entry. | 1075 // Perform tail call to the entry. |
| 1101 ExternalReference ref = | 1076 ExternalReference ref = |
| 1102 ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); | 1077 ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); |
| 1103 __ TailCallExternalReference(ref, 3, 1); | 1078 __ TailCallExternalReference(ref, 3, 1); |
| 1104 } | 1079 } |
| 1105 | 1080 |
| 1106 | 1081 |
| 1107 void StoreIC::GenerateNormal(MacroAssembler* masm) { | 1082 void StoreIC::GenerateNormal(MacroAssembler* masm) { |
| 1108 Label miss; | 1083 Label miss; |
| 1109 Register receiver = ReceiverRegister(); | 1084 Register receiver = ReceiverRegister(); |
| 1110 Register name = NameRegister(); | 1085 Register name = NameRegister(); |
| 1111 Register value = ValueRegister(); | 1086 Register value = ValueRegister(); |
| 1112 Register dictionary = r3; | 1087 Register dictionary = r6; |
| 1113 DCHECK(receiver.is(r1)); | 1088 DCHECK(receiver.is(r4)); |
| 1114 DCHECK(name.is(r2)); | 1089 DCHECK(name.is(r5)); |
| 1115 DCHECK(value.is(r0)); | 1090 DCHECK(value.is(r3)); |
| 1116 | 1091 |
| 1117 __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 1092 __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 1118 | 1093 |
| 1119 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5); | 1094 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r7, r8); |
| 1120 Counters* counters = masm->isolate()->counters(); | 1095 Counters* counters = masm->isolate()->counters(); |
| 1121 __ IncrementCounter(counters->store_normal_hit(), | 1096 __ IncrementCounter(counters->store_normal_hit(), 1, r7, r8); |
| 1122 1, r4, r5); | |
| 1123 __ Ret(); | 1097 __ Ret(); |
| 1124 | 1098 |
| 1125 __ bind(&miss); | 1099 __ bind(&miss); |
| 1126 __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5); | 1100 __ IncrementCounter(counters->store_normal_miss(), 1, r7, r8); |
| 1127 GenerateMiss(masm); | 1101 GenerateMiss(masm); |
| 1128 } | 1102 } |
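
GenerateDictionaryStore only handles the easy case inline: the property already exists in the dictionary and is plainly writable; anything else (absent key, accessor, read-only attribute) takes the miss path and bumps the miss counter. A behavioural sketch, with a std::unordered_map standing in for the property dictionary and attributes ignored:

#include <string>
#include <unordered_map>

using Dictionary = std::unordered_map<std::string, int>;

bool DictionaryStoreFastPath(Dictionary* dict, const std::string& name,
                             int value) {
  auto it = dict->find(name);
  if (it == dict->end()) return false;  // absent -> GenerateMiss
  it->second = value;                   // in-place update of the entry
  return true;                          // store_normal_hit
}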
| 1129 | 1103 |
| 1130 | 1104 |
| 1131 void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, | 1105 void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm, |
| 1132 StrictMode strict_mode) { | 1106 StrictMode strict_mode) { |
| 1133 __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); | 1107 __ Push(ReceiverRegister(), NameRegister(), ValueRegister()); |
| 1134 | 1108 |
| 1135 __ mov(r0, Operand(Smi::FromInt(strict_mode))); | 1109 __ LoadSmiLiteral(r3, Smi::FromInt(strict_mode)); |
| 1136 __ Push(r0); | 1110 __ Push(r3); |
| 1137 | 1111 |
| 1138 // Do tail-call to runtime routine. | 1112 // Do tail-call to runtime routine. |
| 1139 __ TailCallRuntime(Runtime::kSetProperty, 4, 1); | 1113 __ TailCallRuntime(Runtime::kSetProperty, 4, 1); |
| 1140 } | 1114 } |
| 1141 | 1115 |
| 1142 | 1116 |
| 1143 #undef __ | 1117 #undef __ |
| 1144 | 1118 |
| 1145 | 1119 |
| 1146 Condition CompareIC::ComputeCondition(Token::Value op) { | 1120 Condition CompareIC::ComputeCondition(Token::Value op) { |
| (...skipping 21 matching lines...) |
| 1168 Address cmp_instruction_address = | 1142 Address cmp_instruction_address = |
| 1169 Assembler::return_address_from_call_start(address); | 1143 Assembler::return_address_from_call_start(address); |
| 1170 | 1144 |
| 1171 // If the instruction following the call is not a cmp rx, #yyy, nothing | 1145 // If the instruction following the call is not a cmp rx, #yyy, nothing |
| 1172 // was inlined. | 1146 // was inlined. |
| 1173 Instr instr = Assembler::instr_at(cmp_instruction_address); | 1147 Instr instr = Assembler::instr_at(cmp_instruction_address); |
| 1174 return Assembler::IsCmpImmediate(instr); | 1148 return Assembler::IsCmpImmediate(instr); |
| 1175 } | 1149 } |
| 1176 | 1150 |
| 1177 | 1151 |
| 1152 // |
| 1153 // This code is paired with the JumpPatchSite class in full-codegen-ppc.cc |
| 1154 // |
| 1178 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { | 1155 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { |
| 1179 Address cmp_instruction_address = | 1156 Address cmp_instruction_address = |
| 1180 Assembler::return_address_from_call_start(address); | 1157 Assembler::return_address_from_call_start(address); |
| 1181 | 1158 |
| 1182 // If the instruction following the call is not a cmp rx, #yyy, nothing | 1159 // If the instruction following the call is not a cmp rx, #yyy, nothing |
| 1183 // was inlined. | 1160 // was inlined. |
| 1184 Instr instr = Assembler::instr_at(cmp_instruction_address); | 1161 Instr instr = Assembler::instr_at(cmp_instruction_address); |
| 1185 if (!Assembler::IsCmpImmediate(instr)) { | 1162 if (!Assembler::IsCmpImmediate(instr)) { |
| 1186 return; | 1163 return; |
| 1187 } | 1164 } |
| 1188 | 1165 |
| 1189 // The delta to the start of the map check instruction is encoded in the | 1166 // The delta to the start of the map check instruction is encoded in the |
| 1190 // immediate and register fields of the cmp that follows the call. | 1167 // immediate and register fields of the cmp that follows the call. |
| 1191 int delta = Assembler::GetCmpImmediateRawImmediate(instr); | 1168 int delta = Assembler::GetCmpImmediateRawImmediate(instr); |
| 1192 delta += | 1169 delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff16Mask; |
| 1193 Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask; | |
| 1194 // If the delta is 0 the instruction is cmp r0, #0 which also signals that | 1170 // If the delta is 0 the instruction is cmp r0, #0 which also signals that |
| 1195 // nothing was inlined. | 1171 // nothing was inlined. |
| 1196 if (delta == 0) { | 1172 if (delta == 0) { |
| 1197 return; | 1173 return; |
| 1198 } | 1174 } |
| 1199 | 1175 |
| 1200 if (FLAG_trace_ic) { | 1176 if (FLAG_trace_ic) { |
| 1201 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", | 1177 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address, |
| 1202 address, cmp_instruction_address, delta); | 1178 cmp_instruction_address, delta); |
| 1203 } | 1179 } |
| 1204 | 1180 |
| 1205 Address patch_address = | 1181 Address patch_address = |
| 1206 cmp_instruction_address - delta * Instruction::kInstrSize; | 1182 cmp_instruction_address - delta * Instruction::kInstrSize; |
| 1207 Instr instr_at_patch = Assembler::instr_at(patch_address); | 1183 Instr instr_at_patch = Assembler::instr_at(patch_address); |
| 1208 Instr branch_instr = | 1184 Instr branch_instr = |
| 1209 Assembler::instr_at(patch_address + Instruction::kInstrSize); | 1185 Assembler::instr_at(patch_address + Instruction::kInstrSize); |
| 1210 // This is patching a conditional "jump if not smi/jump if smi" site. | 1186 // This is patching a conditional "jump if not smi/jump if smi" site. |
| 1211 // Enabling is done by changing from | 1187 // Enabling is done by changing from |
| 1212 // cmp rx, rx | 1188 // cmp cr0, rx, rx |
| 1213 // b eq/ne, <target> | |
| 1214 // to | 1189 // to |
| 1215 // tst rx, #kSmiTagMask | 1190 // rlwinm(r0, value, 0, 31, 31, SetRC); |
| 1216 // b ne/eq, <target> | 1191 // bc(label, BT/BF, 2) |
| 1217 // and vice-versa to be disabled again. | 1192 // and vice-versa to be disabled again. |
| 1218 CodePatcher patcher(patch_address, 2); | 1193 CodePatcher patcher(patch_address, 2); |
| 1219 Register reg = Assembler::GetRn(instr_at_patch); | 1194 Register reg = Assembler::GetRA(instr_at_patch); |
| 1220 if (check == ENABLE_INLINED_SMI_CHECK) { | 1195 if (check == ENABLE_INLINED_SMI_CHECK) { |
| 1221 DCHECK(Assembler::IsCmpRegister(instr_at_patch)); | 1196 DCHECK(Assembler::IsCmpRegister(instr_at_patch)); |
| 1222 DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(), | 1197 DCHECK_EQ(Assembler::GetRA(instr_at_patch).code(), |
| 1223 Assembler::GetRm(instr_at_patch).code()); | 1198 Assembler::GetRB(instr_at_patch).code()); |
| 1224 patcher.masm()->tst(reg, Operand(kSmiTagMask)); | 1199 patcher.masm()->TestIfSmi(reg, r0); |
| 1225 } else { | 1200 } else { |
| 1226 DCHECK(check == DISABLE_INLINED_SMI_CHECK); | 1201 DCHECK(check == DISABLE_INLINED_SMI_CHECK); |
| 1227 DCHECK(Assembler::IsTstImmediate(instr_at_patch)); | 1202 #if V8_TARGET_ARCH_PPC64 |
| 1228 patcher.masm()->cmp(reg, reg); | 1203 DCHECK(Assembler::IsRldicl(instr_at_patch)); |
| 1204 #else |
| 1205 DCHECK(Assembler::IsRlwinm(instr_at_patch)); |
| 1206 #endif |
| 1207 patcher.masm()->cmp(reg, reg, cr0); |
| 1229 } | 1208 } |
| 1230 DCHECK(Assembler::IsBranch(branch_instr)); | 1209 DCHECK(Assembler::IsBranch(branch_instr)); |
| 1210 |
| 1211 // Invert the logic of the branch |
| 1231 if (Assembler::GetCondition(branch_instr) == eq) { | 1212 if (Assembler::GetCondition(branch_instr) == eq) { |
| 1232 patcher.EmitCondition(ne); | 1213 patcher.EmitCondition(ne); |
| 1233 } else { | 1214 } else { |
| 1234 DCHECK(Assembler::GetCondition(branch_instr) == ne); | 1215 DCHECK(Assembler::GetCondition(branch_instr) == ne); |
| 1235 patcher.EmitCondition(eq); | 1216 patcher.EmitCondition(eq); |
| 1236 } | 1217 } |
| 1237 } | 1218 } |
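
The patching protocol, in miniature: the cmp immediate that follows the IC call encodes an instruction delta back to the inlined smi check, with zero reserved to mean "nothing inlined". A sketch of the address arithmetic; the instruction size is passed in explicitly here, while on PPC every instruction is a fixed 4 bytes.

#include <cstdint>

// Returns the patch site, or nullptr when no smi check was inlined
// (the zero delta corresponds to the "cmp r0, #0" sentinel above).
const uint8_t* FindInlinedSmiCheck(const uint8_t* cmp_instruction_address,
                                   int delta, int instr_size) {
  if (delta == 0) return nullptr;
  return cmp_instruction_address - delta * instr_size;
}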
| 1219 } |
| 1220 } // namespace v8::internal |
| 1238 | 1221 |
| 1239 | 1222 #endif // V8_TARGET_ARCH_PPC |
| 1240 } } // namespace v8::internal | |
| 1241 | |
| 1242 #endif // V8_TARGET_ARCH_ARM | |