OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // |
| 3 // Copyright IBM Corp. 2012, 2013. All rights reserved. |
| 4 // |
2 // Use of this source code is governed by a BSD-style license that can be | 5 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 6 // found in the LICENSE file. |
4 | 7 |
5 #include "src/v8.h" | 8 #include "src/v8.h" |
6 | 9 |
7 #if V8_TARGET_ARCH_ARM | 10 #if V8_TARGET_ARCH_PPC |
8 | 11 |
9 #include "src/codegen.h" | 12 #include "src/codegen.h" |
10 #include "src/ic/ic.h" | 13 #include "src/ic/ic.h" |
11 #include "src/ic/ic-compiler.h" | 14 #include "src/ic/ic-compiler.h" |
12 #include "src/ic/stub-cache.h" | 15 #include "src/ic/stub-cache.h" |
13 | 16 |
14 namespace v8 { | 17 namespace v8 { |
15 namespace internal { | 18 namespace internal { |
16 | 19 |
17 | 20 |
18 // ---------------------------------------------------------------------------- | 21 // ---------------------------------------------------------------------------- |
19 // Static IC stub generators. | 22 // Static IC stub generators. |
20 // | 23 // |
21 | 24 |
22 #define __ ACCESS_MASM(masm) | 25 #define __ ACCESS_MASM(masm) |
23 | 26 |
24 | 27 |
25 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, | 28 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, |
26 Label* global_object) { | 29 Label* global_object) { |
27 // Register usage: | 30 // Register usage: |
28 // type: holds the receiver instance type on entry. | 31 // type: holds the receiver instance type on entry. |
29 __ cmp(type, Operand(JS_GLOBAL_OBJECT_TYPE)); | 32 __ cmpi(type, Operand(JS_GLOBAL_OBJECT_TYPE)); |
30 __ b(eq, global_object); | 33 __ beq(global_object); |
31 __ cmp(type, Operand(JS_BUILTINS_OBJECT_TYPE)); | 34 __ cmpi(type, Operand(JS_BUILTINS_OBJECT_TYPE)); |
32 __ b(eq, global_object); | 35 __ beq(global_object); |
33 __ cmp(type, Operand(JS_GLOBAL_PROXY_TYPE)); | 36 __ cmpi(type, Operand(JS_GLOBAL_PROXY_TYPE)); |
34 __ b(eq, global_object); | 37 __ beq(global_object); |
35 } | 38 } |
36 | 39 |
37 | 40 |
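The recurring pattern of this port is visible above: ARM folds the condition into cmp/b(cond), while PPC uses an immediate compare (cmpi) and a branch mnemonic that carries the condition (beq). As a plain C++ restatement of the predicate the stub encodes (the JS_* names are V8's instance-type enumerators, shown here only for illustration):

    // Illustrative-only analogue of GenerateGlobalInstanceTypeCheck.
    static bool IsGlobalObjectType(int instance_type) {
      return instance_type == JS_GLOBAL_OBJECT_TYPE ||
             instance_type == JS_BUILTINS_OBJECT_TYPE ||
             instance_type == JS_GLOBAL_PROXY_TYPE;
    }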
38 // Helper function used from LoadIC GenerateNormal. | 41 // Helper function used from LoadIC GenerateNormal. |
39 // | 42 // |
40 // elements: Property dictionary. It is not clobbered if a jump to the miss | 43 // elements: Property dictionary. It is not clobbered if a jump to the miss |
41 // label is done. | 44 // label is done. |
42 // name: Property name. It is not clobbered if a jump to the miss label is | 45 // name: Property name. It is not clobbered if a jump to the miss label is |
43 // done. | 46 // done. |
44 // result: Register for the result. It is only updated if a jump to the miss | 47 // result: Register for the result. It is only updated if a jump to the miss |
(...skipping 17 matching lines...) |
62 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, | 65 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, |
63 name, scratch1, scratch2); | 66 name, scratch1, scratch2); |
64 | 67 |
65 // If probing finds an entry check that the value is a normal | 68 // If probing finds an entry check that the value is a normal |
66 // property. | 69 // property. |
67 __ bind(&done); // scratch2 == elements + 4 * index | 70 __ bind(&done); // scratch2 == elements + kPointerSize * index |
68 const int kElementsStartOffset = | 71 const int kElementsStartOffset = |
69 NameDictionary::kHeaderSize + | 72 NameDictionary::kHeaderSize + |
70 NameDictionary::kElementsStartIndex * kPointerSize; | 73 NameDictionary::kElementsStartIndex * kPointerSize; |
71 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; | 74 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; |
72 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); | 75 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); |
73 __ tst(scratch1, Operand(PropertyDetails::TypeField::kMask << kSmiTagSize)); | 76 __ mr(r0, scratch2); |
74 __ b(ne, miss); | 77 __ LoadSmiLiteral(scratch2, Smi::FromInt(PropertyDetails::TypeField::kMask)); |
| 78 __ and_(scratch2, scratch1, scratch2, SetRC); |
| 79 __ bne(miss, cr0); |
| 80 __ mr(scratch2, r0); |
75 | 81 |
76 // Get the value at the masked, scaled index and return. | 82 // Get the value at the masked, scaled index and return. |
77 __ ldr(result, | 83 __ LoadP(result, |
78 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); | 84 FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize)); |
79 } | 85 } |
80 | 86 |
81 | 87 |
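ARM's tst accepts the shifted mask as an immediate; PPC has no equivalent, so the port saves scratch2 in r0, builds the smi-tagged mask with LoadSmiLiteral, ANDs with SetRC so the result is recorded in CR0, branches on cr0, and then restores scratch2. A minimal sketch of the test itself, assuming 32-bit smi tagging (kSmiTagSize == 1; on PPC64 the smi shift is 32 but the idea is identical):

    #include <cstdint>

    // details_smi is the smi-tagged PropertyDetails word; mask is
    // PropertyDetails::TypeField::kMask. Tagging the mask the same way
    // (what LoadSmiLiteral does) avoids untagging before the AND.
    static bool IsNormalProperty(intptr_t details_smi, intptr_t mask) {
      intptr_t tagged_mask = mask << 1;         // kSmiTagSize == 1 (assumed)
      return (details_smi & tagged_mask) == 0;  // property type NORMAL is 0
    }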
82 // Helper function used from StoreIC::GenerateNormal. | 88 // Helper function used from StoreIC::GenerateNormal. |
83 // | 89 // |
84 // elements: Property dictionary. It is not clobbered if a jump to the miss | 90 // elements: Property dictionary. It is not clobbered if a jump to the miss |
85 // label is done. | 91 // label is done. |
86 // name: Property name. It is not clobbered if a jump to the miss label is | 92 // name: Property name. It is not clobbered if a jump to the miss label is |
87 // done. | 93 // done. |
88 // value: The value to store. | 94 // value: The value to store. |
(...skipping 15 matching lines...) |
104 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, | 110 NameDictionaryLookupStub::GeneratePositiveLookup(masm, miss, &done, elements, |
105 name, scratch1, scratch2); | 111 name, scratch1, scratch2); |
106 | 112 |
107 // If probing finds an entry in the dictionary check that the value | 113 // If probing finds an entry in the dictionary check that the value |
108 // is a normal property that is not read only. | 114 // is a normal property that is not read only. |
109 __ bind(&done); // scratch2 == elements + 4 * index | 115 __ bind(&done); // scratch2 == elements + kPointerSize * index |
110 const int kElementsStartOffset = | 116 const int kElementsStartOffset = |
111 NameDictionary::kHeaderSize + | 117 NameDictionary::kHeaderSize + |
112 NameDictionary::kElementsStartIndex * kPointerSize; | 118 NameDictionary::kElementsStartIndex * kPointerSize; |
113 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; | 119 const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize; |
114 const int kTypeAndReadOnlyMask = | 120 const int kTypeAndReadOnlyMask = |
115 (PropertyDetails::TypeField::kMask | | 121 PropertyDetails::TypeField::kMask | |
116 PropertyDetails::AttributesField::encode(READ_ONLY)) | 122 PropertyDetails::AttributesField::encode(READ_ONLY); |
117 << kSmiTagSize; | 123 __ LoadP(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); |
118 __ ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset)); | 124 __ mr(r0, scratch2); |
119 __ tst(scratch1, Operand(kTypeAndReadOnlyMask)); | 125 __ LoadSmiLiteral(scratch2, Smi::FromInt(kTypeAndReadOnlyMask)); |
120 __ b(ne, miss); | 126 __ and_(scratch2, scratch1, scratch2, SetRC); |
| 127 __ bne(miss, cr0); |
| 128 __ mr(scratch2, r0); |
121 | 129 |
122 // Store the value at the masked, scaled index and return. | 130 // Store the value at the masked, scaled index and return. |
123 const int kValueOffset = kElementsStartOffset + kPointerSize; | 131 const int kValueOffset = kElementsStartOffset + kPointerSize; |
124 __ add(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); | 132 __ addi(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag)); |
125 __ str(value, MemOperand(scratch2)); | 133 __ StoreP(value, MemOperand(scratch2)); |
126 | 134 |
127 // Update the write barrier. Make sure not to clobber the value. | 135 // Update the write barrier. Make sure not to clobber the value. |
128 __ mov(scratch1, value); | 136 __ mr(scratch1, value); |
129 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, | 137 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, |
130 kDontSaveFPRegs); | 138 kDontSaveFPRegs); |
131 } | 139 } |
132 | 140 |
133 | 141 |
134 // Checks the receiver for special cases (value type, slow case bits). | 142 // Checks the receiver for special cases (value type, slow case bits). |
135 // Falls through for regular JS object. | 143 // Falls through for regular JS object. |
136 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, | 144 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, |
137 Register receiver, Register map, | 145 Register receiver, Register map, |
138 Register scratch, | 146 Register scratch, |
139 int interceptor_bit, Label* slow) { | 147 int interceptor_bit, Label* slow) { |
140 // Check that the object isn't a smi. | 148 // Check that the object isn't a smi. |
141 __ JumpIfSmi(receiver, slow); | 149 __ JumpIfSmi(receiver, slow); |
142 // Get the map of the receiver. | 150 // Get the map of the receiver. |
143 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 151 __ LoadP(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
144 // Check bit field. | 152 // Check bit field. |
145 __ ldrb(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); | 153 __ lbz(scratch, FieldMemOperand(map, Map::kBitFieldOffset)); |
146 __ tst(scratch, | 154 DCHECK(((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)) < 0x8000); |
147 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); | 155 __ andi(r0, scratch, |
148 __ b(ne, slow); | 156 Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit))); |
| 157 __ bne(slow, cr0); |
149 // Check that the object is some kind of JS object EXCEPT JS Value type. | 158 // Check that the object is some kind of JS object EXCEPT JS Value type. |
150 // In the case that the object is a value-wrapper object, | 159 // In the case that the object is a value-wrapper object, |
151 // we enter the runtime system to make sure that indexing into string | 160 // we enter the runtime system to make sure that indexing into string |
152 // objects works as intended. | 161 // objects works as intended. |
153 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); | 162 DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE); |
154 __ ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 163 __ lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
155 __ cmp(scratch, Operand(JS_OBJECT_TYPE)); | 164 __ cmpi(scratch, Operand(JS_OBJECT_TYPE)); |
156 __ b(lt, slow); | 165 __ blt(slow); |
157 } | 166 } |
158 | 167 |
159 | 168 |
160 // Loads an indexed element from a fast case array. | 169 // Loads an indexed element from a fast case array. |
161 // If not_fast_array is NULL, doesn't perform the elements map check. | 170 // If not_fast_array is NULL, doesn't perform the elements map check. |
162 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, | 171 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, |
163 Register key, Register elements, | 172 Register key, Register elements, |
164 Register scratch1, Register scratch2, | 173 Register scratch1, Register scratch2, |
165 Register result, Label* not_fast_array, | 174 Register result, Label* not_fast_array, |
166 Label* out_of_range) { | 175 Label* out_of_range) { |
(...skipping 12 matching lines...) |
179 // Unchanged on bailout so 'receiver' and 'key' can be safely | 188 // Unchanged on bailout so 'receiver' and 'key' can be safely |
180 // used by further computation. | 189 // used by further computation. |
181 // | 190 // |
182 // Scratch registers: | 191 // Scratch registers: |
183 // | 192 // |
184 // scratch1 - used to hold elements map and elements length. | 193 // scratch1 - used to hold elements map and elements length. |
185 // Holds the elements map if not_fast_array branch is taken. | 194 // Holds the elements map if not_fast_array branch is taken. |
186 // | 195 // |
187 // scratch2 - used to hold the loaded value. | 196 // scratch2 - used to hold the loaded value. |
188 | 197 |
189 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 198 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
190 if (not_fast_array != NULL) { | 199 if (not_fast_array != NULL) { |
191 // Check that the object is in fast mode and writable. | 200 // Check that the object is in fast mode and writable. |
192 __ ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); | 201 __ LoadP(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset)); |
193 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); | 202 __ LoadRoot(ip, Heap::kFixedArrayMapRootIndex); |
194 __ cmp(scratch1, ip); | 203 __ cmp(scratch1, ip); |
195 __ b(ne, not_fast_array); | 204 __ bne(not_fast_array); |
196 } else { | 205 } else { |
197 __ AssertFastElements(elements); | 206 __ AssertFastElements(elements); |
198 } | 207 } |
199 // Check that the key (index) is within bounds. | 208 // Check that the key (index) is within bounds. |
200 __ ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 209 __ LoadP(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
201 __ cmp(key, Operand(scratch1)); | 210 __ cmpl(key, scratch1); |
202 __ b(hs, out_of_range); | 211 __ bge(out_of_range); |
203 // Fast case: Do the load. | 212 // Fast case: Do the load. |
204 __ add(scratch1, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 213 __ addi(scratch1, elements, |
205 __ ldr(scratch2, MemOperand::PointerAddressFromSmiKey(scratch1, key)); | 214 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 215 // The key is a smi. |
| 216 __ SmiToPtrArrayOffset(scratch2, key); |
| 217 __ LoadPX(scratch2, MemOperand(scratch2, scratch1)); |
206 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 218 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
207 __ cmp(scratch2, ip); | 219 __ cmp(scratch2, ip); |
208 // In case the loaded value is the_hole we have to consult GetProperty | 220 // In case the loaded value is the_hole we have to consult GetProperty |
209 // to ensure the prototype chain is searched. | 221 // to ensure the prototype chain is searched. |
210 __ b(eq, out_of_range); | 222 __ beq(out_of_range); |
211 __ mov(result, scratch2); | 223 __ mr(result, scratch2); |
212 } | 224 } |
213 | 225 |
214 | 226 |
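ARM folds the smi-to-offset scaling into the load's addressing mode (PointerAddressFromSmiKey); PPC has no scaled-index addressing, so the offset is computed separately with SmiToPtrArrayOffset and the load is done with an indexed LoadPX. A sketch of the offset computation, assuming smis are tagged with a 1-bit shift on 32-bit targets and carried in the upper 32 bits on PPC64:

    #include <cstdint>

    // Illustrative-only: tagged smi key -> byte offset into a pointer array.
    // smi_shift is 1 (32-bit) or 32 (PPC64, assumed); pointer_size_log2 is
    // 2 or 3 respectively, so the two shifts combine into one in the stub.
    static intptr_t SmiToPtrArrayOffset(intptr_t smi_key, int smi_shift,
                                        int pointer_size_log2) {
      return (smi_key >> smi_shift) << pointer_size_log2;
    }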
215 // Checks whether a key is an array index string or a unique name. | 227 // Checks whether a key is an array index string or a unique name. |
216 // Falls through if a key is a unique name. | 228 // Falls through if a key is a unique name. |
217 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, | 229 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, |
218 Register map, Register hash, | 230 Register map, Register hash, |
219 Label* index_string, Label* not_unique) { | 231 Label* index_string, Label* not_unique) { |
220 // The key is not a smi. | 232 // The key is not a smi. |
221 Label unique; | 233 Label unique; |
222 // Is it a name? | 234 // Is it a name? |
223 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); | 235 __ CompareObjectType(key, map, hash, LAST_UNIQUE_NAME_TYPE); |
224 __ b(hi, not_unique); | 236 __ bgt(not_unique); |
225 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); | 237 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); |
226 __ b(eq, &unique); | 238 __ beq(&unique); |
227 | 239 |
228 // Is the string an array index, with cached numeric value? | 240 // Is the string an array index, with cached numeric value? |
229 __ ldr(hash, FieldMemOperand(key, Name::kHashFieldOffset)); | 241 __ lwz(hash, FieldMemOperand(key, Name::kHashFieldOffset)); |
230 __ tst(hash, Operand(Name::kContainsCachedArrayIndexMask)); | 242 __ mov(r8, Operand(Name::kContainsCachedArrayIndexMask)); |
231 __ b(eq, index_string); | 243 __ and_(r0, hash, r8, SetRC); |
| 244 __ beq(index_string, cr0); |
232 | 245 |
233 // Is the string internalized? We know it's a string, so a single | 246 // Is the string internalized? We know it's a string, so a single |
234 // bit test is enough. | 247 // bit test is enough. |
235 // map: key map | 248 // map: key map |
236 __ ldrb(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 249 __ lbz(hash, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
237 STATIC_ASSERT(kInternalizedTag == 0); | 250 STATIC_ASSERT(kInternalizedTag == 0); |
238 __ tst(hash, Operand(kIsNotInternalizedMask)); | 251 __ andi(r0, hash, Operand(kIsNotInternalizedMask)); |
239 __ b(ne, not_unique); | 252 __ bne(not_unique, cr0); |
240 | 253 |
241 __ bind(&unique); | 254 __ bind(&unique); |
242 } | 255 } |
243 | 256 |
244 | 257 |
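The hash field is a 32-bit word, hence lwz rather than LoadP. Name::kContainsCachedArrayIndexMask does not fit andi's 16-bit unsigned immediate, so it is materialized in r8 and tested with and_/SetRC, while kIsNotInternalizedMask fits and a plain andi (which also records to CR0) suffices. The two tests, restated in C++ for clarity (constants are V8's; illustration only):

    #include <cstdint>

    static bool HasCachedArrayIndex(uint32_t hash_field, uint32_t mask) {
      return (hash_field & mask) == 0;  // zero means the index is cached
    }

    static bool IsInternalized(uint32_t instance_type,
                               uint32_t not_internalized_mask) {
      return (instance_type & not_internalized_mask) == 0;  // tag is 0
    }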
245 void LoadIC::GenerateNormal(MacroAssembler* masm) { | 258 void LoadIC::GenerateNormal(MacroAssembler* masm) { |
246 Register dictionary = r0; | 259 Register dictionary = r3; |
247 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); | 260 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); |
248 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); | 261 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); |
249 | 262 |
250 Label slow; | 263 Label slow; |
251 | 264 |
252 __ ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), | 265 __ LoadP(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), |
253 JSObject::kPropertiesOffset)); | 266 JSObject::kPropertiesOffset)); |
254 GenerateDictionaryLoad(masm, &slow, dictionary, | 267 GenerateDictionaryLoad(masm, &slow, dictionary, |
255 LoadDescriptor::NameRegister(), r0, r3, r4); | 268 LoadDescriptor::NameRegister(), r3, r6, r7); |
256 __ Ret(); | 269 __ Ret(); |
257 | 270 |
258 // Dictionary load failed, go slow (but don't miss). | 271 // Dictionary load failed, go slow (but don't miss). |
259 __ bind(&slow); | 272 __ bind(&slow); |
260 GenerateRuntimeGetProperty(masm); | 273 GenerateRuntimeGetProperty(masm); |
261 } | 274 } |
262 | 275 |
263 | 276 |
264 // A register that isn't one of the parameters to the load ic. | 277 // A register that isn't one of the parameters to the load ic. |
265 static const Register LoadIC_TempRegister() { return r3; } | 278 static const Register LoadIC_TempRegister() { return r6; } |
266 | 279 |
267 | 280 |
268 void LoadIC::GenerateMiss(MacroAssembler* masm) { | 281 void LoadIC::GenerateMiss(MacroAssembler* masm) { |
269 // The return address is in lr. | 282 // The return address is in lr. |
270 Isolate* isolate = masm->isolate(); | 283 Isolate* isolate = masm->isolate(); |
271 | 284 |
272 __ IncrementCounter(isolate->counters()->load_miss(), 1, r3, r4); | 285 __ IncrementCounter(isolate->counters()->load_miss(), 1, r6, r7); |
273 | 286 |
274 __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); | 287 __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); |
275 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); | 288 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); |
276 | 289 |
277 // Perform tail call to the entry. | 290 // Perform tail call to the entry. |
278 ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); | 291 ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate); |
279 __ TailCallExternalReference(ref, 2, 1); | 292 __ TailCallExternalReference(ref, 2, 1); |
280 } | 293 } |
281 | 294 |
282 | 295 |
283 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 296 void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
284 // The return address is in lr. | 297 // The return address is in lr. |
285 | 298 |
286 __ mov(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); | 299 __ mr(LoadIC_TempRegister(), LoadDescriptor::ReceiverRegister()); |
287 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); | 300 __ Push(LoadIC_TempRegister(), LoadDescriptor::NameRegister()); |
288 | 301 |
289 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); | 302 __ TailCallRuntime(Runtime::kGetProperty, 2, 1); |
290 } | 303 } |
291 | 304 |
292 | 305 |
293 static MemOperand GenerateMappedArgumentsLookup( | 306 static MemOperand GenerateMappedArgumentsLookup( |
294 MacroAssembler* masm, Register object, Register key, Register scratch1, | 307 MacroAssembler* masm, Register object, Register key, Register scratch1, |
295 Register scratch2, Register scratch3, Label* unmapped_case, | 308 Register scratch2, Register scratch3, Label* unmapped_case, |
296 Label* slow_case) { | 309 Label* slow_case) { |
297 Heap* heap = masm->isolate()->heap(); | 310 Heap* heap = masm->isolate()->heap(); |
298 | 311 |
299 // Check that the receiver is a JSObject. Because of the map check | 312 // Check that the receiver is a JSObject. Because of the map check |
300 // later, we do not need to check for interceptors or whether it | 313 // later, we do not need to check for interceptors or whether it |
301 // requires access checks. | 314 // requires access checks. |
302 __ JumpIfSmi(object, slow_case); | 315 __ JumpIfSmi(object, slow_case); |
303 // Check that the object is some kind of JSObject. | 316 // Check that the object is some kind of JSObject. |
304 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE); | 317 __ CompareObjectType(object, scratch1, scratch2, FIRST_JS_RECEIVER_TYPE); |
305 __ b(lt, slow_case); | 318 __ blt(slow_case); |
306 | 319 |
307 // Check that the key is a positive smi. | 320 // Check that the key is a positive smi. |
308 __ tst(key, Operand(0x80000001)); | 321 __ mov(scratch1, Operand(0x80000001)); |
309 __ b(ne, slow_case); | 322 __ and_(r0, key, scratch1, SetRC); |
| 323 __ bne(slow_case, cr0); |
310 | 324 |
311 // Load the elements into scratch1 and check its map. | 325 // Load the elements into scratch1 and check its map. |
312 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); | 326 Handle<Map> arguments_map(heap->sloppy_arguments_elements_map()); |
313 __ ldr(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); | 327 __ LoadP(scratch1, FieldMemOperand(object, JSObject::kElementsOffset)); |
314 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK); | 328 __ CheckMap(scratch1, scratch2, arguments_map, slow_case, DONT_DO_SMI_CHECK); |
315 | 329 |
316 // Check if element is in the range of mapped arguments. If not, jump | 330 // Check if element is in the range of mapped arguments. If not, jump |
317 // to the unmapped lookup with the parameter map in scratch1. | 331 // to the unmapped lookup with the parameter map in scratch1. |
318 __ ldr(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); | 332 __ LoadP(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset)); |
319 __ sub(scratch2, scratch2, Operand(Smi::FromInt(2))); | 333 __ SubSmiLiteral(scratch2, scratch2, Smi::FromInt(2), r0); |
320 __ cmp(key, Operand(scratch2)); | 334 __ cmpl(key, scratch2); |
321 __ b(cs, unmapped_case); | 335 __ bge(unmapped_case); |
322 | 336 |
323 // Load element index and check whether it is the hole. | 337 // Load element index and check whether it is the hole. |
324 const int kOffset = | 338 const int kOffset = |
325 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; | 339 FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag; |
326 | 340 |
327 __ mov(scratch3, Operand(kPointerSize >> 1)); | 341 __ SmiToPtrArrayOffset(scratch3, key); |
328 __ mul(scratch3, key, scratch3); | 342 __ addi(scratch3, scratch3, Operand(kOffset)); |
329 __ add(scratch3, scratch3, Operand(kOffset)); | |
330 | 343 |
331 __ ldr(scratch2, MemOperand(scratch1, scratch3)); | 344 __ LoadPX(scratch2, MemOperand(scratch1, scratch3)); |
332 __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); | 345 __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex); |
333 __ cmp(scratch2, scratch3); | 346 __ cmp(scratch2, scratch3); |
334 __ b(eq, unmapped_case); | 347 __ beq(unmapped_case); |
335 | 348 |
336 // Load value from context and return it. We can reuse scratch1 because | 349 // Load value from context and return it. We can reuse scratch1 because |
337 // we do not jump to the unmapped lookup (which requires the parameter | 350 // we do not jump to the unmapped lookup (which requires the parameter |
338 // map in scratch1). | 351 // map in scratch1). |
339 __ ldr(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); | 352 __ LoadP(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); |
340 __ mov(scratch3, Operand(kPointerSize >> 1)); | 353 __ SmiToPtrArrayOffset(scratch3, scratch2); |
341 __ mul(scratch3, scratch2, scratch3); | 354 __ addi(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag)); |
342 __ add(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag)); | |
343 return MemOperand(scratch1, scratch3); | 355 return MemOperand(scratch1, scratch3); |
344 } | 356 } |
345 | 357 |
346 | 358 |
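The parameter map's first two elements hold the context and the arguments backing store, which is why the bounds check subtracts Smi::FromInt(2) from the length and why kOffset skips two pointers past the FixedArray header. The entry address, as plain arithmetic (a sketch; kHeapObjectTag == 1):

    #include <cstdint>

    // Byte offset of mapped entry i relative to the tagged parameter-map
    // pointer; header_size is FixedArray::kHeaderSize. Illustrative only.
    static intptr_t MappedEntryOffset(intptr_t i, int header_size,
                                      int pointer_size) {
      return header_size + (2 + i) * pointer_size - 1;  // - kHeapObjectTag
    }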
347 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, | 359 static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm, |
348 Register key, | 360 Register key, |
349 Register parameter_map, | 361 Register parameter_map, |
350 Register scratch, | 362 Register scratch, |
351 Label* slow_case) { | 363 Label* slow_case) { |
352 // Element is in arguments backing store, which is referenced by the | 364 // Element is in arguments backing store, which is referenced by the |
353 // second element of the parameter_map. The parameter_map register | 365 // second element of the parameter_map. The parameter_map register |
354 // must be loaded with the parameter map of the arguments object and is | 366 // must be loaded with the parameter map of the arguments object and is |
355 // overwritten. | 367 // overwritten. |
356 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; | 368 const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize; |
357 Register backing_store = parameter_map; | 369 Register backing_store = parameter_map; |
358 __ ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); | 370 __ LoadP(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset)); |
359 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); | 371 Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map()); |
360 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, | 372 __ CheckMap(backing_store, scratch, fixed_array_map, slow_case, |
361 DONT_DO_SMI_CHECK); | 373 DONT_DO_SMI_CHECK); |
362 __ ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); | 374 __ LoadP(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset)); |
363 __ cmp(key, Operand(scratch)); | 375 __ cmpl(key, scratch); |
364 __ b(cs, slow_case); | 376 __ bge(slow_case); |
365 __ mov(scratch, Operand(kPointerSize >> 1)); | 377 __ SmiToPtrArrayOffset(scratch, key); |
366 __ mul(scratch, key, scratch); | 378 __ addi(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
367 __ add(scratch, scratch, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | |
368 return MemOperand(backing_store, scratch); | 379 return MemOperand(backing_store, scratch); |
369 } | 380 } |
370 | 381 |
371 | 382 |
372 void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) { | |
373 // The return address is in lr. | |
374 Register receiver = LoadDescriptor::ReceiverRegister(); | |
375 Register key = LoadDescriptor::NameRegister(); | |
376 DCHECK(receiver.is(r1)); | |
377 DCHECK(key.is(r2)); | |
378 | |
379 Label slow, notin; | |
380 MemOperand mapped_location = GenerateMappedArgumentsLookup( | |
381 masm, receiver, key, r0, r3, r4, ¬in, &slow); | |
382 __ ldr(r0, mapped_location); | |
383 __ Ret(); | |
384 __ bind(¬in); | |
385 // The unmapped lookup expects that the parameter map is in r0. | |
386 MemOperand unmapped_location = | |
387 GenerateUnmappedArgumentsLookup(masm, key, r0, r3, &slow); | |
388 __ ldr(r0, unmapped_location); | |
389 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); | |
390 __ cmp(r0, r3); | |
391 __ b(eq, &slow); | |
392 __ Ret(); | |
393 __ bind(&slow); | |
394 GenerateMiss(masm); | |
395 } | |
396 | |
397 | |
398 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { | 383 void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) { |
399 Register receiver = StoreDescriptor::ReceiverRegister(); | 384 Register receiver = StoreDescriptor::ReceiverRegister(); |
400 Register key = StoreDescriptor::NameRegister(); | 385 Register key = StoreDescriptor::NameRegister(); |
401 Register value = StoreDescriptor::ValueRegister(); | 386 Register value = StoreDescriptor::ValueRegister(); |
402 DCHECK(receiver.is(r1)); | 387 DCHECK(receiver.is(r4)); |
403 DCHECK(key.is(r2)); | 388 DCHECK(key.is(r5)); |
404 DCHECK(value.is(r0)); | 389 DCHECK(value.is(r3)); |
405 | 390 |
406 Label slow, notin; | 391 Label slow, notin; |
407 MemOperand mapped_location = GenerateMappedArgumentsLookup( | 392 MemOperand mapped_location = GenerateMappedArgumentsLookup( |
408 masm, receiver, key, r3, r4, r5, ¬in, &slow); | 393 masm, receiver, key, r6, r7, r8, ¬in, &slow); |
409 __ str(value, mapped_location); | 394 Register mapped_base = mapped_location.ra(); |
410 __ add(r6, r3, r5); | 395 Register mapped_offset = mapped_location.rb(); |
411 __ mov(r9, value); | 396 __ StorePX(value, mapped_location); |
412 __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); | 397 __ add(r9, mapped_base, mapped_offset); |
| 398 __ mr(r11, value); |
| 399 __ RecordWrite(mapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs); |
413 __ Ret(); | 400 __ Ret(); |
414 __ bind(¬in); | 401 __ bind(¬in); |
415 // The unmapped lookup expects that the parameter map is in r3. | 402 // The unmapped lookup expects that the parameter map is in r6. |
416 MemOperand unmapped_location = | 403 MemOperand unmapped_location = |
417 GenerateUnmappedArgumentsLookup(masm, key, r3, r4, &slow); | 404 GenerateUnmappedArgumentsLookup(masm, key, r6, r7, &slow); |
418 __ str(value, unmapped_location); | 405 Register unmapped_base = unmapped_location.ra(); |
419 __ add(r6, r3, r4); | 406 Register unmapped_offset = unmapped_location.rb(); |
420 __ mov(r9, value); | 407 __ StorePX(value, unmapped_location); |
421 __ RecordWrite(r3, r6, r9, kLRHasNotBeenSaved, kDontSaveFPRegs); | 408 __ add(r9, unmapped_base, unmapped_offset); |
| 409 __ mr(r11, value); |
| 410 __ RecordWrite(unmapped_base, r9, r11, kLRHasNotBeenSaved, kDontSaveFPRegs); |
422 __ Ret(); | 411 __ Ret(); |
423 __ bind(&slow); | 412 __ bind(&slow); |
424 GenerateMiss(masm); | 413 GenerateMiss(masm); |
425 } | 414 } |
426 | 415 |
427 | 416 |
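The ARM version rebuilt the slot address from hard-coded registers; the PPC lookups return a base-plus-index MemOperand, so the store uses StorePX and the slot address for the write barrier is recomputed by adding ra() and rb(). The value is first copied to r11 because RecordWrite may clobber its scratch registers while the IC must still return the stored value. The address arithmetic, as a trivial standalone sketch:

    #include <cstdint>

    // With only base+index addressing, the barrier needs the flat slot
    // address, hence the explicit add before RecordWrite above.
    static intptr_t SlotAddress(intptr_t base, intptr_t index) {
      return base + index;
    }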
428 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { | 417 void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) { |
429 // The return address is in lr. | 418 // The return address is in lr. |
430 Isolate* isolate = masm->isolate(); | 419 Isolate* isolate = masm->isolate(); |
431 | 420 |
432 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r3, r4); | 421 __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, r6, r7); |
433 | 422 |
434 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); | 423 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); |
435 | 424 |
436 // Perform tail call to the entry. | 425 // Perform tail call to the entry. |
437 ExternalReference ref = | 426 ExternalReference ref = |
438 ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); | 427 ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate); |
439 | 428 |
440 __ TailCallExternalReference(ref, 2, 1); | 429 __ TailCallExternalReference(ref, 2, 1); |
441 } | 430 } |
442 | 431 |
443 | 432 |
444 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 433 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
445 // The return address is in lr. | 434 // The return address is in lr. |
446 | 435 |
447 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); | 436 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); |
448 | 437 |
449 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); | 438 __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1); |
450 } | 439 } |
451 | 440 |
452 | 441 |
453 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { | 442 void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) { |
454 // The return address is in lr. | 443 // The return address is in lr. |
455 Label slow, check_name, index_smi, index_name, property_array_property; | 444 Label slow, check_name, index_smi, index_name, property_array_property; |
456 Label probe_dictionary, check_number_dictionary; | 445 Label probe_dictionary, check_number_dictionary; |
457 | 446 |
458 Register key = LoadDescriptor::NameRegister(); | 447 Register key = LoadDescriptor::NameRegister(); |
459 Register receiver = LoadDescriptor::ReceiverRegister(); | 448 Register receiver = LoadDescriptor::ReceiverRegister(); |
460 DCHECK(key.is(r2)); | 449 DCHECK(key.is(r5)); |
461 DCHECK(receiver.is(r1)); | 450 DCHECK(receiver.is(r4)); |
462 | 451 |
463 Isolate* isolate = masm->isolate(); | 452 Isolate* isolate = masm->isolate(); |
464 | 453 |
465 // Check that the key is a smi. | 454 // Check that the key is a smi. |
466 __ JumpIfNotSmi(key, &check_name); | 455 __ JumpIfNotSmi(key, &check_name); |
467 __ bind(&index_smi); | 456 __ bind(&index_smi); |
468 // Now the key is known to be a smi. This place is also jumped to from below | 457 // Now the key is known to be a smi. This place is also jumped to from below |
469 // where a numeric string is converted to a smi. | 458 // where a numeric string is converted to a smi. |
470 | 459 |
471 GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3, | 460 GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6, |
472 Map::kHasIndexedInterceptor, &slow); | 461 Map::kHasIndexedInterceptor, &slow); |
473 | 462 |
474 // Check the receiver's map to see if it has fast elements. | 463 // Check the receiver's map to see if it has fast elements. |
475 __ CheckFastElements(r0, r3, &check_number_dictionary); | 464 __ CheckFastElements(r3, r6, &check_number_dictionary); |
476 | 465 |
477 GenerateFastArrayLoad(masm, receiver, key, r0, r3, r4, r0, NULL, &slow); | 466 GenerateFastArrayLoad(masm, receiver, key, r3, r6, r7, r3, NULL, &slow); |
478 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r4, r3); | 467 __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, r7, r6); |
479 __ Ret(); | 468 __ Ret(); |
480 | 469 |
481 __ bind(&check_number_dictionary); | 470 __ bind(&check_number_dictionary); |
482 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 471 __ LoadP(r7, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
483 __ ldr(r3, FieldMemOperand(r4, JSObject::kMapOffset)); | 472 __ LoadP(r6, FieldMemOperand(r7, JSObject::kMapOffset)); |
484 | 473 |
485 // Check whether the elements is a number dictionary. | 474 // Check whether the elements is a number dictionary. |
486 // r3: elements map | 475 // r6: elements map |
487 // r4: elements | 476 // r7: elements |
488 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 477 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
489 __ cmp(r3, ip); | 478 __ cmp(r6, ip); |
490 __ b(ne, &slow); | 479 __ bne(&slow); |
491 __ SmiUntag(r0, key); | 480 __ SmiUntag(r3, key); |
492 __ LoadFromNumberDictionary(&slow, r4, key, r0, r0, r3, r5); | 481 __ LoadFromNumberDictionary(&slow, r7, key, r3, r3, r6, r8); |
493 __ Ret(); | 482 __ Ret(); |
494 | 483 |
495 // Slow case, key and receiver still in r2 and r1. | 484 // Slow case, key and receiver still in r5 and r4. |
496 __ bind(&slow); | 485 __ bind(&slow); |
497 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r4, | 486 __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(), 1, r7, |
498 r3); | 487 r6); |
499 GenerateRuntimeGetProperty(masm); | 488 GenerateRuntimeGetProperty(masm); |
500 | 489 |
501 __ bind(&check_name); | 490 __ bind(&check_name); |
502 GenerateKeyNameCheck(masm, key, r0, r3, &index_name, &slow); | 491 GenerateKeyNameCheck(masm, key, r3, r6, &index_name, &slow); |
503 | 492 |
504 GenerateKeyedLoadReceiverCheck(masm, receiver, r0, r3, | 493 GenerateKeyedLoadReceiverCheck(masm, receiver, r3, r6, |
505 Map::kHasNamedInterceptor, &slow); | 494 Map::kHasNamedInterceptor, &slow); |
506 | 495 |
507 // If the receiver is a fast-case object, check the keyed lookup | 496 // If the receiver is a fast-case object, check the keyed lookup |
508 // cache. Otherwise probe the dictionary. | 497 // cache. Otherwise probe the dictionary. |
509 __ ldr(r3, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 498 __ LoadP(r6, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
510 __ ldr(r4, FieldMemOperand(r3, HeapObject::kMapOffset)); | 499 __ LoadP(r7, FieldMemOperand(r6, HeapObject::kMapOffset)); |
511 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); | 500 __ LoadRoot(ip, Heap::kHashTableMapRootIndex); |
512 __ cmp(r4, ip); | 501 __ cmp(r7, ip); |
513 __ b(eq, &probe_dictionary); | 502 __ beq(&probe_dictionary); |
514 | 503 |
515 // Load the map of the receiver, compute the keyed lookup cache hash | 504 // Load the map of the receiver, compute the keyed lookup cache hash |
516 // based on 32 bits of the map pointer and the name hash. | 505 // based on 32 bits of the map pointer and the name hash. |
517 __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 506 __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
518 __ mov(r3, Operand(r0, ASR, KeyedLookupCache::kMapHashShift)); | 507 __ srawi(r6, r3, KeyedLookupCache::kMapHashShift); |
519 __ ldr(r4, FieldMemOperand(key, Name::kHashFieldOffset)); | 508 __ lwz(r7, FieldMemOperand(key, Name::kHashFieldOffset)); |
520 __ eor(r3, r3, Operand(r4, ASR, Name::kHashShift)); | 509 __ srawi(r7, r7, Name::kHashShift); |
| 510 __ xor_(r6, r6, r7); |
521 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; | 511 int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask; |
522 __ And(r3, r3, Operand(mask)); | 512 __ mov(r7, Operand(mask)); |
| 513 __ and_(r6, r6, r7, LeaveRC); |
523 | 514 |
524 // Load the key (consisting of map and unique name) from the cache and | 515 // Load the key (consisting of map and unique name) from the cache and |
525 // check for match. | 516 // check for match. |
526 Label load_in_object_property; | 517 Label load_in_object_property; |
527 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; | 518 static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket; |
528 Label hit_on_nth_entry[kEntriesPerBucket]; | 519 Label hit_on_nth_entry[kEntriesPerBucket]; |
529 ExternalReference cache_keys = | 520 ExternalReference cache_keys = |
530 ExternalReference::keyed_lookup_cache_keys(isolate); | 521 ExternalReference::keyed_lookup_cache_keys(isolate); |
531 | 522 |
532 __ mov(r4, Operand(cache_keys)); | 523 __ mov(r7, Operand(cache_keys)); |
533 __ add(r4, r4, Operand(r3, LSL, kPointerSizeLog2 + 1)); | 524 __ mr(r0, r5); |
| 525 __ ShiftLeftImm(r5, r6, Operand(kPointerSizeLog2 + 1)); |
| 526 __ add(r7, r7, r5); |
| 527 __ mr(r5, r0); |
534 | 528 |
535 for (int i = 0; i < kEntriesPerBucket - 1; i++) { | 529 for (int i = 0; i < kEntriesPerBucket - 1; i++) { |
536 Label try_next_entry; | 530 Label try_next_entry; |
537 // Load map and move r4 to next entry. | 531 // Load map and move r7 to next entry. |
538 __ ldr(r5, MemOperand(r4, kPointerSize * 2, PostIndex)); | 532 __ LoadP(r8, MemOperand(r7)); |
539 __ cmp(r0, r5); | 533 __ addi(r7, r7, Operand(kPointerSize * 2)); |
540 __ b(ne, &try_next_entry); | 534 __ cmp(r3, r8); |
541 __ ldr(r5, MemOperand(r4, -kPointerSize)); // Load name | 535 __ bne(&try_next_entry); |
542 __ cmp(key, r5); | 536 __ LoadP(r8, MemOperand(r7, -kPointerSize)); // Load name |
543 __ b(eq, &hit_on_nth_entry[i]); | 537 __ cmp(key, r8); |
| 538 __ beq(&hit_on_nth_entry[i]); |
544 __ bind(&try_next_entry); | 539 __ bind(&try_next_entry); |
545 } | 540 } |
546 | 541 |
547 // Last entry: Load map and move r4 to name. | 542 // Last entry: Load map and move r7 to name. |
548 __ ldr(r5, MemOperand(r4, kPointerSize, PostIndex)); | 543 __ LoadP(r8, MemOperand(r7)); |
549 __ cmp(r0, r5); | 544 __ addi(r7, r7, Operand(kPointerSize)); |
550 __ b(ne, &slow); | 545 __ cmp(r3, r8); |
551 __ ldr(r5, MemOperand(r4)); | 546 __ bne(&slow); |
552 __ cmp(key, r5); | 547 __ LoadP(r8, MemOperand(r7)); |
553 __ b(ne, &slow); | 548 __ cmp(key, r8); |
| 549 __ bne(&slow); |
554 | 550 |
555 // Get field offset. | 551 // Get field offset. |
556 // r0 : receiver's map | 552 // r3 : receiver's map |
557 // r3 : lookup cache index | 553 // r6 : lookup cache index |
558 ExternalReference cache_field_offsets = | 554 ExternalReference cache_field_offsets = |
559 ExternalReference::keyed_lookup_cache_field_offsets(isolate); | 555 ExternalReference::keyed_lookup_cache_field_offsets(isolate); |
560 | 556 |
561 // Hit on nth entry. | 557 // Hit on nth entry. |
562 for (int i = kEntriesPerBucket - 1; i >= 0; i--) { | 558 for (int i = kEntriesPerBucket - 1; i >= 0; i--) { |
563 __ bind(&hit_on_nth_entry[i]); | 559 __ bind(&hit_on_nth_entry[i]); |
564 __ mov(r4, Operand(cache_field_offsets)); | 560 __ mov(r7, Operand(cache_field_offsets)); |
565 if (i != 0) { | 561 if (i != 0) { |
566 __ add(r3, r3, Operand(i)); | 562 __ addi(r6, r6, Operand(i)); |
567 } | 563 } |
568 __ ldr(r5, MemOperand(r4, r3, LSL, kPointerSizeLog2)); | 564 __ ShiftLeftImm(r8, r6, Operand(2)); |
569 __ ldrb(r6, FieldMemOperand(r0, Map::kInObjectPropertiesOffset)); | 565 __ lwzx(r8, MemOperand(r8, r7)); |
570 __ sub(r5, r5, r6, SetCC); | 566 __ lbz(r9, FieldMemOperand(r3, Map::kInObjectPropertiesOffset)); |
571 __ b(ge, &property_array_property); | 567 __ sub(r8, r8, r9); |
| 568 __ cmpi(r8, Operand::Zero()); |
| 569 __ bge(&property_array_property); |
572 if (i != 0) { | 570 if (i != 0) { |
573 __ jmp(&load_in_object_property); | 571 __ b(&load_in_object_property); |
574 } | 572 } |
575 } | 573 } |
576 | 574 |
577 // Load in-object property. | 575 // Load in-object property. |
578 __ bind(&load_in_object_property); | 576 __ bind(&load_in_object_property); |
579 __ ldrb(r6, FieldMemOperand(r0, Map::kInstanceSizeOffset)); | 577 __ lbz(r9, FieldMemOperand(r3, Map::kInstanceSizeOffset)); |
580 __ add(r6, r6, r5); // Index from start of object. | 578 __ add(r9, r9, r8); // Index from start of object. |
581 __ sub(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. | 579 __ subi(receiver, receiver, Operand(kHeapObjectTag)); // Remove the heap tag. |
582 __ ldr(r0, MemOperand(receiver, r6, LSL, kPointerSizeLog2)); | 580 __ ShiftLeftImm(r3, r9, Operand(kPointerSizeLog2)); |
| 581 __ LoadPX(r3, MemOperand(r3, receiver)); |
583 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, | 582 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, |
584 r4, r3); | 583 r7, r6); |
585 __ Ret(); | 584 __ Ret(); |
586 | 585 |
587 // Load property array property. | 586 // Load property array property. |
588 __ bind(&property_array_property); | 587 __ bind(&property_array_property); |
589 __ ldr(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 588 __ LoadP(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
590 __ add(receiver, receiver, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 589 __ addi(receiver, receiver, |
591 __ ldr(r0, MemOperand(receiver, r5, LSL, kPointerSizeLog2)); | 590 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 591 __ ShiftLeftImm(r3, r8, Operand(kPointerSizeLog2)); |
| 592 __ LoadPX(r3, MemOperand(r3, receiver)); |
592 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, | 593 __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(), 1, |
593 r4, r3); | 594 r7, r6); |
594 __ Ret(); | 595 __ Ret(); |
595 | 596 |
596 // Do a quick inline probe of the receiver's dictionary, if it | 597 // Do a quick inline probe of the receiver's dictionary, if it |
597 // exists. | 598 // exists. |
598 __ bind(&probe_dictionary); | 599 __ bind(&probe_dictionary); |
599 // r3: elements | 600 // r6: elements |
600 __ ldr(r0, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 601 __ LoadP(r3, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
601 __ ldrb(r0, FieldMemOperand(r0, Map::kInstanceTypeOffset)); | 602 __ lbz(r3, FieldMemOperand(r3, Map::kInstanceTypeOffset)); |
602 GenerateGlobalInstanceTypeCheck(masm, r0, &slow); | 603 GenerateGlobalInstanceTypeCheck(masm, r3, &slow); |
603 // Load the property to r0. | 604 // Load the property to r3. |
604 GenerateDictionaryLoad(masm, &slow, r3, key, r0, r5, r4); | 605 GenerateDictionaryLoad(masm, &slow, r6, key, r3, r8, r7); |
605 __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r4, | 606 __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(), 1, r7, |
606 r3); | 607 r6); |
607 __ Ret(); | 608 __ Ret(); |
608 | 609 |
609 __ bind(&index_name); | 610 __ bind(&index_name); |
610 __ IndexFromHash(r3, key); | 611 __ IndexFromHash(r6, key); |
611 // Now jump to the place where smi keys are handled. | 612 // Now jump to the place where smi keys are handled. |
612 __ jmp(&index_smi); | 613 __ b(&index_smi); |
613 } | 614 } |
614 | 615 |
615 | 616 |
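The lookup-cache probe hashes 32 bits of the map pointer against the name's hash field, then walks the two entries of the selected bucket (the unrolled loop above, with r5 parked in r0 because the key register doubles as a temporary). The index computation performed by the srawi/xor_/and_ sequence, as a sketch (the shift and mask constants come from KeyedLookupCache and Name and are parameters here to keep it self-contained):

    #include <cstdint>

    static int CacheIndex(uint32_t map_bits, uint32_t hash_field,
                          int map_shift, int name_shift, uint32_t mask) {
      return static_cast<int>(
          ((map_bits >> map_shift) ^ (hash_field >> name_shift)) & mask);
    }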
616 void KeyedLoadIC::GenerateString(MacroAssembler* masm) { | 617 void KeyedLoadIC::GenerateString(MacroAssembler* masm) { |
617 // Return address is in lr. | 618 // Return address is in lr. |
618 Label miss; | 619 Label miss; |
619 | 620 |
620 Register receiver = LoadDescriptor::ReceiverRegister(); | 621 Register receiver = LoadDescriptor::ReceiverRegister(); |
621 Register index = LoadDescriptor::NameRegister(); | 622 Register index = LoadDescriptor::NameRegister(); |
622 Register scratch = r3; | 623 Register scratch = r6; |
623 Register result = r0; | 624 Register result = r3; |
624 DCHECK(!scratch.is(receiver) && !scratch.is(index)); | 625 DCHECK(!scratch.is(receiver) && !scratch.is(index)); |
625 | 626 |
626 StringCharAtGenerator char_at_generator(receiver, index, scratch, result, | 627 StringCharAtGenerator char_at_generator(receiver, index, scratch, result, |
627 &miss, // When not a string. | 628 &miss, // When not a string. |
628 &miss, // When not a number. | 629 &miss, // When not a number. |
629 &miss, // When index out of range. | 630 &miss, // When index out of range. |
630 STRING_INDEX_IS_ARRAY_INDEX); | 631 STRING_INDEX_IS_ARRAY_INDEX); |
631 char_at_generator.GenerateFast(masm); | 632 char_at_generator.GenerateFast(masm); |
632 __ Ret(); | 633 __ Ret(); |
633 | 634 |
634 StubRuntimeCallHelper call_helper; | 635 StubRuntimeCallHelper call_helper; |
635 char_at_generator.GenerateSlow(masm, call_helper); | 636 char_at_generator.GenerateSlow(masm, call_helper); |
636 | 637 |
637 __ bind(&miss); | 638 __ bind(&miss); |
638 GenerateMiss(masm); | 639 GenerateMiss(masm); |
639 } | 640 } |
640 | 641 |
641 | 642 |
642 void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) { | |
643 // Return address is in lr. | |
644 Label slow; | |
645 | |
646 Register receiver = LoadDescriptor::ReceiverRegister(); | |
647 Register key = LoadDescriptor::NameRegister(); | |
648 Register scratch1 = r3; | |
649 Register scratch2 = r4; | |
650 DCHECK(!scratch1.is(receiver) && !scratch1.is(key)); | |
651 DCHECK(!scratch2.is(receiver) && !scratch2.is(key)); | |
652 | |
653 // Check that the receiver isn't a smi. | |
654 __ JumpIfSmi(receiver, &slow); | |
655 | |
656 // Check that the key is an array index, that is Uint32. | |
657 __ NonNegativeSmiTst(key); | |
658 __ b(ne, &slow); | |
659 | |
660 // Get the map of the receiver. | |
661 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
662 | |
663 // Check that it has indexed interceptor and access checks | |
664 // are not enabled for this object. | |
665 __ ldrb(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset)); | |
666 __ and_(scratch2, scratch2, Operand(kSlowCaseBitFieldMask)); | |
667 __ cmp(scratch2, Operand(1 << Map::kHasIndexedInterceptor)); | |
668 __ b(ne, &slow); | |
669 | |
670 // Everything is fine, call runtime. | |
671 __ Push(receiver, key); // Receiver, key. | |
672 | |
673 // Perform tail call to the entry. | |
674 __ TailCallExternalReference( | |
675 ExternalReference(IC_Utility(kLoadElementWithInterceptor), | |
676 masm->isolate()), | |
677 2, 1); | |
678 | |
679 __ bind(&slow); | |
680 GenerateMiss(masm); | |
681 } | |
682 | |
683 | |
684 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { | 643 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { |
685 // Push receiver, key and value for runtime call. | 644 // Push receiver, key and value for runtime call. |
686 __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), | 645 __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), |
687 StoreDescriptor::ValueRegister()); | 646 StoreDescriptor::ValueRegister()); |
688 | 647 |
689 ExternalReference ref = | 648 ExternalReference ref = |
690 ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); | 649 ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate()); |
691 __ TailCallExternalReference(ref, 3, 1); | 650 __ TailCallExternalReference(ref, 3, 1); |
692 } | 651 } |
693 | 652 |
694 | 653 |
695 static void KeyedStoreGenerateGenericHelper( | 654 static void KeyedStoreGenerateGenericHelper( |
696 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, | 655 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow, |
697 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, | 656 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length, |
698 Register value, Register key, Register receiver, Register receiver_map, | 657 Register value, Register key, Register receiver, Register receiver_map, |
699 Register elements_map, Register elements) { | 658 Register elements_map, Register elements) { |
700 Label transition_smi_elements; | 659 Label transition_smi_elements; |
701 Label finish_object_store, non_double_value, transition_double_elements; | 660 Label finish_object_store, non_double_value, transition_double_elements; |
702 Label fast_double_without_map_check; | 661 Label fast_double_without_map_check; |
703 | 662 |
704 // Fast case: Do the store, could be either Object or double. | 663 // Fast case: Do the store, could be either Object or double. |
705 __ bind(fast_object); | 664 __ bind(fast_object); |
706 Register scratch_value = r4; | 665 Register scratch_value = r7; |
707 Register address = r5; | 666 Register address = r8; |
708 if (check_map == kCheckMap) { | 667 if (check_map == kCheckMap) { |
709 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 668 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
710 __ cmp(elements_map, | 669 __ mov(scratch_value, |
711 Operand(masm->isolate()->factory()->fixed_array_map())); | 670 Operand(masm->isolate()->factory()->fixed_array_map())); |
712 __ b(ne, fast_double); | 671 __ cmp(elements_map, scratch_value); |
| 672 __ bne(fast_double); |
713 } | 673 } |
714 | 674 |
715 // HOLECHECK: guards "A[i] = V" | 675 // HOLECHECK: guards "A[i] = V" |
716 // We have to go to the runtime if the current value is the hole because | 676 // We have to go to the runtime if the current value is the hole because |
717 // there may be a callback on the element | 677 // there may be a callback on the element |
718 Label holecheck_passed1; | 678 Label holecheck_passed1; |
719 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 679 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
720 __ ldr(scratch_value, | 680 __ SmiToPtrArrayOffset(scratch_value, key); |
721 MemOperand::PointerAddressFromSmiKey(address, key, PreIndex)); | 681 __ LoadPX(scratch_value, MemOperand(address, scratch_value)); |
722 __ cmp(scratch_value, Operand(masm->isolate()->factory()->the_hole_value())); | 682 __ Cmpi(scratch_value, Operand(masm->isolate()->factory()->the_hole_value()), |
723 __ b(ne, &holecheck_passed1); | 683 r0); |
| 684 __ bne(&holecheck_passed1); |
724 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, | 685 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, |
725 slow); | 686 slow); |
726 | 687 |
727 __ bind(&holecheck_passed1); | 688 __ bind(&holecheck_passed1); |
728 | 689 |
729 // Smi stores don't require further checks. | 690 // Smi stores don't require further checks. |
730 Label non_smi_value; | 691 Label non_smi_value; |
731 __ JumpIfNotSmi(value, &non_smi_value); | 692 __ JumpIfNotSmi(value, &non_smi_value); |
732 | 693 |
733 if (increment_length == kIncrementLength) { | 694 if (increment_length == kIncrementLength) { |
734 // Add 1 to receiver->length. | 695 // Add 1 to receiver->length. |
735 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 696 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
736 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 697 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| 698 r0); |
737 } | 699 } |
738 // It's irrelevant whether array is smi-only or not when writing a smi. | 700 // It's irrelevant whether array is smi-only or not when writing a smi. |
739 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 701 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
740 __ str(value, MemOperand::PointerAddressFromSmiKey(address, key)); | 702 __ SmiToPtrArrayOffset(scratch_value, key); |
| 703 __ StorePX(value, MemOperand(address, scratch_value)); |
741 __ Ret(); | 704 __ Ret(); |
742 | 705 |
743 __ bind(&non_smi_value); | 706 __ bind(&non_smi_value); |
744 // Escape to elements kind transition case. | 707 // Escape to elements kind transition case. |
745 __ CheckFastObjectElements(receiver_map, scratch_value, | 708 __ CheckFastObjectElements(receiver_map, scratch_value, |
746 &transition_smi_elements); | 709 &transition_smi_elements); |
747 | 710 |
748 // Fast elements array, store the value to the elements backing store. | 711 // Fast elements array, store the value to the elements backing store. |
749 __ bind(&finish_object_store); | 712 __ bind(&finish_object_store); |
750 if (increment_length == kIncrementLength) { | 713 if (increment_length == kIncrementLength) { |
751 // Add 1 to receiver->length. | 714 // Add 1 to receiver->length. |
752 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 715 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
753 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 716 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| 717 r0); |
754 } | 718 } |
755 __ add(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 719 __ addi(address, elements, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
756 __ add(address, address, Operand::PointerOffsetFromSmiKey(key)); | 720 __ SmiToPtrArrayOffset(scratch_value, key); |
757 __ str(value, MemOperand(address)); | 721 __ StorePUX(value, MemOperand(address, scratch_value)); |
758 // Update write barrier for the elements array address. | 722 // Update write barrier for the elements array address. |
759 __ mov(scratch_value, value); // Preserve the value which is returned. | 723 __ mr(scratch_value, value); // Preserve the value which is returned. |
760 __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved, | 724 __ RecordWrite(elements, address, scratch_value, kLRHasNotBeenSaved, |
761 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); | 725 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK); |
762 __ Ret(); | 726 __ Ret(); |
763 | 727 |
764 __ bind(fast_double); | 728 __ bind(fast_double); |
765 if (check_map == kCheckMap) { | 729 if (check_map == kCheckMap) { |
766 // Check for fast double array case. If this fails, call through to the | 730 // Check for fast double array case. If this fails, call through to the |
767 // runtime. | 731 // runtime. |
768 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); | 732 __ CompareRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex); |
769 __ b(ne, slow); | 733 __ bne(slow); |
770 } | 734 } |
771 | 735 |
772 // HOLECHECK: guards "A[i] double hole?" | 736 // HOLECHECK: guards "A[i] double hole?" |
773 // We have to see if the double version of the hole is present. If so | 737 // We have to see if the double version of the hole is present. If so |
774 // go to the runtime. | 738 // go to the runtime. |
775 __ add(address, elements, | 739 __ addi(address, elements, |
776 Operand((FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32)) - | 740 Operand((FixedDoubleArray::kHeaderSize + Register::kExponentOffset - |
777 kHeapObjectTag)); | 741 kHeapObjectTag))); |
778 __ ldr(scratch_value, | 742 __ SmiToDoubleArrayOffset(scratch_value, key); |
779 MemOperand(address, key, LSL, kPointerSizeLog2, PreIndex)); | 743 __ lwzx(scratch_value, MemOperand(address, scratch_value)); |
780 __ cmp(scratch_value, Operand(kHoleNanUpper32)); | 744 __ Cmpi(scratch_value, Operand(kHoleNanUpper32), r0); |
781 __ b(ne, &fast_double_without_map_check); | 745 __ bne(&fast_double_without_map_check); |
782 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, | 746 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value, |
783 slow); | 747 slow); |
784 | 748 |
785 __ bind(&fast_double_without_map_check); | 749 __ bind(&fast_double_without_map_check); |
786 __ StoreNumberToDoubleElements(value, key, elements, r3, d0, | 750 __ StoreNumberToDoubleElements(value, key, elements, r6, d0, |
787 &transition_double_elements); | 751 &transition_double_elements); |
788 if (increment_length == kIncrementLength) { | 752 if (increment_length == kIncrementLength) { |
789 // Add 1 to receiver->length. | 753 // Add 1 to receiver->length. |
790 __ add(scratch_value, key, Operand(Smi::FromInt(1))); | 754 __ AddSmiLiteral(scratch_value, key, Smi::FromInt(1), r0); |
791 __ str(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 755 __ StoreP(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset), |
| 756 r0); |
792 } | 757 } |
793 __ Ret(); | 758 __ Ret(); |
794 | 759 |
795 __ bind(&transition_smi_elements); | 760 __ bind(&transition_smi_elements); |
796 // Transition the array appropriately depending on the value type. | 761 // Transition the array appropriately depending on the value type. |
797 __ ldr(r4, FieldMemOperand(value, HeapObject::kMapOffset)); | 762 __ LoadP(r7, FieldMemOperand(value, HeapObject::kMapOffset)); |
798 __ CompareRoot(r4, Heap::kHeapNumberMapRootIndex); | 763 __ CompareRoot(r7, Heap::kHeapNumberMapRootIndex); |
799 __ b(ne, &non_double_value); | 764 __ bne(&non_double_value); |
800 | 765 |
801 // Value is a double. Transition FAST_SMI_ELEMENTS -> | 766 // Value is a double. Transition FAST_SMI_ELEMENTS -> |
802 // FAST_DOUBLE_ELEMENTS and complete the store. | 767 // FAST_DOUBLE_ELEMENTS and complete the store. |
803 __ LoadTransitionedArrayMapConditional( | 768 __ LoadTransitionedArrayMapConditional( |
804 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r4, slow); | 769 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, r7, slow); |
805 AllocationSiteMode mode = | 770 AllocationSiteMode mode = |
806 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); | 771 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS); |
807 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, | 772 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value, |
808 receiver_map, mode, slow); | 773 receiver_map, mode, slow); |
809 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 774 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
810 __ jmp(&fast_double_without_map_check); | 775 __ b(&fast_double_without_map_check); |
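| // Elements kinds only generalize: SMI -> DOUBLE -> OBJECT. An |
| // illustrative JS trigger for the path just emitted: |
| //   var a = [1, 2, 3];  // FAST_SMI_ELEMENTS |
| //   a[0] = 1.5;         // smi -> double transition, then the store |
| // elements is reloaded above because the transition allocates a fresh |
| // FixedDoubleArray backing store. |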
811 | 776 |
812 __ bind(&non_double_value); | 777 __ bind(&non_double_value); |
813 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS | 778 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS |
814 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, | 779 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS, |
815 receiver_map, r4, slow); | 780 receiver_map, r7, slow); |
816 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); | 781 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS); |
817 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( | 782 ElementsTransitionGenerator::GenerateMapChangeElementsTransition( |
818 masm, receiver, key, value, receiver_map, mode, slow); | 783 masm, receiver, key, value, receiver_map, mode, slow); |
819 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 784 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
820 __ jmp(&finish_object_store); | 785 __ b(&finish_object_store); |
821 | 786 |
822 __ bind(&transition_double_elements); | 787 __ bind(&transition_double_elements); |
823 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a | 788 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a |
824 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and | 789 // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS and |
825 // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS. | 790 // transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS. |
826 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, | 791 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS, |
827 receiver_map, r4, slow); | 792 receiver_map, r7, slow); |
828 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); | 793 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS); |
829 ElementsTransitionGenerator::GenerateDoubleToObject( | 794 ElementsTransitionGenerator::GenerateDoubleToObject( |
830 masm, receiver, key, value, receiver_map, mode, slow); | 795 masm, receiver, key, value, receiver_map, mode, slow); |
831 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 796 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
832 __ jmp(&finish_object_store); | 797 __ b(&finish_object_store); |
833 } | 798 } |
834 | 799 |
835 | 800 |
836 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, | 801 void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm, |
837 StrictMode strict_mode) { | 802 StrictMode strict_mode) { |
838 // ---------- S t a t e -------------- | 803 // ---------- S t a t e -------------- |
839 // -- r0 : value | 804 // -- r3 : value |
840 // -- r1 : key | 805 // -- r4 : key |
841 // -- r2 : receiver | 806 // -- r5 : receiver |
842 // -- lr : return address | 807 // -- lr : return address |
843 // ----------------------------------- | 808 // ----------------------------------- |
844 Label slow, fast_object, fast_object_grow; | 809 Label slow, fast_object, fast_object_grow; |
845 Label fast_double, fast_double_grow; | 810 Label fast_double, fast_double_grow; |
846 Label array, extra, check_if_double_array; | 811 Label array, extra, check_if_double_array; |
847 | 812 |
848 // Register usage. | 813 // Register usage. |
849 Register value = StoreDescriptor::ValueRegister(); | 814 Register value = StoreDescriptor::ValueRegister(); |
850 Register key = StoreDescriptor::NameRegister(); | 815 Register key = StoreDescriptor::NameRegister(); |
851 Register receiver = StoreDescriptor::ReceiverRegister(); | 816 Register receiver = StoreDescriptor::ReceiverRegister(); |
852 DCHECK(receiver.is(r1)); | 817 DCHECK(receiver.is(r4)); |
853 DCHECK(key.is(r2)); | 818 DCHECK(key.is(r5)); |
854 DCHECK(value.is(r0)); | 819 DCHECK(value.is(r3)); |
855 Register receiver_map = r3; | 820 Register receiver_map = r6; |
856 Register elements_map = r6; | 821 Register elements_map = r9; |
857 Register elements = r9; // Elements array of the receiver. | 822 Register elements = r10; // Elements array of the receiver. |
858 // r4 and r5 are used as general scratch registers. | 823 // r7 and r8 are used as general scratch registers. |
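| // The port shifts ARM's assignments up by three (r0 -> r3, r1 -> r4, |
| // ...): on PPC r0 is special-cased in many instructions, r1 is the |
| // stack pointer and r2 the TOC pointer, so JS values start at r3. |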
859 | 824 |
860 // Check that the key is a smi. | 825 // Check that the key is a smi. |
861 __ JumpIfNotSmi(key, &slow); | 826 __ JumpIfNotSmi(key, &slow); |
862 // Check that the object isn't a smi. | 827 // Check that the object isn't a smi. |
863 __ JumpIfSmi(receiver, &slow); | 828 __ JumpIfSmi(receiver, &slow); |
864 // Get the map of the object. | 829 // Get the map of the object. |
865 __ ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 830 __ LoadP(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
866 // Check that the receiver does not require access checks and is not observed. | 831 // Check that the receiver does not require access checks and is not observed. |
867 // The generic stub does not perform map checks or handle observed objects. | 832 // The generic stub does not perform map checks or handle observed objects. |
868 __ ldrb(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); | 833 __ lbz(ip, FieldMemOperand(receiver_map, Map::kBitFieldOffset)); |
869 __ tst(ip, Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); | 834 __ andi(r0, ip, |
870 __ b(ne, &slow); | 835 Operand(1 << Map::kIsAccessCheckNeeded | 1 << Map::kIsObserved)); |
| 836 __ bne(&slow, cr0); |
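| // PPC has no flags-setting tst; andi. (the record form emitted above) |
| // always writes its result to condition-register field cr0, hence the |
| // explicit cr0 operand on the branch. |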
871 // Check if the object is a JS array or not. | 837 // Check if the object is a JS array or not. |
872 __ ldrb(r4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); | 838 __ lbz(r7, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset)); |
873 __ cmp(r4, Operand(JS_ARRAY_TYPE)); | 839 __ cmpi(r7, Operand(JS_ARRAY_TYPE)); |
874 __ b(eq, &array); | 840 __ beq(&array); |
875 // Check that the object is some kind of JSObject. | 841 // Check that the object is some kind of JSObject. |
876 __ cmp(r4, Operand(FIRST_JS_OBJECT_TYPE)); | 842 __ cmpi(r7, Operand(FIRST_JS_OBJECT_TYPE)); |
877 __ b(lt, &slow); | 843 __ blt(&slow); |
878 | 844 |
879 // Object case: Check key against length in the elements array. | 845 // Object case: Check key against length in the elements array. |
880 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 846 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
881 // Check array bounds. Both the key and the length of FixedArray are smis. | 847 // Check array bounds. Both the key and the length of FixedArray are smis. |
882 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 848 __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
883 __ cmp(key, Operand(ip)); | 849 __ cmpl(key, ip); |
884 __ b(lo, &fast_object); | 850 __ blt(&fast_object); |
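| // ARM encoded the unsigned bounds check in the condition (lo/hs); on |
| // PPC the compare itself carries the signedness: cmpl above is the |
| // logical (unsigned) compare, after which plain blt/bge apply. |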
885 | 851 |
886 // Slow case, handle jump to runtime. | 852 // Slow case, handle jump to runtime. |
887 __ bind(&slow); | 853 __ bind(&slow); |
888 // Entry registers are intact. | 854 // Entry registers are intact. |
889 // r0: value. | 855 // r3: value. |
890 // r1: key. | 856 // r4: key. |
891 // r2: receiver. | 857 // r5: receiver. |
892 PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode); | 858 PropertyICCompiler::GenerateRuntimeSetProperty(masm, strict_mode); |
893 | 859 |
894 // Extra capacity case: Check if there is extra capacity to | 860 // Extra capacity case: Check if there is extra capacity to |
895 // perform the store and update the length. Used for adding one | 861 // perform the store and update the length. Used for adding one |
896 // element to the array by writing to array[array.length]. | 862 // element to the array by writing to array[array.length]. |
897 __ bind(&extra); | 863 __ bind(&extra); |
898 // Condition code from comparing key and array length is still available. | 864 // Condition code from comparing key and array length is still available. |
899 __ b(ne, &slow); // Only support writing to array[array.length]. | 865 __ bne(&slow); // Only support writing to array[array.length]. |
900 // Check for room in the elements backing store. | 866 // Check for room in the elements backing store. |
901 // Both the key and the length of FixedArray are smis. | 867 // Both the key and the length of FixedArray are smis. |
902 __ ldr(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 868 __ LoadP(ip, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
903 __ cmp(key, Operand(ip)); | 869 __ cmpl(key, ip); |
904 __ b(hs, &slow); | 870 __ bge(&slow); |
905 __ ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); | 871 __ LoadP(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset)); |
906 __ cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map())); | 872 __ mov(ip, Operand(masm->isolate()->factory()->fixed_array_map())); |
907 __ b(ne, &check_if_double_array); | 873 __ cmp(elements_map, ip); // ip is free for reuse as a scratch here |
908 __ jmp(&fast_object_grow); | 874 __ bne(&check_if_double_array); |
| 875 __ b(&fast_object_grow); |
909 | 876 |
910 __ bind(&check_if_double_array); | 877 __ bind(&check_if_double_array); |
911 __ cmp(elements_map, | 878 __ mov(ip, Operand(masm->isolate()->factory()->fixed_double_array_map())); |
912 Operand(masm->isolate()->factory()->fixed_double_array_map())); | 879 __ cmp(elements_map, ip); // ip reused as a scratch again |
913 __ b(ne, &slow); | 880 __ bne(&slow); |
914 __ jmp(&fast_double_grow); | 881 __ b(&fast_double_grow); |
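| // These grow paths serve the common append idiom, e.g. in JS: |
| //   a[a.length] = x;  // storing one past the end grows the array by 1 |
| // Any other out-of-bounds index has already been routed to &slow. |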
915 | 882 |
916 // Array case: Get the length and the elements array from the JS | 883 // Array case: Get the length and the elements array from the JS |
917 // array. Check that the array is in fast mode (and writable); if it | 884 // array. Check that the array is in fast mode (and writable); if it |
918 // is, the length is always a smi. | 885 // is, the length is always a smi. |
919 __ bind(&array); | 886 __ bind(&array); |
920 __ ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 887 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
921 | 888 |
922 // Check the key against the length in the array. | 889 // Check the key against the length in the array. |
923 __ ldr(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 890 __ LoadP(ip, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
924 __ cmp(key, Operand(ip)); | 891 __ cmpl(key, ip); |
925 __ b(hs, &extra); | 892 __ bge(&extra); |
926 | 893 |
927 KeyedStoreGenerateGenericHelper( | 894 KeyedStoreGenerateGenericHelper( |
928 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, | 895 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength, |
929 value, key, receiver, receiver_map, elements_map, elements); | 896 value, key, receiver, receiver_map, elements_map, elements); |
930 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, | 897 KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow, |
931 &slow, kDontCheckMap, kIncrementLength, value, | 898 &slow, kDontCheckMap, kIncrementLength, value, |
932 key, receiver, receiver_map, elements_map, | 899 key, receiver, receiver_map, elements_map, |
933 elements); | 900 elements); |
934 } | 901 } |
935 | 902 |
936 | 903 |
937 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { | 904 void StoreIC::GenerateMegamorphic(MacroAssembler* masm) { |
938 Register receiver = StoreDescriptor::ReceiverRegister(); | 905 Register receiver = StoreDescriptor::ReceiverRegister(); |
939 Register name = StoreDescriptor::NameRegister(); | 906 Register name = StoreDescriptor::NameRegister(); |
940 DCHECK(receiver.is(r1)); | 907 DCHECK(receiver.is(r4)); |
941 DCHECK(name.is(r2)); | 908 DCHECK(name.is(r5)); |
942 DCHECK(StoreDescriptor::ValueRegister().is(r0)); | 909 DCHECK(StoreDescriptor::ValueRegister().is(r3)); |
943 | 910 |
944 // Probe the stub cache for a handler matching the receiver and name. | 911 // Probe the stub cache for a handler matching the receiver and name. |
945 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( | 912 Code::Flags flags = Code::RemoveTypeAndHolderFromFlags( |
946 Code::ComputeHandlerFlags(Code::STORE_IC)); | 913 Code::ComputeHandlerFlags(Code::STORE_IC)); |
947 | 914 |
948 masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver, | 915 masm->isolate()->stub_cache()->GenerateProbe(masm, flags, false, receiver, |
949 name, r3, r4, r5, r6); | 916 name, r6, r7, r8, r9); |
950 | 917 |
951 // Cache miss: Jump to runtime. | 918 // Cache miss: Jump to runtime. |
952 GenerateMiss(masm); | 919 GenerateMiss(masm); |
953 } | 920 } |
954 | 921 |
955 | 922 |
956 void StoreIC::GenerateMiss(MacroAssembler* masm) { | 923 void StoreIC::GenerateMiss(MacroAssembler* masm) { |
957 __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), | 924 __ Push(StoreDescriptor::ReceiverRegister(), StoreDescriptor::NameRegister(), |
958 StoreDescriptor::ValueRegister()); | 925 StoreDescriptor::ValueRegister()); |
959 | 926 |
960 // Perform tail call to the entry. | 927 // Perform tail call to the entry. |
961 ExternalReference ref = | 928 ExternalReference ref = |
962 ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); | 929 ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate()); |
963 __ TailCallExternalReference(ref, 3, 1); | 930 __ TailCallExternalReference(ref, 3, 1); |
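| // The 3 matches the receiver/name/value triple just pushed; the final |
| // 1 is the expected result size. |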
964 } | 931 } |
965 | 932 |
966 | 933 |
967 void StoreIC::GenerateNormal(MacroAssembler* masm) { | 934 void StoreIC::GenerateNormal(MacroAssembler* masm) { |
968 Label miss; | 935 Label miss; |
969 Register receiver = StoreDescriptor::ReceiverRegister(); | 936 Register receiver = StoreDescriptor::ReceiverRegister(); |
970 Register name = StoreDescriptor::NameRegister(); | 937 Register name = StoreDescriptor::NameRegister(); |
971 Register value = StoreDescriptor::ValueRegister(); | 938 Register value = StoreDescriptor::ValueRegister(); |
972 Register dictionary = r3; | 939 Register dictionary = r6; |
973 DCHECK(receiver.is(r1)); | 940 DCHECK(receiver.is(r4)); |
974 DCHECK(name.is(r2)); | 941 DCHECK(name.is(r5)); |
975 DCHECK(value.is(r0)); | 942 DCHECK(value.is(r3)); |
976 | 943 |
977 __ ldr(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 944 __ LoadP(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
978 | 945 |
979 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r4, r5); | 946 GenerateDictionaryStore(masm, &miss, dictionary, name, value, r7, r8); |
980 Counters* counters = masm->isolate()->counters(); | 947 Counters* counters = masm->isolate()->counters(); |
981 __ IncrementCounter(counters->store_normal_hit(), 1, r4, r5); | 948 __ IncrementCounter(counters->store_normal_hit(), 1, r7, r8); |
982 __ Ret(); | 949 __ Ret(); |
983 | 950 |
984 __ bind(&miss); | 951 __ bind(&miss); |
985 __ IncrementCounter(counters->store_normal_miss(), 1, r4, r5); | 952 __ IncrementCounter(counters->store_normal_miss(), 1, r7, r8); |
986 GenerateMiss(masm); | 953 GenerateMiss(masm); |
987 } | 954 } |
988 | 955 |
989 | 956 |
990 #undef __ | 957 #undef __ |
991 | 958 |
992 | 959 |
993 Condition CompareIC::ComputeCondition(Token::Value op) { | 960 Condition CompareIC::ComputeCondition(Token::Value op) { |
994 switch (op) { | 961 switch (op) { |
995 case Token::EQ_STRICT: | 962 case Token::EQ_STRICT: |
(...skipping 19 matching lines...) |
1015 Address cmp_instruction_address = | 982 Address cmp_instruction_address = |
1016 Assembler::return_address_from_call_start(address); | 983 Assembler::return_address_from_call_start(address); |
1017 | 984 |
1018 // If the instruction following the call is not a cmp rx, #yyy, nothing | 985 // If the instruction following the call is not a cmpi rx, #yyy, nothing |
1019 // was inlined. | 986 // was inlined. |
1020 Instr instr = Assembler::instr_at(cmp_instruction_address); | 987 Instr instr = Assembler::instr_at(cmp_instruction_address); |
1021 return Assembler::IsCmpImmediate(instr); | 988 return Assembler::IsCmpImmediate(instr); |
1022 } | 989 } |
1023 | 990 |
1024 | 991 |
| 992 // |
| 993 // This code is paired with the JumpPatchSite class in full-codegen-ppc.cc |
| 994 // |
1025 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { | 995 void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) { |
1026 Address cmp_instruction_address = | 996 Address cmp_instruction_address = |
1027 Assembler::return_address_from_call_start(address); | 997 Assembler::return_address_from_call_start(address); |
1028 | 998 |
1029 // If the instruction following the call is not a cmp rx, #yyy, nothing | 999 // If the instruction following the call is not a cmpi rx, #yyy, nothing |
1030 // was inlined. | 1000 // was inlined. |
1031 Instr instr = Assembler::instr_at(cmp_instruction_address); | 1001 Instr instr = Assembler::instr_at(cmp_instruction_address); |
1032 if (!Assembler::IsCmpImmediate(instr)) { | 1002 if (!Assembler::IsCmpImmediate(instr)) { |
1033 return; | 1003 return; |
1034 } | 1004 } |
1035 | 1005 |
1036 // The delta to the start of the map check instruction and the | 1006 // The delta to the start of the map check instruction and the |
1037 // condition code used at the patched jump. | 1007 // condition code used at the patched jump. |
1038 int delta = Assembler::GetCmpImmediateRawImmediate(instr); | 1008 int delta = Assembler::GetCmpImmediateRawImmediate(instr); |
1039 delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff12Mask; | 1009 delta += Assembler::GetCmpImmediateRegister(instr).code() * kOff16Mask; |
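| // The patch site stashes the distance back to the map check in the |
| // cmpi that follows the call: the 16-bit immediate holds the low part |
| // and the register field, scaled by kOff16Mask, extends the range |
| // (the ARM original used kOff12Mask the same way). |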
1040 // If the delta is 0, the instruction is cmp r0, #0, which also signals that | 1010 // If the delta is 0, the instruction is cmpi r0, #0, which also signals that |
1041 // nothing was inlined. | 1011 // nothing was inlined. |
1042 if (delta == 0) { | 1012 if (delta == 0) { |
1043 return; | 1013 return; |
1044 } | 1014 } |
1045 | 1015 |
1046 if (FLAG_trace_ic) { | 1016 if (FLAG_trace_ic) { |
1047 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address, | 1017 PrintF("[ patching ic at %p, cmp=%p, delta=%d\n", address, |
1048 cmp_instruction_address, delta); | 1018 cmp_instruction_address, delta); |
1049 } | 1019 } |
1050 | 1020 |
1051 Address patch_address = | 1021 Address patch_address = |
1052 cmp_instruction_address - delta * Instruction::kInstrSize; | 1022 cmp_instruction_address - delta * Instruction::kInstrSize; |
1053 Instr instr_at_patch = Assembler::instr_at(patch_address); | 1023 Instr instr_at_patch = Assembler::instr_at(patch_address); |
1054 Instr branch_instr = | 1024 Instr branch_instr = |
1055 Assembler::instr_at(patch_address + Instruction::kInstrSize); | 1025 Assembler::instr_at(patch_address + Instruction::kInstrSize); |
1056 // This is patching a conditional "jump if not smi/jump if smi" site. | 1026 // This is patching a conditional "jump if not smi/jump if smi" site. |
1057 // Enabling by changing from | 1027 // Enabling by changing from |
1058 // cmp rx, rx | 1028 // cmp cr0, rx, rx |
1059 // b eq/ne, <target> | // beq/bne <target> |
1060 // to | 1029 // to |
1061 // tst rx, #kSmiTagMask | 1030 // rlwinm(r0, value, 0, 31, 31, SetRC); |
1062 // b ne/eq, <target> | 1031 // bc(label, BT/BF, 2) |
1063 // and vice-versa to be disabled again. | 1032 // and vice-versa to be disabled again. |
1064 CodePatcher patcher(patch_address, 2); | 1033 CodePatcher patcher(patch_address, 2); |
1065 Register reg = Assembler::GetRn(instr_at_patch); | 1034 Register reg = Assembler::GetRA(instr_at_patch); |
1066 if (check == ENABLE_INLINED_SMI_CHECK) { | 1035 if (check == ENABLE_INLINED_SMI_CHECK) { |
1067 DCHECK(Assembler::IsCmpRegister(instr_at_patch)); | 1036 DCHECK(Assembler::IsCmpRegister(instr_at_patch)); |
1068 DCHECK_EQ(Assembler::GetRn(instr_at_patch).code(), | 1037 DCHECK_EQ(Assembler::GetRA(instr_at_patch).code(), |
1069 Assembler::GetRm(instr_at_patch).code()); | 1038 Assembler::GetRB(instr_at_patch).code()); |
1070 patcher.masm()->tst(reg, Operand(kSmiTagMask)); | 1039 patcher.masm()->TestIfSmi(reg, r0); |
1071 } else { | 1040 } else { |
1072 DCHECK(check == DISABLE_INLINED_SMI_CHECK); | 1041 DCHECK(check == DISABLE_INLINED_SMI_CHECK); |
1073 DCHECK(Assembler::IsTstImmediate(instr_at_patch)); | 1042 #if V8_TARGET_ARCH_PPC64 |
1074 patcher.masm()->cmp(reg, reg); | 1043 DCHECK(Assembler::IsRldicl(instr_at_patch)); |
| 1044 #else |
| 1045 DCHECK(Assembler::IsRlwinm(instr_at_patch)); |
| 1046 #endif |
| 1047 patcher.masm()->cmp(reg, reg, cr0); |
1075 } | 1048 } |
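| // TestIfSmi presumably extracts the low tag bit with a record-form |
| // rotate (rldicl. on PPC64, rlwinm. on 32-bit) that sets cr0, which is |
| // what the IsRldicl/IsRlwinm assertions above recognize when the check |
| // is later disabled. |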
1076 DCHECK(Assembler::IsBranch(branch_instr)); | 1049 DCHECK(Assembler::IsBranch(branch_instr)); |
| 1050 |
| 1051 // Invert the logic of the branch |
1077 if (Assembler::GetCondition(branch_instr) == eq) { | 1052 if (Assembler::GetCondition(branch_instr) == eq) { |
1078 patcher.EmitCondition(ne); | 1053 patcher.EmitCondition(ne); |
1079 } else { | 1054 } else { |
1080 DCHECK(Assembler::GetCondition(branch_instr) == ne); | 1055 DCHECK(Assembler::GetCondition(branch_instr) == ne); |
1081 patcher.EmitCondition(eq); | 1056 patcher.EmitCondition(eq); |
1082 } | 1057 } |
1083 } | 1058 } |
1084 } | 1059 } |
1085 } // namespace v8::internal | 1060 } // namespace v8::internal |
1086 | 1061 |
1087 #endif // V8_TARGET_ARCH_ARM | 1062 #endif // V8_TARGET_ARCH_PPC |