OLD | NEW |
1 // Copyright 2013 the V8 project authors. All rights reserved. | 1 // Copyright 2013 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_ARM64 | 5 #if V8_TARGET_ARCH_ARM64 |
6 | 6 |
7 #include "src/codegen.h" | 7 #include "src/codegen.h" |
8 #include "src/ic/ic.h" | 8 #include "src/ic/ic.h" |
9 #include "src/ic/ic-compiler.h" | 9 #include "src/ic/ic-compiler.h" |
10 #include "src/ic/stub-cache.h" | 10 #include "src/ic/stub-cache.h" |
11 | 11 |
12 namespace v8 { | 12 namespace v8 { |
13 namespace internal { | 13 namespace internal { |
14 | 14 |
15 | 15 |
16 #define __ ACCESS_MASM(masm) | 16 #define __ ACCESS_MASM(masm) |
17 | 17 |
18 | |
19 // "type" holds an instance type on entry and is not clobbered. | |
20 // Generated code branches to "global_object" if type is any kind of global | |
21 // JS object. | |
22 static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm, Register type, | |
23 Label* global_object) { | |
24 __ Cmp(type, JS_GLOBAL_OBJECT_TYPE); | |
25 __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne); | |
26 __ B(eq, global_object); | |
27 } | |
28 | |
29 | |
30 // Helper function used from LoadIC GenerateNormal. | 18 // Helper function used from LoadIC GenerateNormal. |
31 // | 19 // |
32 // elements: Property dictionary. It is not clobbered if a jump to the miss | 20 // elements: Property dictionary. It is not clobbered if a jump to the miss |
33 // label is done. | 21 // label is done. |
34 // name: Property name. It is not clobbered if a jump to the miss label is | 22 // name: Property name. It is not clobbered if a jump to the miss label is |
35 // done | 23 // done |
36 // result: Register for the result. It is only updated if a jump to the miss | 24 // result: Register for the result. It is only updated if a jump to the miss |
37 // label is not done. | 25 // label is not done. |
38 // The scratch registers need to be different from elements, name and result. | 26 // The scratch registers need to be different from elements, name and result. |
39 // The generated code assumes that the receiver has slow properties, | 27 // The generated code assumes that the receiver has slow properties, |
(...skipping 69 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
109 static const int kValueOffset = kElementsStartOffset + kPointerSize; | 97 static const int kValueOffset = kElementsStartOffset + kPointerSize; |
110 __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag); | 98 __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag); |
111 __ Str(value, MemOperand(scratch2)); | 99 __ Str(value, MemOperand(scratch2)); |
112 | 100 |
113 // Update the write barrier. Make sure not to clobber the value. | 101 // Update the write barrier. Make sure not to clobber the value. |
114 __ Mov(scratch1, value); | 102 __ Mov(scratch1, value); |
115 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, | 103 __ RecordWrite(elements, scratch2, scratch1, kLRHasNotBeenSaved, |
116 kDontSaveFPRegs); | 104 kDontSaveFPRegs); |
117 } | 105 } |
118 | 106 |
119 | |
120 // Checks the receiver for special cases (value type, slow case bits). | |
121 // Falls through for regular JS object and return the map of the | |
122 // receiver in 'map_scratch' if the receiver is not a SMI. | |
123 static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm, | |
124 Register receiver, | |
125 Register map_scratch, | |
126 Register scratch, | |
127 int interceptor_bit, Label* slow) { | |
128 DCHECK(!AreAliased(map_scratch, scratch)); | |
129 | |
130 // Check that the object isn't a smi. | |
131 __ JumpIfSmi(receiver, slow); | |
132 // Get the map of the receiver. | |
133 __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
134 // Check bit field. | |
135 __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset)); | |
136 __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow); | |
137 __ Tbnz(scratch, interceptor_bit, slow); | |
138 | |
139 // Check that the object is some kind of JS object EXCEPT JS Value type. | |
140 // In the case that the object is a value-wrapper object, we enter the | |
141 // runtime system to make sure that indexing into string objects work | |
142 // as intended. | |
143 STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE); | |
144 __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset)); | |
145 __ Cmp(scratch, JS_OBJECT_TYPE); | |
146 __ B(lt, slow); | |
147 } | |
148 | |
149 | |
150 // Loads an indexed element from a fast case array. | |
151 // | |
152 // receiver - holds the receiver on entry. | |
153 // Unchanged unless 'result' is the same register. | |
154 // | |
155 // key - holds the smi key on entry. | |
156 // Unchanged unless 'result' is the same register. | |
157 // | |
158 // elements - holds the elements of the receiver and its prototypes. Clobbered. | |
159 // | |
160 // result - holds the result on exit if the load succeeded. | |
161 // Allowed to be the same as 'receiver' or 'key'. | |
162 // Unchanged on bailout so 'receiver' and 'key' can be safely | |
163 // used by further computation. | |
164 static void GenerateFastArrayLoad(MacroAssembler* masm, Register receiver, | |
165 Register key, Register elements, | |
166 Register scratch1, Register scratch2, | |
167 Register result, Label* slow) { | |
168 DCHECK(!AreAliased(receiver, key, elements, scratch1, scratch2)); | |
169 | |
170 Label check_prototypes, check_next_prototype; | |
171 Label done, in_bounds, absent; | |
172 | |
173 // Check for fast array. | |
174 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
175 __ AssertFastElements(elements); | |
176 | |
177 // Check that the key (index) is within bounds. | |
178 __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset)); | |
179 __ Cmp(key, scratch1); | |
180 __ B(lo, &in_bounds); | |
181 | |
182 // Out of bounds. Check the prototype chain to see if we can just return | |
183 // 'undefined'. | |
184 __ Cmp(key, Operand(Smi::kZero)); | |
185 __ B(lt, slow); // Negative keys can't take the fast OOB path. | |
186 __ Bind(&check_prototypes); | |
187 __ Ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
188 __ Bind(&check_next_prototype); | |
189 __ Ldr(scratch2, FieldMemOperand(scratch2, Map::kPrototypeOffset)); | |
190 // scratch2: current prototype | |
191 __ JumpIfRoot(scratch2, Heap::kNullValueRootIndex, &absent); | |
192 __ Ldr(elements, FieldMemOperand(scratch2, JSObject::kElementsOffset)); | |
193 __ Ldr(scratch2, FieldMemOperand(scratch2, HeapObject::kMapOffset)); | |
194 // elements: elements of current prototype | |
195 // scratch2: map of current prototype | |
196 __ CompareInstanceType(scratch2, scratch1, JS_OBJECT_TYPE); | |
197 __ B(lo, slow); | |
198 __ Ldrb(scratch1, FieldMemOperand(scratch2, Map::kBitFieldOffset)); | |
199 __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, slow); | |
200 __ Tbnz(scratch1, Map::kHasIndexedInterceptor, slow); | |
201 __ JumpIfNotRoot(elements, Heap::kEmptyFixedArrayRootIndex, slow); | |
202 __ B(&check_next_prototype); | |
203 | |
204 __ Bind(&absent); | |
205 __ LoadRoot(result, Heap::kUndefinedValueRootIndex); | |
206 __ B(&done); | |
207 | |
208 __ Bind(&in_bounds); | |
209 // Fast case: Do the load. | |
210 __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag); | |
211 __ SmiUntag(scratch2, key); | |
212 __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2)); | |
213 | |
214 // In case the loaded value is the_hole we have to check the prototype chain. | |
215 __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, &check_prototypes); | |
216 | |
217 // Move the value to the result register. | |
218 // 'result' can alias with 'receiver' or 'key' but these two must be | |
219 // preserved if we jump to 'slow'. | |
220 __ Mov(result, scratch2); | |
221 __ Bind(&done); | |
222 } | |
223 | |
224 | |
225 // Checks whether a key is an array index string or a unique name. | |
226 // Falls through if a key is a unique name. | |
227 // The map of the key is returned in 'map_scratch'. | |
228 // If the jump to 'index_string' is done the hash of the key is left | |
229 // in 'hash_scratch'. | |
230 static void GenerateKeyNameCheck(MacroAssembler* masm, Register key, | |
231 Register map_scratch, Register hash_scratch, | |
232 Label* index_string, Label* not_unique) { | |
233 DCHECK(!AreAliased(key, map_scratch, hash_scratch)); | |
234 | |
235 // Is the key a name? | |
236 Label unique; | |
237 __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE, | |
238 not_unique, hi); | |
239 STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE); | |
240 __ B(eq, &unique); | |
241 | |
242 // Is the string an array index with cached numeric value? | |
243 __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset)); | |
244 __ TestAndBranchIfAllClear(hash_scratch, Name::kContainsCachedArrayIndexMask, | |
245 index_string); | |
246 | |
247 // Is the string internalized? We know it's a string, so a single bit test is | |
248 // enough. | |
249 __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset)); | |
250 STATIC_ASSERT(kInternalizedTag == 0); | |
251 __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique); | |
252 | |
253 __ Bind(&unique); | |
254 // Fall through if the key is a unique name. | |
255 } | |
256 | |
257 void LoadIC::GenerateNormal(MacroAssembler* masm) { | 107 void LoadIC::GenerateNormal(MacroAssembler* masm) { |
258 Register dictionary = x0; | 108 Register dictionary = x0; |
259 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); | 109 DCHECK(!dictionary.is(LoadDescriptor::ReceiverRegister())); |
260 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); | 110 DCHECK(!dictionary.is(LoadDescriptor::NameRegister())); |
261 Label slow; | 111 Label slow; |
262 | 112 |
263 __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), | 113 __ Ldr(dictionary, FieldMemOperand(LoadDescriptor::ReceiverRegister(), |
264 JSObject::kPropertiesOffset)); | 114 JSObject::kPropertiesOffset)); |
265 GenerateDictionaryLoad(masm, &slow, dictionary, | 115 GenerateDictionaryLoad(masm, &slow, dictionary, |
266 LoadDescriptor::NameRegister(), x0, x3, x4); | 116 LoadDescriptor::NameRegister(), x0, x3, x4); |
(...skipping 49 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
316 } | 166 } |
317 | 167 |
318 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { | 168 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) { |
319 // The return address is in lr. | 169 // The return address is in lr. |
320 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); | 170 __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister()); |
321 | 171 |
322 // Do tail-call to runtime routine. | 172 // Do tail-call to runtime routine. |
323 __ TailCallRuntime(Runtime::kKeyedGetProperty); | 173 __ TailCallRuntime(Runtime::kKeyedGetProperty); |
324 } | 174 } |
325 | 175 |
326 static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm, Register key, | |
327 Register receiver, Register scratch1, | |
328 Register scratch2, Register scratch3, | |
329 Register scratch4, Register scratch5, | |
330 Label* slow) { | |
331 DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4, | |
332 scratch5)); | |
333 | |
334 Isolate* isolate = masm->isolate(); | |
335 Label check_number_dictionary; | |
336 // If we can load the value, it should be returned in x0. | |
337 Register result = x0; | |
338 | |
339 GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2, | |
340 Map::kHasIndexedInterceptor, slow); | |
341 | |
342 // Check the receiver's map to see if it has fast elements. | |
343 __ CheckFastElements(scratch1, scratch2, &check_number_dictionary); | |
344 | |
345 GenerateFastArrayLoad(masm, receiver, key, scratch3, scratch2, scratch1, | |
346 result, slow); | |
347 __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_smi(), 1, | |
348 scratch1, scratch2); | |
349 __ Ret(); | |
350 | |
351 __ Bind(&check_number_dictionary); | |
352 __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | |
353 __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset)); | |
354 | |
355 // Check whether we have a number dictionary. | |
356 __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow); | |
357 | |
358 __ LoadFromNumberDictionary(slow, scratch3, key, result, scratch1, scratch2, | |
359 scratch4, scratch5); | |
360 __ Ret(); | |
361 } | |
362 | |
363 static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm, Register key, | |
364 Register receiver, Register scratch1, | |
365 Register scratch2, Register scratch3, | |
366 Register scratch4, Register scratch5, | |
367 Label* slow) { | |
368 DCHECK(!AreAliased(key, receiver, scratch1, scratch2, scratch3, scratch4, | |
369 scratch5)); | |
370 | |
371 Isolate* isolate = masm->isolate(); | |
372 Label probe_dictionary, property_array_property; | |
373 // If we can load the value, it should be returned in x0. | |
374 Register result = x0; | |
375 | |
376 GenerateKeyedLoadReceiverCheck(masm, receiver, scratch1, scratch2, | |
377 Map::kHasNamedInterceptor, slow); | |
378 | |
379 // If the receiver is a fast-case object, check the stub cache. Otherwise | |
380 // probe the dictionary. | |
381 __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | |
382 __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset)); | |
383 __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary); | |
384 | |
385 // The handlers in the stub cache expect a vector and slot. Since we won't | |
386 // change the IC from any downstream misses, a dummy vector can be used. | |
387 Register vector = LoadWithVectorDescriptor::VectorRegister(); | |
388 Register slot = LoadWithVectorDescriptor::SlotRegister(); | |
389 DCHECK(!AreAliased(vector, slot, scratch1, scratch2, scratch3, scratch4)); | |
390 Handle<TypeFeedbackVector> dummy_vector = | |
391 TypeFeedbackVector::DummyVector(masm->isolate()); | |
392 int slot_index = dummy_vector->GetIndex( | |
393 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedLoadICSlot)); | |
394 __ LoadRoot(vector, Heap::kDummyVectorRootIndex); | |
395 __ Mov(slot, Operand(Smi::FromInt(slot_index))); | |
396 | |
397 masm->isolate()->load_stub_cache()->GenerateProbe( | |
398 masm, receiver, key, scratch1, scratch2, scratch3, scratch4); | |
399 // Cache miss. | |
400 KeyedLoadIC::GenerateMiss(masm); | |
401 | |
402 // Do a quick inline probe of the receiver's dictionary, if it exists. | |
403 __ Bind(&probe_dictionary); | |
404 __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); | |
405 __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | |
406 GenerateGlobalInstanceTypeCheck(masm, scratch1, slow); | |
407 // Load the property. | |
408 GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3); | |
409 __ IncrementCounter(isolate->counters()->ic_keyed_load_generic_symbol(), 1, | |
410 scratch1, scratch2); | |
411 __ Ret(); | |
412 } | |
413 | |
414 void KeyedLoadIC::GenerateMegamorphic(MacroAssembler* masm) { | |
415 // The return address is in lr. | |
416 Label slow, check_name, index_smi, index_name; | |
417 | |
418 Register key = LoadDescriptor::NameRegister(); | |
419 Register receiver = LoadDescriptor::ReceiverRegister(); | |
420 DCHECK(key.is(x2)); | |
421 DCHECK(receiver.is(x1)); | |
422 | |
423 __ JumpIfNotSmi(key, &check_name); | |
424 __ Bind(&index_smi); | |
425 // Now the key is known to be a smi. This place is also jumped to from below | |
426 // where a numeric string is converted to a smi. | |
427 GenerateKeyedLoadWithSmiKey(masm, key, receiver, x7, x3, x4, x5, x6, &slow); | |
428 | |
429 // Slow case. | |
430 __ Bind(&slow); | |
431 __ IncrementCounter(masm->isolate()->counters()->ic_keyed_load_generic_slow(), | |
432 1, x4, x3); | |
433 GenerateRuntimeGetProperty(masm); | |
434 | |
435 __ Bind(&check_name); | |
436 GenerateKeyNameCheck(masm, key, x0, x3, &index_name, &slow); | |
437 | |
438 GenerateKeyedLoadWithNameKey(masm, key, receiver, x4, x5, x6, x7, x3, &slow); | |
439 | |
440 __ Bind(&index_name); | |
441 __ IndexFromHash(x3, key); | |
442 // Now jump to the place where smi keys are handled. | |
443 __ B(&index_smi); | |
444 } | |
445 | |
446 | |
447 static void StoreIC_PushArgs(MacroAssembler* masm) { | 176 static void StoreIC_PushArgs(MacroAssembler* masm) { |
448 __ Push(StoreWithVectorDescriptor::ValueRegister(), | 177 __ Push(StoreWithVectorDescriptor::ValueRegister(), |
449 StoreWithVectorDescriptor::SlotRegister(), | 178 StoreWithVectorDescriptor::SlotRegister(), |
450 StoreWithVectorDescriptor::VectorRegister(), | 179 StoreWithVectorDescriptor::VectorRegister(), |
451 StoreWithVectorDescriptor::ReceiverRegister(), | 180 StoreWithVectorDescriptor::ReceiverRegister(), |
452 StoreWithVectorDescriptor::NameRegister()); | 181 StoreWithVectorDescriptor::NameRegister()); |
453 } | 182 } |
454 | 183 |
455 | 184 |
456 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { | 185 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) { |
(...skipping 387 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
844 } else { | 573 } else { |
845 DCHECK(to_patch->Mask(TestBranchMask) == TBNZ); | 574 DCHECK(to_patch->Mask(TestBranchMask) == TBNZ); |
846 // This is JumpIfSmi(smi_reg, branch_imm). | 575 // This is JumpIfSmi(smi_reg, branch_imm). |
847 patcher.tbz(smi_reg, 0, branch_imm); | 576 patcher.tbz(smi_reg, 0, branch_imm); |
848 } | 577 } |
849 } | 578 } |
850 } // namespace internal | 579 } // namespace internal |
851 } // namespace v8 | 580 } // namespace v8 |
852 | 581 |
853 #endif // V8_TARGET_ARCH_ARM64 | 582 #endif // V8_TARGET_ARCH_ARM64 |
OLD | NEW |