// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#include "a64/assembler-a64.h"
#include "code-stubs.h"
#include "codegen.h"
#include "disasm.h"
#include "ic-inl.h"
#include "runtime.h"
#include "stub-cache.h"

namespace v8 {
namespace internal {


#define __ ACCESS_MASM(masm)


// "type" holds an instance type on entry and is not clobbered.
// Generated code branches to "global_object" if type is any kind of global
// JS object.
static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
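  // Chain the checks with conditional compares: each Ccmp only performs its
  // comparison if the previous one failed (ne); once a comparison matches,
  // the ZFlag value is propagated, so the single B(eq) below fires for any
  // of the three global object types.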
  __ Cmp(type, JS_GLOBAL_OBJECT_TYPE);
  __ Ccmp(type, JS_BUILTINS_OBJECT_TYPE, ZFlag, ne);
  __ Ccmp(type, JS_GLOBAL_PROXY_TYPE, ZFlag, ne);
  __ B(eq, global_object);
}


// Generated code falls through if the receiver is a regular non-global
// JS object with slow properties and no interceptors.
//
// "receiver" holds the receiver on entry and is unchanged.
// "elements" holds the property dictionary on fall through.
static void GenerateNameDictionaryReceiverCheck(MacroAssembler* masm,
                                                Register receiver,
                                                Register elements,
                                                Register scratch0,
                                                Register scratch1,
                                                Label* miss) {
  ASSERT(!AreAliased(receiver, elements, scratch0, scratch1));

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, miss);

  // Check that the receiver is a valid JS object.
  // Let t be the object instance type, we want:
  //   FIRST_SPEC_OBJECT_TYPE <= t <= LAST_SPEC_OBJECT_TYPE.
  // Since LAST_SPEC_OBJECT_TYPE is the last possible instance type we only
  // check the lower bound.
  STATIC_ASSERT(LAST_TYPE == LAST_SPEC_OBJECT_TYPE);

  __ JumpIfObjectType(receiver, scratch0, scratch1, FIRST_SPEC_OBJECT_TYPE,
                      miss, lt);

  // scratch0 now contains the map of the receiver and scratch1 the object
  // type.
  Register map = scratch0;
  Register type = scratch1;

  // Check if the receiver is a global JS object.
  GenerateGlobalInstanceTypeCheck(masm, type, miss);

  // Check that the object does not require access checks.
  __ Ldrb(scratch1, FieldMemOperand(map, Map::kBitFieldOffset));
  __ Tbnz(scratch1, Map::kIsAccessCheckNeeded, miss);
  __ Tbnz(scratch1, Map::kHasNamedInterceptor, miss);

  // Check that the properties dictionary is valid.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ JumpIfNotRoot(scratch1, Heap::kHashTableMapRootIndex, miss);
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done.
// The scratch registers need to be different from elements, name and result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  ASSERT(!AreAliased(elements, name, scratch1, scratch2));
  ASSERT(!AreAliased(result, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry check that the value is a normal property.
  __ Bind(&done);

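  // Each NameDictionary entry is a (key, value, details) triple, and on
  // success GeneratePositiveLookup leaves scratch2 pointing at the found
  // entry: the details word is two pointers past the elements start, the
  // value one pointer past it.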
  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  __ Ldr(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, Smi::FromInt(PropertyDetails::TypeField::kMask));
  __ B(ne, miss);

  // Get the value at the masked, scaled index and return.
  __ Ldr(result,
         FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store (never clobbered).
//
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  ASSERT(!AreAliased(elements, name, value, scratch1, scratch2));

  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ Bind(&done);

  static const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  static const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  static const int kTypeAndReadOnlyMask =
      PropertyDetails::TypeField::kMask |
      PropertyDetails::AttributesField::encode(READ_ONLY);
  __ Ldrsw(scratch1, UntagSmiFieldMemOperand(scratch2, kDetailsOffset));
  __ Tst(scratch1, kTypeAndReadOnlyMask);
  __ B(ne, miss);

  // Store the value at the masked, scaled index and return.
  static const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Add(scratch2, scratch2, kValueOffset - kHeapObjectTag);
  __ Str(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ Mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kLRHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS objects and returns the map of the
// receiver in 'map_scratch' if the receiver is not a smi.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map_scratch,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  ASSERT(!AreAliased(map_scratch, scratch));

  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ Ldr(map_scratch, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kBitFieldOffset));
  __ Tbnz(scratch, Map::kIsAccessCheckNeeded, slow);
  __ Tbnz(scratch, interceptor_bit, slow);

  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object, we enter the
  // runtime system to make sure that indexing into string objects works
  // as intended.
  STATIC_ASSERT(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ Ldrb(scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  __ Cmp(scratch, JS_OBJECT_TYPE);
  __ B(lt, slow);
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
//
// receiver     - holds the receiver on entry.
//                Unchanged unless 'result' is the same register.
//
// key          - holds the smi key on entry.
//                Unchanged unless 'result' is the same register.
//
// elements     - holds the elements of the receiver on exit.
//
// elements_map - holds the elements map on exit if the not_fast_array branch
//                is taken. Otherwise, this is used as a scratch register.
//
// result       - holds the result on exit if the load succeeded.
//                Allowed to be the same as 'receiver' or 'key'.
//                Unchanged on bailout so 'receiver' and 'key' can be safely
//                used by further computation.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register elements_map,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* slow) {
  ASSERT(!AreAliased(receiver, key, elements, elements_map, scratch2));

  // Check for fast array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode and writable.
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ JumpIfNotRoot(elements_map, Heap::kFixedArrayMapRootIndex,
                     not_fast_array);
  } else {
    __ AssertFastElements(elements);
  }

  // The elements_map register is only used for the not_fast_array path, which
  // was handled above. From this point onward it is a scratch register.
  Register scratch1 = elements_map;

  // Check that the key (index) is within bounds.
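  // Both the key and the length are tagged smis, so they compare directly;
  // the unsigned comparison also sends negative keys to the slow path.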
  __ Ldr(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(key, scratch1);
  __ B(hs, slow);

  // Fast case: Do the load.
  __ Add(scratch1, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch2, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));

  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ JumpIfRoot(scratch2, Heap::kTheHoleValueRootIndex, slow);

  // Move the value to the result register.
  // 'result' can alias with 'receiver' or 'key' but these two must be
  // preserved if we jump to 'slow'.
  __ Mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if the key is a unique name.
// The map of the key is returned in 'map_scratch'.
// If the jump to 'index_string' is done the hash of the key is left
// in 'hash_scratch'.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map_scratch,
                                 Register hash_scratch,
                                 Label* index_string,
                                 Label* not_unique) {
  ASSERT(!AreAliased(key, map_scratch, hash_scratch));

  // Is the key a name?
  Label unique;
  __ JumpIfObjectType(key, map_scratch, hash_scratch, LAST_UNIQUE_NAME_TYPE,
                      not_unique, hi);
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ B(eq, &unique);
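  // The eq case above is the boundary type (LAST_UNIQUE_NAME_TYPE, a
  // symbol), which is always a unique name; below it the key is a string.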

  // Is the string an array index with cached numeric value?
  __ Ldr(hash_scratch.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ TestAndBranchIfAllClear(hash_scratch,
                             Name::kContainsCachedArrayIndexMask,
                             index_string);

  // Is the string internalized? We know it's a string, so a single bit test is
  // enough.
  __ Ldrb(hash_scratch, FieldMemOperand(map_scratch, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ TestAndBranchIfAnySet(hash_scratch, kIsNotInternalizedMask, not_unique);

  __ Bind(&unique);
  // Fall through if the key is a unique name.
}


// Neither 'object' nor 'key' is modified by this function.
//
// If the 'unmapped_case' or 'slow_case' exit is taken, the 'map' register is
// left with the object's elements map. Otherwise, it is used as a scratch
// register.
static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register map,
                                                Register scratch1,
                                                Register scratch2,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  ASSERT(!AreAliased(object, key, map, scratch1, scratch2));

  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the elements
  // map check later, we do not need to check for interceptors or
  // whether it requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ JumpIfObjectType(object, map, scratch1, FIRST_JS_RECEIVER_TYPE,
                      slow_case, lt);

  // Check that the key is a positive smi.
  __ JumpIfNotSmi(key, slow_case);
  __ Tbnz(key, kXSignBit, slow_case);

  // Load the elements object and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ Ldr(map, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(map, scratch1, arguments_map, slow_case, DONT_DO_SMI_CHECK);

  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup.
  __ Ldr(scratch1, FieldMemOperand(map, FixedArray::kLengthOffset));
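  // The first two elements of the parameter map are the context and the
  // arguments backing store, so the number of mapped arguments is length - 2.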
  __ Sub(scratch1, scratch1, Smi::FromInt(2));
  __ Cmp(key, scratch1);
  __ B(hs, unmapped_case);

  // Load element index and check whether it is the hole.
  static const int offset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ Add(scratch1, map, offset);
  __ SmiUntag(scratch2, key);
  __ Ldr(scratch1, MemOperand(scratch1, scratch2, LSL, kPointerSizeLog2));
  __ JumpIfRoot(scratch1, Heap::kTheHoleValueRootIndex, unmapped_case);

  // Load value from context and return it.
  __ Ldr(scratch2, FieldMemOperand(map, FixedArray::kHeaderSize));
  __ SmiUntag(scratch1);
  __ Lsl(scratch1, scratch1, kPointerSizeLog2);
  __ Add(scratch1, scratch1, Context::kHeaderSize - kHeapObjectTag);
  // The base of the result (scratch2) is passed to RecordWrite in
  // KeyedStoreIC::GenerateSloppyArguments and it must be a HeapObject.
  return MemOperand(scratch2, scratch1);
}


// The 'parameter_map' register must be loaded with the parameter map of the
// arguments object and is overwritten.
static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  ASSERT(!AreAliased(key, parameter_map, scratch));

  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ Ldr(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  Handle<Map> fixed_array_map(masm->isolate()->heap()->fixed_array_map());
  __ CheckMap(
      backing_store, scratch, fixed_array_map, slow_case, DONT_DO_SMI_CHECK);
  __ Ldr(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Cmp(key, scratch);
  __ B(hs, slow_case);

  __ Add(backing_store,
         backing_store,
         FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiUntag(scratch, key);
  return MemOperand(backing_store, scratch, LSL, kPointerSizeLog2);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::LOAD_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, x0, x2, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------
  Label miss;

  GenerateNameDictionaryReceiverCheck(masm, x0, x1, x3, x4, &miss);

  // x1 now holds the property dictionary.
  GenerateDictionaryLoad(masm, &miss, x1, x2, x0, x3, x4);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  GenerateMiss(masm);
}


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();
  ASM_LOCATION("LoadIC::GenerateMiss");

  __ IncrementCounter(isolate->counters()->load_miss(), 1, x3, x4);

  // Perform tail call to the entry.
  __ Push(x0, x2);
  ExternalReference ref =
      ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x2    : name
  //  -- lr    : return address
  //  -- x0    : receiver
  // -----------------------------------

  __ Push(x0, x2);
  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Register result = x0;
  Register key = x0;
  Register receiver = x1;
  Label miss, unmapped;

  Register map_scratch = x2;
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, map_scratch, x3, x4, &unmapped, &miss);
  __ Ldr(result, mapped_location);
  __ Ret();

  __ Bind(&unmapped);
  // Parameter map is left in map_scratch when a jump on unmapped is done.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, map_scratch, x3, &miss);
  __ Ldr(x2, unmapped_location);
  __ JumpIfRoot(x2, Heap::kTheHoleValueRootIndex, &miss);
  // Move the result to x0. x0 must be preserved on miss.
  __ Mov(result, x2);
  __ Ret();

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSloppyArguments");
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  // -----------------------------------

  Label slow, notin;

  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register map = x3;

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register mapped1 = x4;
  Register mapped2 = x5;

  MemOperand mapped =
      GenerateMappedArgumentsLookup(masm, receiver, key, map,
                                    mapped1, mapped2,
                                    &notin, &slow);
  Operand mapped_offset = mapped.OffsetAsOperand();
  __ Str(value, mapped);
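  // RecordWrite needs the absolute address of the slot just written, so
  // recompute it from the MemOperand's base and offset.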
  __ Add(x10, mapped.base(), mapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(mapped.base(), x10, x11, kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();

  __ Bind(&notin);

  // These registers are used by GenerateMappedArgumentsLookup to build a
  // MemOperand. They are live for as long as the MemOperand is live.
  Register unmapped1 = map;   // This is assumed to alias 'map'.
  Register unmapped2 = x4;
  MemOperand unmapped =
      GenerateUnmappedArgumentsLookup(masm, key, unmapped1, unmapped2, &slow);
  Operand unmapped_offset = unmapped.OffsetAsOperand();
  __ Str(value, unmapped);
  __ Add(x10, unmapped.base(), unmapped_offset);
  __ Mov(x11, value);
  __ RecordWrite(unmapped.base(), x10, x11,
                 kLRHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret();
  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, x10, x11);

  __ Push(x1, x0);

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Register key = x0;
  Register receiver = x1;

  __ Push(receiver, key);
  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


static void GenerateKeyedLoadWithSmiKey(MacroAssembler* masm,
                                        Register key,
                                        Register receiver,
                                        Register scratch1,
                                        Register scratch2,
                                        Register scratch3,
                                        Register scratch4,
                                        Register scratch5,
                                        Label* slow) {
  ASSERT(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label check_number_dictionary;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasIndexedInterceptor, slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(scratch1, scratch2, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, scratch3, scratch2, scratch1, result, NULL, slow);
  __ IncrementCounter(
      isolate->counters()->keyed_load_generic_smi(), 1, scratch1, scratch2);
  __ Ret();

  __ Bind(&check_number_dictionary);
  __ Ldr(scratch3, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ Ldr(scratch2, FieldMemOperand(scratch3, JSObject::kMapOffset));

  // Check whether we have a number dictionary.
  __ JumpIfNotRoot(scratch2, Heap::kHashTableMapRootIndex, slow);

  __ LoadFromNumberDictionary(
      slow, scratch3, key, result, scratch1, scratch2, scratch4, scratch5);
  __ Ret();
}


static void GenerateKeyedLoadWithNameKey(MacroAssembler* masm,
                                         Register key,
                                         Register receiver,
                                         Register scratch1,
                                         Register scratch2,
                                         Register scratch3,
                                         Register scratch4,
                                         Register scratch5,
                                         Label* slow) {
  ASSERT(!AreAliased(
      key, receiver, scratch1, scratch2, scratch3, scratch4, scratch5));

  Isolate* isolate = masm->isolate();
  Label probe_dictionary, property_array_property;
  // If we can load the value, it should be returned in x0.
  Register result = x0;

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, scratch1, scratch2, Map::kHasNamedInterceptor, slow);

  // If the receiver is a fast-case object, check the keyed lookup cache.
  // Otherwise probe the dictionary.
  __ Ldr(scratch2, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Ldr(scratch3, FieldMemOperand(scratch2, HeapObject::kMapOffset));
  __ JumpIfRoot(scratch3, Heap::kHashTableMapRootIndex, &probe_dictionary);

  // We keep the map of the receiver in scratch1.
  Register receiver_map = scratch1;

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Mov(scratch2, Operand(receiver_map, ASR, KeyedLookupCache::kMapHashShift));
  __ Ldr(scratch3.W(), FieldMemOperand(key, Name::kHashFieldOffset));
  __ Eor(scratch2, scratch2, Operand(scratch3, ASR, Name::kHashShift));
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(scratch2, scratch2, mask);

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);

  __ Mov(scratch3, cache_keys);
  __ Add(scratch3, scratch3, Operand(scratch2, LSL, kPointerSizeLog2 + 1));
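  // Each cache entry is a (map, name) pair, so the bucket index is scaled by
  // two pointers (hence the shift by kPointerSizeLog2 + 1).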

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    // Load map and make scratch3 point to the next entry.
    __ Ldr(scratch4, MemOperand(scratch3, kPointerSize * 2, PostIndex));
    __ Cmp(receiver_map, scratch4);
    __ B(ne, &try_next_entry);
    __ Ldr(scratch4, MemOperand(scratch3, -kPointerSize));  // Load name.
    __ Cmp(key, scratch4);
    __ B(eq, &hit_on_nth_entry[i]);
    __ Bind(&try_next_entry);
  }

  // Last entry.
  __ Ldr(scratch4, MemOperand(scratch3, kPointerSize, PostIndex));
  __ Cmp(receiver_map, scratch4);
  __ B(ne, slow);
  __ Ldr(scratch4, MemOperand(scratch3));
  __ Cmp(key, scratch4);
  __ B(ne, slow);

  // Get field offset.
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ Bind(&hit_on_nth_entry[i]);
    __ Mov(scratch3, cache_field_offsets);
    if (i != 0) {
      __ Add(scratch2, scratch2, i);
    }
    __ Ldr(scratch4.W(), MemOperand(scratch3, scratch2, LSL, 2));
    __ Ldrb(scratch5,
            FieldMemOperand(receiver_map, Map::kInObjectPropertiesOffset));
    __ Subs(scratch4, scratch4, scratch5);
    __ B(ge, &property_array_property);
    if (i != 0) {
      __ B(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ Bind(&load_in_object_property);
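  // scratch4 now holds field_offset - inobject_properties, a negative value:
  // adding the instance size below yields the field's index, in pointers,
  // from the start of the object.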
  __ Ldrb(scratch5, FieldMemOperand(receiver_map, Map::kInstanceSizeOffset));
  __ Add(scratch5, scratch5, scratch4);  // Index from start of object.
  __ Sub(receiver, receiver, kHeapObjectTag);  // Remove the heap tag.
  __ Ldr(result, MemOperand(receiver, scratch5, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);
  __ Ret();

  // Load property array property.
  __ Bind(&property_array_property);
  __ Ldr(scratch1, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Add(scratch1, scratch1, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Ldr(result, MemOperand(scratch1, scratch4, LSL, kPointerSizeLog2));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1, scratch1, scratch2);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it exists.
  __ Bind(&probe_dictionary);
  __ Ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ Ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, scratch1, slow);
  // Load the property.
  GenerateDictionaryLoad(masm, slow, scratch2, key, result, scratch1, scratch3);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1, scratch1, scratch2);
  __ Ret();
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Label slow, check_name, index_smi, index_name;

  Register key = x0;
  Register receiver = x1;

  __ JumpIfNotSmi(key, &check_name);
  __ Bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.
  GenerateKeyedLoadWithSmiKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);

  // Slow case, key and receiver still in x0 and x1.
  __ Bind(&slow);
  __ IncrementCounter(
      masm->isolate()->counters()->keyed_load_generic_slow(), 1, x2, x3);
  GenerateRuntimeGetProperty(masm);

  __ Bind(&check_name);
  GenerateKeyNameCheck(masm, key, x2, x3, &index_name, &slow);

  GenerateKeyedLoadWithNameKey(masm, key, receiver, x2, x3, x4, x5, x6, &slow);

  __ Bind(&index_name);
  __ IndexFromHash(x3, key);
  // Now jump to the place where smi keys are handled.
  __ B(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key (index)
  //  -- x1    : receiver
  // -----------------------------------
  Label miss;

  Register index = x0;
  Register receiver = x1;
  Register result = x0;
  Register scratch = x3;

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ Bind(&miss);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : key
  //  -- x1    : receiver
  // -----------------------------------
  Label slow;
  Register key = x0;
  Register receiver = x1;

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &slow);

  // Check that the key is an array index, that is, a Uint32.
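  // A negative smi has its sign bit set, so testing the tag and sign bits
  // together rejects both non-smis and negative keys with a single branch.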
  __ TestAndBranchIfAnySet(key, kSmiTagMask | kSmiSignMask, &slow);

  // Get the map of the receiver.
  Register map = x2;
  __ Ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ Ldrb(x3, FieldMemOperand(map, Map::kBitFieldOffset));
  ASSERT(kSlowCaseBitFieldMask ==
      ((1 << Map::kIsAccessCheckNeeded) | (1 << Map::kHasIndexedInterceptor)));
  __ Tbnz(x3, Map::kIsAccessCheckNeeded, &slow);
  __ Tbz(x3, Map::kHasIndexedInterceptor, &slow);

  // Everything is fine, call runtime.
  __ Push(receiver, key);
  __ TailCallExternalReference(
      ExternalReference(IC_Utility(kKeyedLoadPropertyWithInterceptor),
                        masm->isolate()),
      2,
      1);

  __ Bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateMiss");
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  ASM_LOCATION("KeyedStoreIC::GenerateSlow");
  // ---------- S t a t e --------------
  //  -- lr    : return address
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateRuntimeSetProperty");
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  // -----------------------------------

  // Push receiver, key and value for runtime call.
  __ Push(x2, x1, x0);

  // Push PropertyAttributes(NONE) and strict_mode for runtime call.
  STATIC_ASSERT(NONE == 0);
  __ Mov(x10, Smi::FromInt(strict_mode));
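  // NONE is 0 and the smi tag is 0, so pushing xzr pushes Smi::FromInt(NONE).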
  __ Push(xzr, x10);

  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  ASSERT(!AreAliased(
      value, key, receiver, receiver_map, elements_map, elements, x10, x11));

  Label transition_smi_elements;
  Label transition_double_elements;
  Label fast_double_without_map_check;
  Label non_double_value;
  Label finish_store;

  __ Bind(fast_object);
  if (check_map == kCheckMap) {
    __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Cmp(elements_map,
           Operand(masm->isolate()->factory()->fixed_array_map()));
    __ B(ne, fast_double);
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed;
  __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
  __ Bind(&holecheck_passed);

  // Smi stores don't require further checks.
  __ JumpIfSmi(value, &finish_store);

  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);

  __ Bind(&finish_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }

  Register address = x11;
  __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Str(value, MemOperand(address));

  Label dont_record_write;
  __ JumpIfSmi(value, &dont_record_write);

  // Update write barrier for the elements array address.
  __ Mov(x10, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 x10,
                 kLRHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);

  __ Bind(&dont_record_write);
  __ Ret();


  __ Bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so go to
  // the runtime.
  __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
  __ Ldr(x11, MemOperand(x10));
  __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);

  __ Bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,
                                 x10,
                                 d0,
                                 d1,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Add(x10, key, Smi::FromInt(1));
    __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();


  __ Bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
  __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(masm, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&fast_double_without_map_check);

  __ Bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(masm, mode,
                                                                   slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);

  __ Bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         x10,
                                         x11,
                                         slow);
  ASSERT(receiver_map.Is(x3));  // Transition code expects map in x3.
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(masm, mode, slow);
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ B(&finish_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  ASM_LOCATION("KeyedStoreIC::GenerateGeneric");
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  // -----------------------------------
  Label slow;
  Label array;
  Label fast_object;
  Label extra;
  Label fast_object_grow;
  Label fast_double_grow;
  Label fast_double;

  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register receiver_map = x3;
  Register elements = x4;
  Register elements_map = x5;

  __ JumpIfNotSmi(key, &slow);
  __ JumpIfSmi(receiver, &slow);
  __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle observed
  // objects.
  __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ TestAndBranchIfAnySet(
      x10, (1 << Map::kIsAccessCheckNeeded) | (1 << Map::kIsObserved), &slow);

  // Check if the object is a JS array or not.
  Register instance_type = x10;
  __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
  __ B(eq, &array);
  // Check that the object is some kind of JSObject.
  __ Cmp(instance_type, FIRST_JS_OBJECT_TYPE);
  __ B(lt, &slow);

  // Object case: Check key against length in the elements array.
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(hi, &fast_object);


  __ Bind(&slow);
  // Slow case, handle jump to runtime.
  // Live values:
  //  x0: value
  //  x1: key
  //  x2: receiver
  GenerateRuntimeSetProperty(masm, strict_mode);


  __ Bind(&extra);
  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].

  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(ls, &slow);

  __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
  __ B(eq, &fast_object_grow);
  __ Cmp(elements_map,
         Operand(masm->isolate()->factory()->fixed_double_array_map()));
  __ B(eq, &fast_double_grow);
  __ B(&slow);


  __ Bind(&array);
  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.

  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Cmp(x10, Operand::UntagSmi(key));
  __ B(eq, &extra);  // We can handle the case where we are appending 1 element.
  __ B(lo, &slow);
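  // In-bounds store: fall through to the fast object case, which is bound at
  // the start of the first KeyedStoreGenerateGenericHelper expansion below.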

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Probe the stub cache.
  Code::Flags flags = Code::ComputeHandlerFlags(Code::STORE_IC);
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, x1, x2, x3, x4, x5, x6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(x1, x2, x0);

  // Tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------
  Label miss;
  Register value = x0;
  Register receiver = x1;
  Register name = x2;
  Register dictionary = x3;

  GenerateNameDictionaryReceiverCheck(
      masm, receiver, dictionary, x4, x5, &miss);

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, x4, x5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, x4, x5);
  __ Ret();

  // Cache miss: Jump to runtime.
  __ Bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, x4, x5);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  ASM_LOCATION("StoreIC::GenerateRuntimeSetProperty");
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  __ Push(x1, x2, x0);

  __ Mov(x11, Smi::FromInt(NONE));  // PropertyAttributes
  __ Mov(x10, Smi::FromInt(strict_mode));
  __ Push(x11, x10);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 5, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // ---------- S t a t e --------------
  //  -- x0    : value
  //  -- x1    : receiver
  //  -- x2    : name
  //  -- lr    : return address
  // -----------------------------------

  // Push receiver, name and value for runtime call.
  __ Push(x1, x2, x0);

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return al;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address info_address =
      Assembler::return_address_from_call_start(address);

  InstructionSequence* patch_info = InstructionSequence::At(info_address);
  return patch_info->IsInlineData();
}


// Activate a SMI fast-path by patching the instructions generated by
// JumpPatchSite::EmitJumpIf(Not)Smi(), using the information encoded by
// JumpPatchSite::EmitPatchInfo().
void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  // The patch information is encoded in the instruction stream using
  // instructions which have no side effects, so we can safely execute them.
  // The patch information is encoded directly after the call to the helper
  // function which is requesting this patch operation.
  Address info_address =
      Assembler::return_address_from_call_start(address);
  InlineSmiCheckInfo info(info_address);

  // Check and decode the patch information instruction.
  if (!info.HasSmiCheck()) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ Patching ic at %p, marker=%p, SMI check=%p\n",
           address, info_address, reinterpret_cast<void*>(info.SmiCheck()));
  }

  // Patch and activate code generated by JumpPatchSite::EmitJumpIfNotSmi()
  // and JumpPatchSite::EmitJumpIfSmi().
  // Changing
  //   tb(n)z xzr, #0, <target>
  // to
  //   tb(!n)z test_reg, #0, <target>
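  // Bit 0 of xzr always reads as zero, so a tb(n)z on xzr has a fixed
  // outcome; swapping xzr for a real register (and inverting the condition)
  // is what enables or disables the inline smi check.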
  Instruction* to_patch = info.SmiCheck();
  PatchingAssembler patcher(to_patch, 1);
  ASSERT(to_patch->IsTestBranch());
  ASSERT(to_patch->ImmTestBranchBit5() == 0);
  ASSERT(to_patch->ImmTestBranchBit40() == 0);

  STATIC_ASSERT(kSmiTag == 0);
  STATIC_ASSERT(kSmiTagMask == 1);

  int branch_imm = to_patch->ImmTestBranch();
  Register smi_reg;
  if (check == ENABLE_INLINED_SMI_CHECK) {
    ASSERT(to_patch->Rt() == xzr.code());
    smi_reg = info.SmiRegister();
  } else {
    ASSERT(check == DISABLE_INLINED_SMI_CHECK);
    ASSERT(to_patch->Rt() != xzr.code());
    smi_reg = xzr;
  }

  if (to_patch->Mask(TestBranchMask) == TBZ) {
    // This is JumpIfNotSmi(smi_reg, branch_imm).
    patcher.tbnz(smi_reg, 0, branch_imm);
  } else {
    ASSERT(to_patch->Mask(TestBranchMask) == TBNZ);
    // This is JumpIfSmi(smi_reg, branch_imm).
    patcher.tbz(smi_reg, 0, branch_imm);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64