// Copyright 2012 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.


#include "src/v8.h"

#if V8_TARGET_ARCH_MIPS64

#include "src/code-stubs.h"
#include "src/codegen.h"
#include "src/ic-inl.h"
#include "src/runtime.h"
#include "src/stub-cache.h"

namespace v8 {
namespace internal {


// ----------------------------------------------------------------------------
// Static IC stub generators.
//

#define __ ACCESS_MASM(masm)


static void GenerateGlobalInstanceTypeCheck(MacroAssembler* masm,
                                            Register type,
                                            Label* global_object) {
  // Register usage:
  //   type: holds the receiver instance type on entry.
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_BUILTINS_OBJECT_TYPE));
  __ Branch(global_object, eq, type, Operand(JS_GLOBAL_PROXY_TYPE));
}


// Helper function used from LoadIC GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// result:   Register for the result. It is only updated if a jump to the miss
//           label is not done. It may be the same as elements or name, in
//           which case that register is clobbered on success.
// The two scratch registers need to be different from elements, name and
// result.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryLoad(MacroAssembler* masm,
                                   Label* miss,
                                   Register elements,
                                   Register name,
                                   Register result,
                                   Register scratch1,
                                   Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry check that the value is a normal
  // property.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
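  // Each NameDictionary entry is a (key, value, details) triple, so the value
  // slot sits one pointer past the entry start and the details slot two
  // pointers past it. A normal property has a zero type field in its details.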
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at,
         scratch1,
         Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Get the value at the masked, scaled index and return.
  __ ld(result,
        FieldMemOperand(scratch2, kElementsStartOffset + 1 * kPointerSize));
}


// Helper function used from StoreIC::GenerateNormal.
//
// elements: Property dictionary. It is not clobbered if a jump to the miss
//           label is done.
// name:     Property name. It is not clobbered if a jump to the miss label is
//           done.
// value:    The value to store.
// The two scratch registers need to be different from elements, name and
// value.
// The generated code assumes that the receiver has slow properties,
// is not a global object and does not have interceptors.
// The address left in scratch2 by
// NameDictionaryLookupStub::GeneratePositiveLookup() is used.
static void GenerateDictionaryStore(MacroAssembler* masm,
                                    Label* miss,
                                    Register elements,
                                    Register name,
                                    Register value,
                                    Register scratch1,
                                    Register scratch2) {
  // Main use of the scratch registers.
  // scratch1: Used as temporary and to hold the capacity of the property
  //           dictionary.
  // scratch2: Used as temporary.
  Label done;

  // Probe the dictionary.
  NameDictionaryLookupStub::GeneratePositiveLookup(masm,
                                                   miss,
                                                   &done,
                                                   elements,
                                                   name,
                                                   scratch1,
                                                   scratch2);

  // If probing finds an entry in the dictionary check that the value
  // is a normal property that is not read only.
  __ bind(&done);  // scratch2 == elements + kPointerSize * index.
  const int kElementsStartOffset = NameDictionary::kHeaderSize +
      NameDictionary::kElementsStartIndex * kPointerSize;
  const int kDetailsOffset = kElementsStartOffset + 2 * kPointerSize;
  const int kTypeAndReadOnlyMask =
      (PropertyDetails::TypeField::kMask |
       PropertyDetails::AttributesField::encode(READ_ONLY));
  __ ld(scratch1, FieldMemOperand(scratch2, kDetailsOffset));
  __ And(at, scratch1, Operand(Smi::FromInt(kTypeAndReadOnlyMask)));
  __ Branch(miss, ne, at, Operand(zero_reg));

  // Store the value at the masked, scaled index and return.
  const int kValueOffset = kElementsStartOffset + kPointerSize;
  __ Daddu(scratch2, scratch2, Operand(kValueOffset - kHeapObjectTag));
  __ sd(value, MemOperand(scratch2));

  // Update the write barrier. Make sure not to clobber the value.
  __ mov(scratch1, value);
  __ RecordWrite(
      elements, scratch2, scratch1, kRAHasNotBeenSaved, kDontSaveFPRegs);
}


// Checks the receiver for special cases (value type, slow case bits).
// Falls through for regular JS object.
static void GenerateKeyedLoadReceiverCheck(MacroAssembler* masm,
                                           Register receiver,
                                           Register map,
                                           Register scratch,
                                           int interceptor_bit,
                                           Label* slow) {
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, slow);
  // Get the map of the receiver.
  __ ld(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check bit field.
  __ lbu(scratch, FieldMemOperand(map, Map::kBitFieldOffset));
  __ And(at, scratch,
         Operand((1 << Map::kIsAccessCheckNeeded) | (1 << interceptor_bit)));
  __ Branch(slow, ne, at, Operand(zero_reg));
  // Check that the object is some kind of JS object EXCEPT JS Value type.
  // In the case that the object is a value-wrapper object,
  // we enter the runtime system to make sure that indexing into string
  // objects works as intended.
  DCHECK(JS_OBJECT_TYPE > JS_VALUE_TYPE);
  __ lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
  __ Branch(slow, lt, scratch, Operand(JS_OBJECT_TYPE));
}


// Loads an indexed element from a fast case array.
// If not_fast_array is NULL, doesn't perform the elements map check.
static void GenerateFastArrayLoad(MacroAssembler* masm,
                                  Register receiver,
                                  Register key,
                                  Register elements,
                                  Register scratch1,
                                  Register scratch2,
                                  Register result,
                                  Label* not_fast_array,
                                  Label* out_of_range) {
  // Register use:
  //
  // receiver - holds the receiver on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // key      - holds the smi key on entry.
  //            Unchanged unless 'result' is the same register.
  //
  // elements - holds the elements of the receiver on exit.
  //
  // result   - holds the result on exit if the load succeeded.
  //            Allowed to be the same as 'receiver' or 'key'.
  //            Unchanged on bailout so 'receiver' and 'key' can be safely
  //            used by further computation.
  //
  // Scratch registers:
  //
  // scratch1 - used to hold elements map and elements length.
  //            Holds the elements map if not_fast_array branch is taken.
  //
  // scratch2 - used to hold the loaded value.

  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  if (not_fast_array != NULL) {
    // Check that the object is in fast mode (not dictionary).
    __ ld(scratch1, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ LoadRoot(at, Heap::kFixedArrayMapRootIndex);
    __ Branch(not_fast_array, ne, scratch1, Operand(at));
  } else {
    __ AssertFastElements(elements);
  }

  // Check that the key (index) is within bounds.
  __ ld(scratch1, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(out_of_range, hs, key, Operand(scratch1));

  // Fast case: Do the load.
  __ Daddu(scratch1, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  // The key is a smi.
  STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2);
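  // On MIPS64 a smi keeps its payload in the upper 32 bits, so SmiScale is a
  // single arithmetic right shift by (kSmiShift - kPointerSizeLog2) that
  // yields index * kPointerSize.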
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(at, at, scratch1);
  __ ld(scratch2, MemOperand(at));

  __ LoadRoot(at, Heap::kTheHoleValueRootIndex);
  // In case the loaded value is the_hole we have to consult GetProperty
  // to ensure the prototype chain is searched.
  __ Branch(out_of_range, eq, scratch2, Operand(at));
  __ mov(result, scratch2);
}


// Checks whether a key is an array index string or a unique name.
// Falls through if a key is a unique name.
static void GenerateKeyNameCheck(MacroAssembler* masm,
                                 Register key,
                                 Register map,
                                 Register hash,
                                 Label* index_string,
                                 Label* not_unique) {
  // The key is not a smi.
  Label unique;
  // Is it a name?
  __ GetObjectType(key, map, hash);
  __ Branch(not_unique, hi, hash, Operand(LAST_UNIQUE_NAME_TYPE));
  STATIC_ASSERT(LAST_UNIQUE_NAME_TYPE == FIRST_NONSTRING_TYPE);
  __ Branch(&unique, eq, hash, Operand(LAST_UNIQUE_NAME_TYPE));
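  // LAST_UNIQUE_NAME_TYPE is the symbol type and symbols are unique by
  // definition; anything below it is a string and still needs the
  // internalized check below.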

  // Is the string an array index, with cached numeric value?
  __ lwu(hash, FieldMemOperand(key, Name::kHashFieldOffset));
  __ And(at, hash, Operand(Name::kContainsCachedArrayIndexMask));
  __ Branch(index_string, eq, at, Operand(zero_reg));

  // Is the string internalized? We know it's a string, so a single
  // bit test is enough.
  // map: key map
  __ lbu(hash, FieldMemOperand(map, Map::kInstanceTypeOffset));
  STATIC_ASSERT(kInternalizedTag == 0);
  __ And(at, hash, Operand(kIsNotInternalizedMask));
  __ Branch(not_unique, ne, at, Operand(zero_reg));

  __ bind(&unique);
}


void LoadIC::GenerateMegamorphic(MacroAssembler* masm) {
  // The return address is in ra.
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::LOAD_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, receiver, name, a3, a4, a5, a6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void LoadIC::GenerateNormal(MacroAssembler* masm) {
  Register dictionary = a0;
  DCHECK(!dictionary.is(ReceiverRegister()));
  DCHECK(!dictionary.is(NameRegister()));
  Label slow;

  __ ld(dictionary,
        FieldMemOperand(ReceiverRegister(), JSObject::kPropertiesOffset));
  GenerateDictionaryLoad(masm, &slow, dictionary, NameRegister(), v0, a3, a4);
  __ Ret();

  // Dictionary load failed, go slow (but don't miss).
  __ bind(&slow);
  GenerateRuntimeGetProperty(masm);
}


// A register that isn't one of the parameters to the load ic.
static const Register LoadIC_TempRegister() { return a3; }


void LoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->load_miss(), 1, a3, a4);

  __ mov(LoadIC_TempRegister(), ReceiverRegister());
  __ Push(LoadIC_TempRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kLoadIC_Miss), isolate);
  __ TailCallExternalReference(ref, 2, 1);
}


void LoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ mov(LoadIC_TempRegister(), ReceiverRegister());
  __ Push(LoadIC_TempRegister(), NameRegister());

  __ TailCallRuntime(Runtime::kGetProperty, 2, 1);
}


static MemOperand GenerateMappedArgumentsLookup(MacroAssembler* masm,
                                                Register object,
                                                Register key,
                                                Register scratch1,
                                                Register scratch2,
                                                Register scratch3,
                                                Label* unmapped_case,
                                                Label* slow_case) {
  Heap* heap = masm->isolate()->heap();

  // Check that the receiver is a JSObject. Because of the map check
  // later, we do not need to check for interceptors or whether it
  // requires access checks.
  __ JumpIfSmi(object, slow_case);
  // Check that the object is some kind of JSObject.
  __ GetObjectType(object, scratch1, scratch2);
  __ Branch(slow_case, lt, scratch2, Operand(FIRST_JS_RECEIVER_TYPE));

  // Check that the key is a positive smi.
  __ NonNegativeSmiTst(key, scratch1);
  __ Branch(slow_case, ne, scratch1, Operand(zero_reg));

  // Load the elements into scratch1 and check its map.
  Handle<Map> arguments_map(heap->sloppy_arguments_elements_map());
  __ ld(scratch1, FieldMemOperand(object, JSObject::kElementsOffset));
  __ CheckMap(scratch1,
              scratch2,
              arguments_map,
              slow_case,
              DONT_DO_SMI_CHECK);
  // Check if element is in the range of mapped arguments. If not, jump
  // to the unmapped lookup with the parameter map in scratch1.
  __ ld(scratch2, FieldMemOperand(scratch1, FixedArray::kLengthOffset));
  __ Dsubu(scratch2, scratch2, Operand(Smi::FromInt(2)));
  __ Branch(unmapped_case, Ugreater_equal, key, Operand(scratch2));

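  // A parameter map is laid out as [context, backing store, index_0, ...,
  // index_n-1], which is why two was subtracted from the length above and
  // why the mapped entries start two pointers past the FixedArray header.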
  // Load element index and check whether it is the hole.
  const int kOffset =
      FixedArray::kHeaderSize + 2 * kPointerSize - kHeapObjectTag;

  __ SmiUntag(scratch3, key);
  __ dsll(scratch3, scratch3, kPointerSizeLog2);
  __ Daddu(scratch3, scratch3, Operand(kOffset));

  __ Daddu(scratch2, scratch1, scratch3);
  __ ld(scratch2, MemOperand(scratch2));
  __ LoadRoot(scratch3, Heap::kTheHoleValueRootIndex);
  __ Branch(unmapped_case, eq, scratch2, Operand(scratch3));

  // Load value from context and return it. We can reuse scratch1 because
  // we do not jump to the unmapped lookup (which requires the parameter
  // map in scratch1).
  __ ld(scratch1, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
  __ SmiUntag(scratch3, scratch2);
  __ dsll(scratch3, scratch3, kPointerSizeLog2);
  __ Daddu(scratch3, scratch3, Operand(Context::kHeaderSize - kHeapObjectTag));
  __ Daddu(scratch2, scratch1, scratch3);
  return MemOperand(scratch2);
}


static MemOperand GenerateUnmappedArgumentsLookup(MacroAssembler* masm,
                                                  Register key,
                                                  Register parameter_map,
                                                  Register scratch,
                                                  Label* slow_case) {
  // Element is in arguments backing store, which is referenced by the
  // second element of the parameter_map. The parameter_map register
  // must be loaded with the parameter map of the arguments object and is
  // overwritten.
  const int kBackingStoreOffset = FixedArray::kHeaderSize + kPointerSize;
  Register backing_store = parameter_map;
  __ ld(backing_store, FieldMemOperand(parameter_map, kBackingStoreOffset));
  __ CheckMap(backing_store,
              scratch,
              Heap::kFixedArrayMapRootIndex,
              slow_case,
              DONT_DO_SMI_CHECK);
  __ ld(scratch, FieldMemOperand(backing_store, FixedArray::kLengthOffset));
  __ Branch(slow_case, Ugreater_equal, key, Operand(scratch));
  __ SmiUntag(scratch, key);
  __ dsll(scratch, scratch, kPointerSizeLog2);
  __ Daddu(scratch,
           scratch,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ Daddu(scratch, backing_store, scratch);
  return MemOperand(scratch);
}


void KeyedLoadIC::GenerateSloppyArguments(MacroAssembler* masm) {
  // The return address is in ra.
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(key.is(a2));

  Label slow, notin;
  MemOperand mapped_location =
      GenerateMappedArgumentsLookup(
          masm, receiver, key, a0, a3, a4, &notin, &slow);
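  // The load below sits in the branch delay slot of the Ret and executes
  // before control returns to the caller.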
  __ Ret(USE_DELAY_SLOT);
  __ ld(v0, mapped_location);
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a0.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, a0, a3, &slow);
  __ ld(a0, unmapped_location);
  __ LoadRoot(a3, Heap::kTheHoleValueRootIndex);
  __ Branch(&slow, eq, a0, Operand(a3));
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateSloppyArguments(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register value = ValueRegister();
  DCHECK(value.is(a0));

  Label slow, notin;
  // The store address is returned as a MemOperand in mapped_location.
  MemOperand mapped_location = GenerateMappedArgumentsLookup(
      masm, receiver, key, a3, a4, a5, &notin, &slow);
  __ sd(value, mapped_location);
  __ mov(t1, value);
  DCHECK_EQ(mapped_location.offset(), 0);
  __ RecordWrite(a3, mapped_location.rm(), t1,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, value);  // (In delay slot) return the value stored in v0.
  __ bind(&notin);
  // The unmapped lookup expects that the parameter map is in a3.
  // The store address is returned as a MemOperand in unmapped_location.
  MemOperand unmapped_location =
      GenerateUnmappedArgumentsLookup(masm, key, a3, a4, &slow);
  __ sd(value, unmapped_location);
  __ mov(t1, value);
  DCHECK_EQ(unmapped_location.offset(), 0);
  __ RecordWrite(a3, unmapped_location.rm(), t1,
                 kRAHasNotBeenSaved, kDontSaveFPRegs);
  __ Ret(USE_DELAY_SLOT);
  __ mov(v0, a0);  // (In delay slot) return the value stored in v0.
  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedLoadIC::GenerateMiss(MacroAssembler* masm) {
  // The return address is in ra.
  Isolate* isolate = masm->isolate();

  __ IncrementCounter(isolate->counters()->keyed_load_miss(), 1, a3, a4);

  __ Push(ReceiverRegister(), NameRegister());

  // Perform tail call to the entry.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedLoadIC_Miss), isolate);

  __ TailCallExternalReference(ref, 2, 1);
}


// IC register specifications
const Register LoadIC::ReceiverRegister() { return a1; }
const Register LoadIC::NameRegister() { return a2; }


const Register LoadIC::SlotRegister() {
  DCHECK(FLAG_vector_ics);
  return a0;
}


const Register LoadIC::VectorRegister() {
  DCHECK(FLAG_vector_ics);
  return a3;
}


const Register StoreIC::ReceiverRegister() { return a1; }
const Register StoreIC::NameRegister() { return a2; }
const Register StoreIC::ValueRegister() { return a0; }


const Register KeyedStoreIC::MapRegister() {
  return a3;
}


void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
  // The return address is in ra.

  __ Push(ReceiverRegister(), NameRegister());

  __ TailCallRuntime(Runtime::kKeyedGetProperty, 2, 1);
}


void KeyedLoadIC::GenerateGeneric(MacroAssembler* masm) {
  // The return address is in ra.
  Label slow, check_name, index_smi, index_name, property_array_property;
  Label probe_dictionary, check_number_dictionary;

  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(key.is(a2));
  DCHECK(receiver.is(a1));

  Isolate* isolate = masm->isolate();

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &check_name);
  __ bind(&index_smi);
  // Now the key is known to be a smi. This place is also jumped to from below
  // where a numeric string is converted to a smi.

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a0, a3, Map::kHasIndexedInterceptor, &slow);

  // Check the receiver's map to see if it has fast elements.
  __ CheckFastElements(a0, a3, &check_number_dictionary);

  GenerateFastArrayLoad(
      masm, receiver, key, a0, a3, a4, v0, NULL, &slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_smi(), 1, a4, a3);
  __ Ret();

  __ bind(&check_number_dictionary);
  __ ld(a4, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ ld(a3, FieldMemOperand(a4, JSObject::kMapOffset));

  // Check whether the elements is a number dictionary.
  // a3: elements map
  // a4: elements
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&slow, ne, a3, Operand(at));
  __ dsra32(a0, key, 0);
  __ LoadFromNumberDictionary(&slow, a4, key, v0, a0, a3, a5);
  __ Ret();

  // Slow case, key and receiver still in a2 and a1.
  __ bind(&slow);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_slow(),
                      1,
                      a4,
                      a3);
  GenerateRuntimeGetProperty(masm);

  __ bind(&check_name);
  GenerateKeyNameCheck(masm, key, a0, a3, &index_name, &slow);

  GenerateKeyedLoadReceiverCheck(
      masm, receiver, a0, a3, Map::kHasNamedInterceptor, &slow);

  // If the receiver is a fast-case object, check the keyed lookup
  // cache. Otherwise probe the dictionary.
  __ ld(a3, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ ld(a4, FieldMemOperand(a3, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHashTableMapRootIndex);
  __ Branch(&probe_dictionary, eq, a4, Operand(at));

  // Load the map of the receiver, compute the keyed lookup cache hash
  // based on 32 bits of the map pointer and the name hash.
  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ dsll32(a3, a0, 0);
  __ dsrl32(a3, a3, 0);
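  // The paired 32-bit shifts zero-extend the low 32 bits of the map pointer;
  // only those bits participate in the lookup cache hash.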
  __ dsra(a3, a3, KeyedLookupCache::kMapHashShift);
  __ lwu(a4, FieldMemOperand(key, Name::kHashFieldOffset));
  __ dsra(at, a4, Name::kHashShift);
  __ xor_(a3, a3, at);
  int mask = KeyedLookupCache::kCapacityMask & KeyedLookupCache::kHashMask;
  __ And(a3, a3, Operand(mask));

  // Load the key (consisting of map and unique name) from the cache and
  // check for match.
  Label load_in_object_property;
  static const int kEntriesPerBucket = KeyedLookupCache::kEntriesPerBucket;
  Label hit_on_nth_entry[kEntriesPerBucket];
  ExternalReference cache_keys =
      ExternalReference::keyed_lookup_cache_keys(isolate);
  __ li(a4, Operand(cache_keys));
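  // Each cache bucket entry is a (map, name) pair, hence the index is scaled
  // by two pointers.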
  __ dsll(at, a3, kPointerSizeLog2 + 1);
  __ daddu(a4, a4, at);

  for (int i = 0; i < kEntriesPerBucket - 1; i++) {
    Label try_next_entry;
    __ ld(a5, MemOperand(a4, kPointerSize * i * 2));
    __ Branch(&try_next_entry, ne, a0, Operand(a5));
    __ ld(a5, MemOperand(a4, kPointerSize * (i * 2 + 1)));
    __ Branch(&hit_on_nth_entry[i], eq, key, Operand(a5));
    __ bind(&try_next_entry);
  }

  __ ld(a5, MemOperand(a4, kPointerSize * (kEntriesPerBucket - 1) * 2));
  __ Branch(&slow, ne, a0, Operand(a5));
  __ ld(a5, MemOperand(a4, kPointerSize * ((kEntriesPerBucket - 1) * 2 + 1)));
  __ Branch(&slow, ne, key, Operand(a5));

  // Get field offset.
  // a0 : receiver's map
  // a3 : lookup cache index
  ExternalReference cache_field_offsets =
      ExternalReference::keyed_lookup_cache_field_offsets(isolate);

  // Hit on nth entry.
  for (int i = kEntriesPerBucket - 1; i >= 0; i--) {
    __ bind(&hit_on_nth_entry[i]);
    __ li(a4, Operand(cache_field_offsets));

    // TODO(yy) This data structure does NOT follow natural pointer size.
    __ dsll(at, a3, kPointerSizeLog2 - 1);
    __ daddu(at, a4, at);
    __ lwu(a5, MemOperand(at, kPointerSize / 2 * i));

    __ lbu(a6, FieldMemOperand(a0, Map::kInObjectPropertiesOffset));
    __ Dsubu(a5, a5, a6);
    __ Branch(&property_array_property, ge, a5, Operand(zero_reg));
    if (i != 0) {
      __ Branch(&load_in_object_property);
    }
  }

  // Load in-object property.
  __ bind(&load_in_object_property);
  __ lbu(a6, FieldMemOperand(a0, Map::kInstanceSizeOffset));
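  // The map stores the instance size in pointer-size words, and a5 holds the
  // field index relative to the end of the in-object fields (negative here),
  // so their sum is the field's word offset from the start of the object.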
  // Index from start of object.
  __ daddu(a6, a6, a5);
  // Remove the heap tag.
  __ Dsubu(receiver, receiver, Operand(kHeapObjectTag));
  __ dsll(at, a6, kPointerSizeLog2);
  __ daddu(at, receiver, at);
  __ ld(v0, MemOperand(at));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      a4,
                      a3);
  __ Ret();

  // Load property array property.
  __ bind(&property_array_property);
  __ ld(receiver, FieldMemOperand(receiver, JSObject::kPropertiesOffset));
  __ Daddu(receiver, receiver, FixedArray::kHeaderSize - kHeapObjectTag);
  __ dsll(v0, a5, kPointerSizeLog2);
  __ Daddu(v0, v0, a1);
  __ ld(v0, MemOperand(v0));
  __ IncrementCounter(isolate->counters()->keyed_load_generic_lookup_cache(),
                      1,
                      a4,
                      a3);
  __ Ret();

  // Do a quick inline probe of the receiver's dictionary, if it
  // exists.
  __ bind(&probe_dictionary);
  // a3: elements
  __ ld(a0, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ lbu(a0, FieldMemOperand(a0, Map::kInstanceTypeOffset));
  GenerateGlobalInstanceTypeCheck(masm, a0, &slow);
  // Load the property to v0.
  GenerateDictionaryLoad(masm, &slow, a3, key, v0, a5, a4);
  __ IncrementCounter(isolate->counters()->keyed_load_generic_symbol(),
                      1,
                      a4,
                      a3);
  __ Ret();

  __ bind(&index_name);
  __ IndexFromHash(a3, key);
  // Now jump to the place where smi keys are handled.
  __ Branch(&index_smi);
}


void KeyedLoadIC::GenerateString(MacroAssembler* masm) {
  // Return address is in ra.
  Label miss;

  Register receiver = ReceiverRegister();
  Register index = NameRegister();
  Register scratch = a3;
  Register result = v0;
  DCHECK(!scratch.is(receiver) && !scratch.is(index));

  StringCharAtGenerator char_at_generator(receiver,
                                          index,
                                          scratch,
                                          result,
                                          &miss,  // When not a string.
                                          &miss,  // When not a number.
                                          &miss,  // When index out of range.
                                          STRING_INDEX_IS_ARRAY_INDEX);
  char_at_generator.GenerateFast(masm);
  __ Ret();

  StubRuntimeCallHelper call_helper;
  char_at_generator.GenerateSlow(masm, call_helper);

  __ bind(&miss);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                              StrictMode strict_mode) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  __ li(a0, Operand(Smi::FromInt(strict_mode)));  // Strict mode.
  __ Push(a0);

  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}


static void KeyedStoreGenerateGenericHelper(
    MacroAssembler* masm,
    Label* fast_object,
    Label* fast_double,
    Label* slow,
    KeyedStoreCheckMap check_map,
    KeyedStoreIncrementLength increment_length,
    Register value,
    Register key,
    Register receiver,
    Register receiver_map,
    Register elements_map,
    Register elements) {
  Label transition_smi_elements;
  Label finish_object_store, non_double_value, transition_double_elements;
  Label fast_double_without_map_check;

  // Fast case: Do the store, could be either Object or double.
  __ bind(fast_object);
  Register scratch_value = a4;
  Register address = a5;
  if (check_map == kCheckMap) {
    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
    __ Branch(fast_double, ne, elements_map,
              Operand(masm->isolate()->factory()->fixed_array_map()));
  }

  // HOLECHECK: guards "A[i] = V"
  // We have to go to the runtime if the current value is the hole because
  // there may be a callback on the element.
  Label holecheck_passed1;
  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ ld(scratch_value, MemOperand(address));

  __ Branch(&holecheck_passed1, ne, scratch_value,
            Operand(masm->isolate()->factory()->the_hole_value()));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&holecheck_passed1);

  // Smi stores don't require further checks.
  Label non_smi_value;
  __ JumpIfNotSmi(value, &non_smi_value);

  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  // It's irrelevant whether array is smi-only or not when writing a smi.
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch_value, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch_value);
  __ sd(value, MemOperand(address));
  __ Ret();

  __ bind(&non_smi_value);
  // Escape to elements kind transition case.
  __ CheckFastObjectElements(receiver_map, scratch_value,
                             &transition_smi_elements);

  // Fast elements array, store the value to the elements backing store.
  __ bind(&finish_object_store);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Daddu(address, elements,
           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ SmiScale(scratch_value, key, kPointerSizeLog2);
  __ Daddu(address, address, scratch_value);
  __ sd(value, MemOperand(address));
  // Update write barrier for the elements array address.
  __ mov(scratch_value, value);  // Preserve the value which is returned.
  __ RecordWrite(elements,
                 address,
                 scratch_value,
                 kRAHasNotBeenSaved,
                 kDontSaveFPRegs,
                 EMIT_REMEMBERED_SET,
                 OMIT_SMI_CHECK);
  __ Ret();

  __ bind(fast_double);
  if (check_map == kCheckMap) {
    // Check for fast double array case. If this fails, call through to the
    // runtime.
    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
    __ Branch(slow, ne, elements_map, Operand(at));
  }

  // HOLECHECK: guards "A[i] double hole?"
  // We have to see if the double version of the hole is present. If so
  // go to the runtime.
  __ Daddu(address, elements,
           Operand(FixedDoubleArray::kHeaderSize + sizeof(kHoleNanLower32) -
                   kHeapObjectTag));
  __ SmiScale(at, key, kPointerSizeLog2);
  __ daddu(address, address, at);
  __ lw(scratch_value, MemOperand(address));
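  // Only the upper word needs to be inspected: in a fast double array the
  // hole NaN is assumed to be the only value whose upper 32 bits equal
  // kHoleNanUpper32.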
  __ Branch(&fast_double_without_map_check, ne, scratch_value,
            Operand(kHoleNanUpper32));
  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch_value,
                                      slow);

  __ bind(&fast_double_without_map_check);
  __ StoreNumberToDoubleElements(value,
                                 key,
                                 elements,  // Overwritten.
                                 a3,        // Scratch regs...
                                 a4,
                                 a5,
                                 &transition_double_elements);
  if (increment_length == kIncrementLength) {
    // Add 1 to receiver->length.
    __ Daddu(scratch_value, key, Operand(Smi::FromInt(1)));
    __ sd(scratch_value, FieldMemOperand(receiver, JSArray::kLengthOffset));
  }
  __ Ret();

  __ bind(&transition_smi_elements);
  // Transition the array appropriately depending on the value type.
  __ ld(a4, FieldMemOperand(value, HeapObject::kMapOffset));
  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
  __ Branch(&non_double_value, ne, a4, Operand(at));

  // Value is a double. Transition FAST_SMI_ELEMENTS ->
  // FAST_DOUBLE_ELEMENTS and complete the store.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_DOUBLE_ELEMENTS,
                                         receiver_map,
                                         a4,
                                         slow);
  AllocationSiteMode mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS,
                                                    FAST_DOUBLE_ELEMENTS);
  ElementsTransitionGenerator::GenerateSmiToDouble(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&fast_double_without_map_check);

  __ bind(&non_double_value);
  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         a4,
                                         slow);
  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);

  __ bind(&transition_double_elements);
  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
  // HeapNumber. Make sure that the receiver is an Array with FAST_ELEMENTS
  // and transition the array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS.
  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS,
                                         FAST_ELEMENTS,
                                         receiver_map,
                                         a4,
                                         slow);
  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
  ElementsTransitionGenerator::GenerateDoubleToObject(
      masm, receiver, key, value, receiver_map, mode, slow);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ jmp(&finish_object_store);
}


void KeyedStoreIC::GenerateGeneric(MacroAssembler* masm,
                                   StrictMode strict_mode) {
  // ---------- S t a t e --------------
  //  -- a0     : value
  //  -- a1     : receiver
  //  -- a2     : key
  //  -- ra     : return address
  // -----------------------------------
  Label slow, fast_object, fast_object_grow;
  Label fast_double, fast_double_grow;
  Label array, extra, check_if_double_array;

  // Register usage.
  Register value = ValueRegister();
  Register key = NameRegister();
  Register receiver = ReceiverRegister();
  DCHECK(value.is(a0));
  Register receiver_map = a3;
  Register elements_map = a6;
  Register elements = a7;  // Elements array of the receiver.
  // a4 and a5 are used as general scratch registers.

  // Check that the key is a smi.
  __ JumpIfNotSmi(key, &slow);
  // Check that the object isn't a smi.
  __ JumpIfSmi(receiver, &slow);
  // Get the map of the object.
  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  // Check that the receiver does not require access checks and is not
  // observed. The generic stub does not perform map checks or handle
  // observed objects.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
  __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded |
                         1 << Map::kIsObserved));
  __ Branch(&slow, ne, a4, Operand(zero_reg));
  // Check if the object is a JS array or not.
  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
  // Check that the object is some kind of JSObject.
  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));

  // Object case: Check key against length in the elements array.
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  // Check array bounds. Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&fast_object, lo, key, Operand(a4));

  // Slow case, handle jump to runtime.
  __ bind(&slow);
  // Entry registers are intact.
  // a0: value.
  // a1: receiver.
  // a2: key.
  GenerateRuntimeSetProperty(masm, strict_mode);

  // Extra capacity case: Check if there is extra capacity to
  // perform the store and update the length. Used for adding one
  // element to the array by writing to array[array.length].
  __ bind(&extra);
  // The key and the array length (still in a4) from the bounds check above
  // are live here.
  // Only support writing to array[array.length].
  __ Branch(&slow, ne, key, Operand(a4));
  // Check for room in the elements backing store.
  // Both the key and the length of FixedArray are smis.
  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
  __ Branch(&slow, hs, key, Operand(a4));
  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
  __ Branch(
      &check_if_double_array, ne, elements_map, Heap::kFixedArrayMapRootIndex);

  __ jmp(&fast_object_grow);

  __ bind(&check_if_double_array);
  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
  __ jmp(&fast_double_grow);

  // Array case: Get the length and the elements array from the JS
  // array. Check that the array is in fast mode (and writable); if it
  // is the length is always a smi.
  __ bind(&array);
  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));

  // Check the key against the length in the array.
  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
  __ Branch(&extra, hs, key, Operand(a4));

  KeyedStoreGenerateGenericHelper(masm, &fast_object, &fast_double,
                                  &slow, kCheckMap, kDontIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
  KeyedStoreGenerateGenericHelper(masm, &fast_object_grow, &fast_double_grow,
                                  &slow, kDontCheckMap, kIncrementLength,
                                  value, key, receiver, receiver_map,
                                  elements_map, elements);
}


void KeyedLoadIC::GenerateIndexedInterceptor(MacroAssembler* masm) {
  // Return address is in ra.
  Label slow;

  Register receiver = ReceiverRegister();
  Register key = NameRegister();
  Register scratch1 = a3;
  Register scratch2 = a4;
  DCHECK(!scratch1.is(receiver) && !scratch1.is(key));
  DCHECK(!scratch2.is(receiver) && !scratch2.is(key));

  // Check that the receiver isn't a smi.
  __ JumpIfSmi(receiver, &slow);

  // Check that the key is an array index, that is Uint32.
  __ And(a4, key, Operand(kSmiTagMask | kSmiSignMask));
  __ Branch(&slow, ne, a4, Operand(zero_reg));

  // Get the map of the receiver.
  __ ld(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset));

  // Check that it has indexed interceptor and access checks
  // are not enabled for this object.
  __ lbu(scratch2, FieldMemOperand(scratch1, Map::kBitFieldOffset));
  __ And(scratch2, scratch2, Operand(kSlowCaseBitFieldMask));
  __ Branch(&slow, ne, scratch2, Operand(1 << Map::kHasIndexedInterceptor));
  // Everything is fine, call runtime.
  __ Push(receiver, key);  // Receiver, key.

  // Perform tail call to the entry.
  __ TailCallExternalReference(ExternalReference(
      IC_Utility(kLoadElementWithInterceptor), masm->isolate()), 2, 1);

  __ bind(&slow);
  GenerateMiss(masm);
}


void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Miss), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateSlow(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kStoreIC_Slow), masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
  // Push receiver, key and value for runtime call.
  // We can't use MultiPush as the order of the registers is important.
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
  // The slow case calls into the runtime to complete the store without causing
  // an IC miss that would otherwise cause a transition to the generic stub.
  ExternalReference ref =
      ExternalReference(IC_Utility(kKeyedStoreIC_Slow), masm->isolate());

  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateMegamorphic(MacroAssembler* masm) {
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  DCHECK(receiver.is(a1));
  DCHECK(name.is(a2));
  DCHECK(ValueRegister().is(a0));

  // Probe the stub cache.
  Code::Flags flags = Code::RemoveTypeAndHolderFromFlags(
      Code::ComputeHandlerFlags(Code::STORE_IC));
  masm->isolate()->stub_cache()->GenerateProbe(
      masm, flags, receiver, name, a3, a4, a5, a6);

  // Cache miss: Jump to runtime.
  GenerateMiss(masm);
}


void StoreIC::GenerateMiss(MacroAssembler* masm) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());
  // Perform tail call to the entry.
  ExternalReference ref = ExternalReference(IC_Utility(kStoreIC_Miss),
                                            masm->isolate());
  __ TailCallExternalReference(ref, 3, 1);
}


void StoreIC::GenerateNormal(MacroAssembler* masm) {
  Label miss;
  Register receiver = ReceiverRegister();
  Register name = NameRegister();
  Register value = ValueRegister();
  Register dictionary = a3;
  DCHECK(!AreAliased(value, receiver, name, dictionary, a4, a5));

  __ ld(dictionary, FieldMemOperand(receiver, JSObject::kPropertiesOffset));

  GenerateDictionaryStore(masm, &miss, dictionary, name, value, a4, a5);
  Counters* counters = masm->isolate()->counters();
  __ IncrementCounter(counters->store_normal_hit(), 1, a4, a5);
  __ Ret();

  __ bind(&miss);
  __ IncrementCounter(counters->store_normal_miss(), 1, a4, a5);
  GenerateMiss(masm);
}


void StoreIC::GenerateRuntimeSetProperty(MacroAssembler* masm,
                                         StrictMode strict_mode) {
  __ Push(ReceiverRegister(), NameRegister(), ValueRegister());

  __ li(a0, Operand(Smi::FromInt(strict_mode)));
  __ Push(a0);

  // Do tail-call to runtime routine.
  __ TailCallRuntime(Runtime::kSetProperty, 4, 1);
}


#undef __


Condition CompareIC::ComputeCondition(Token::Value op) {
  switch (op) {
    case Token::EQ_STRICT:
    case Token::EQ:
      return eq;
    case Token::LT:
      return lt;
    case Token::GT:
      return gt;
    case Token::LTE:
      return le;
    case Token::GTE:
      return ge;
    default:
      UNREACHABLE();
      return kNoCondition;
  }
}


bool CompareIC::HasInlinedSmiCode(Address address) {
  // The address of the instruction following the call.
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  return Assembler::IsAndImmediate(instr) &&
      Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code());
}


void PatchInlinedSmiCode(Address address, InlinedSmiCheck check) {
  Address andi_instruction_address =
      address + Assembler::kCallTargetAddressOffset;

  // If the instruction following the call is not an andi at, rx, #yyy,
  // nothing was inlined.
  Instr instr = Assembler::instr_at(andi_instruction_address);
  if (!(Assembler::IsAndImmediate(instr) &&
        Assembler::GetRt(instr) == static_cast<uint32_t>(zero_reg.code()))) {
    return;
  }

  // Extract the delta encoded in the andi instruction; it is the distance
  // back to the inlined smi check whose instructions are patched below.
  int delta = Assembler::GetImmediate16(instr);
  delta += Assembler::GetRs(instr) * kImm16Mask;
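  // The delta is encoded as imm16 + rs * kImm16Mask, so a delta that does
  // not fit in 16 bits spills into the rs field of the andi instruction.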
  // If the delta is 0 the instruction is andi at, zero_reg, #0 which also
  // signals that nothing was inlined.
  if (delta == 0) {
    return;
  }

  if (FLAG_trace_ic) {
    PrintF("[ patching ic at %p, andi=%p, delta=%d\n",
           address, andi_instruction_address, delta);
  }

  Address patch_address =
      andi_instruction_address - delta * Instruction::kInstrSize;
  Instr instr_at_patch = Assembler::instr_at(patch_address);
  Instr branch_instr =
      Assembler::instr_at(patch_address + Instruction::kInstrSize);
  // This is patching a conditional "jump if not smi/jump if smi" site.
  // Enabling by changing from
  //   andi at, rx, 0
  //   Branch <target>, eq, at, Operand(zero_reg)
  // to:
  //   andi at, rx, #kSmiTagMask
  //   Branch <target>, ne, at, Operand(zero_reg)
  // and vice-versa to be disabled again.
  CodePatcher patcher(patch_address, 2);
  Register reg = Register::from_code(Assembler::GetRs(instr_at_patch));
  if (check == ENABLE_INLINED_SMI_CHECK) {
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    DCHECK_EQ(0, Assembler::GetImmediate16(instr_at_patch));
    patcher.masm()->andi(at, reg, kSmiTagMask);
  } else {
    DCHECK(check == DISABLE_INLINED_SMI_CHECK);
    DCHECK(Assembler::IsAndImmediate(instr_at_patch));
    patcher.masm()->andi(at, reg, 0);
  }
  DCHECK(Assembler::IsBranch(branch_instr));
  if (Assembler::IsBeq(branch_instr)) {
    patcher.ChangeBranchCondition(ne);
  } else {
    DCHECK(Assembler::IsBne(branch_instr));
    patcher.ChangeBranchCondition(eq);
  }
}


} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_MIPS64