Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(104)

Side by Side Diff: src/ic/arm64/ic-arm64.cc

Issue 2523473002: [cleanup] Drop handwritten KeyedStoreIC code (Closed)
Patch Set: rebased Created 4 years, 1 month ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch
« no previous file with comments | « src/ic/arm/stub-cache-arm.cc ('k') | src/ic/arm64/ic-compiler-arm64.cc » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2013 the V8 project authors. All rights reserved. 1 // Copyright 2013 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #if V8_TARGET_ARCH_ARM64 5 #if V8_TARGET_ARCH_ARM64
6 6
7 #include "src/codegen.h" 7 #include "src/codegen.h"
8 #include "src/ic/ic.h" 8 #include "src/ic/ic.h"
9 #include "src/ic/ic-compiler.h" 9 #include "src/ic/ic-compiler.h"
10 #include "src/ic/stub-cache.h" 10 #include "src/ic/stub-cache.h"
(...skipping 179 matching lines...) Expand 10 before | Expand all | Expand 10 after
190 190
// Slow path for a keyed store.  Pushes the IC's arguments via
// StoreIC_PushArgs (layout defined elsewhere in this file) and tail-calls
// the KeyedStoreIC_Slow runtime function — control never returns here.
191 void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) { 191 void KeyedStoreIC::GenerateSlow(MacroAssembler* masm) {
192 ASM_LOCATION("KeyedStoreIC::GenerateSlow"); 192 ASM_LOCATION("KeyedStoreIC::GenerateSlow");
193 StoreIC_PushArgs(masm); 193 StoreIC_PushArgs(masm);
194 194
195 // The slow case calls into the runtime to complete the store without causing 195 // The slow case calls into the runtime to complete the store without causing
196 // an IC miss that would otherwise cause a transition to the generic stub. 196 // an IC miss that would otherwise cause a transition to the generic stub.
197 __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow); 197 __ TailCallRuntime(Runtime::kKeyedStoreIC_Slow);
198 } 198 }
199 199
// Emits the fast-path store sequences shared by the megamorphic keyed
// store stub.  Two entry labels are bound here:
//   *fast_object — store into FixedArray (smi/object) elements
//   *fast_double — store into FixedDoubleArray elements
// check_map == kCheckMap additionally validates the elements map at each
// entry; increment_length == kIncrementLength bumps the JSArray length by
// one (grow-by-one store at array[array.length]).  Any condition the fast
// paths cannot handle branches to *slow.  x10 and x11 are used as
// scratch registers throughout, hence the alias DCHECK below.
200 static void KeyedStoreGenerateMegamorphicHelper(
201 MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
202 KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
203 Register value, Register key, Register receiver, Register receiver_map,
204 Register elements_map, Register elements) {
// None of the incoming registers may overlap each other or the x10/x11
// scratch registers used by this helper.
205 DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
206 x10, x11));
207
208 Label transition_smi_elements;
209 Label transition_double_elements;
210 Label fast_double_without_map_check;
211 Label non_double_value;
212 Label finish_store;
213
// ---- Fast case: FixedArray-backed (smi/object) elements. ----
214 __ Bind(fast_object);
215 if (check_map == kCheckMap) {
216 __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
217 __ Cmp(elements_map,
218 Operand(masm->isolate()->factory()->fixed_array_map()));
// Not a plain FixedArray — try the double-elements path instead.
219 __ B(ne, fast_double);
220 }
221
222 // HOLECHECK: guards "A[i] = V"
223 // We have to go to the runtime if the current value is the hole because there
224 // may be a callback on the element.
225 Label holecheck_passed;
// x10 = untagged address of elements[key]; x11 = current element value.
226 __ Add(x10, elements, FixedArray::kHeaderSize - kHeapObjectTag);
227 __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
228 __ Ldr(x11, MemOperand(x10));
229 __ JumpIfNotRoot(x11, Heap::kTheHoleValueRootIndex, &holecheck_passed);
230 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
231 __ bind(&holecheck_passed);
232
233 // Smi stores don't require further checks.
234 __ JumpIfSmi(value, &finish_store);
235
236 // Escape to elements kind transition case.
237 __ CheckFastObjectElements(receiver_map, x10, &transition_smi_elements);
238
239 __ Bind(&finish_store);
240 if (increment_length == kIncrementLength) {
241 // Add 1 to receiver->length.
242 __ Add(x10, key, Smi::FromInt(1));
243 __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
244 }
245
// Compute the element slot address and perform the store.
246 Register address = x11;
247 __ Add(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
248 __ Add(address, address, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
249 __ Str(value, MemOperand(address));
250
// Smi values need no write barrier; skip RecordWrite for them.
251 Label dont_record_write;
252 __ JumpIfSmi(value, &dont_record_write);
253
254 // Update write barrier for the elements array address.
255 __ Mov(x10, value); // Preserve the value which is returned.
256 __ RecordWrite(elements, address, x10, kLRHasNotBeenSaved, kDontSaveFPRegs,
257 EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
258
259 __ Bind(&dont_record_write);
260 __ Ret();
261
262
// ---- Fast case: FixedDoubleArray-backed elements. ----
263 __ Bind(fast_double);
264 if (check_map == kCheckMap) {
265 // Check for fast double array case. If this fails, call through to the
266 // runtime.
267 __ JumpIfNotRoot(elements_map, Heap::kFixedDoubleArrayMapRootIndex, slow);
268 }
269
270 // HOLECHECK: guards "A[i] double hole?"
271 // We have to see if the double version of the hole is present. If so go to
272 // the runtime.
273 __ Add(x10, elements, FixedDoubleArray::kHeaderSize - kHeapObjectTag);
274 __ Add(x10, x10, Operand::UntagSmiAndScale(key, kPointerSizeLog2));
275 __ Ldr(x11, MemOperand(x10));
// Compare the raw bits against the canonical hole NaN representation.
276 __ CompareAndBranch(x11, kHoleNanInt64, ne, &fast_double_without_map_check);
277 __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, x10, slow);
278
279 __ Bind(&fast_double_without_map_check);
// Stores value as a double; bails to transition_double_elements if the
// value is a non-number object.
280 __ StoreNumberToDoubleElements(value, key, elements, x10, d0,
281 &transition_double_elements);
282 if (increment_length == kIncrementLength) {
283 // Add 1 to receiver->length.
284 __ Add(x10, key, Smi::FromInt(1));
285 __ Str(x10, FieldMemOperand(receiver, JSArray::kLengthOffset));
286 }
287 __ Ret();
288
289
// ---- Elements-kind transitions out of FAST_SMI_ELEMENTS. ----
290 __ Bind(&transition_smi_elements);
291 // Transition the array appropriately depending on the value type.
292 __ Ldr(x10, FieldMemOperand(value, HeapObject::kMapOffset));
293 __ JumpIfNotRoot(x10, Heap::kHeapNumberMapRootIndex, &non_double_value);
294
295 // Value is a double. Transition FAST_SMI_ELEMENTS ->
296 // FAST_DOUBLE_ELEMENTS and complete the store.
297 __ LoadTransitionedArrayMapConditional(
298 FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, x10, x11, slow);
299 AllocationSiteMode mode =
300 AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
301 ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
302 receiver_map, mode, slow);
// Reload elements: the transition may have replaced the backing store.
303 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
304 __ B(&fast_double_without_map_check);
305
306 __ Bind(&non_double_value);
307 // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS.
308 __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
309 receiver_map, x10, x11, slow);
310
311 mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
312 ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
313 masm, receiver, key, value, receiver_map, mode, slow);
314
// Reload elements: the transition may have replaced the backing store.
315 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
316 __ B(&finish_store);
317
318 __ Bind(&transition_double_elements);
319 // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
320 // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
321 // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
322 __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
323 receiver_map, x10, x11, slow);
324 mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
325 ElementsTransitionGenerator::GenerateDoubleToObject(
326 masm, receiver, key, value, receiver_map, mode, slow);
// Reload elements: the transition may have replaced the backing store.
327 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
328 __ B(&finish_store);
329 }
330
331
// Megamorphic keyed-store entry point.  Dispatches on the key and
// receiver:
//   - smi key + fast-elements receiver: in-bounds stores go to the fast
//     paths emitted by KeyedStoreGenerateMegamorphicHelper; a store at
//     exactly array.length takes the grow-by-one ("extra") path;
//   - unique-name key: probes the store stub cache (with a dummy
//     vector/slot) and falls through to GenerateMiss on a cache miss;
//   - everything else: runtime set-property honoring language_mode.
332 void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
333 LanguageMode language_mode) {
334 ASM_LOCATION("KeyedStoreIC::GenerateMegamorphic");
335 Label slow;
336 Label array;
337 Label fast_object;
338 Label extra;
339 Label fast_object_grow;
340 Label fast_double_grow;
341 Label fast_double;
342 Label maybe_name_key;
343 Label miss;
344
// Incoming arguments per the store calling convention (asserted below).
345 Register value = StoreDescriptor::ValueRegister();
346 Register key = StoreDescriptor::NameRegister();
347 Register receiver = StoreDescriptor::ReceiverRegister();
348 DCHECK(receiver.is(x1));
349 DCHECK(key.is(x2));
350 DCHECK(value.is(x0));
351
352 Register receiver_map = x3;
353 Register elements = x4;
354 Register elements_map = x5;
355
// Non-smi keys may still be unique names (handled at maybe_name_key);
// a smi receiver cannot have elements, so that goes straight to slow.
356 __ JumpIfNotSmi(key, &maybe_name_key);
357 __ JumpIfSmi(receiver, &slow);
358 __ Ldr(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
359
360 // Check that the receiver does not require access checks.
361 // The generic stub does not perform map checks.
362 __ Ldrb(x10, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
363 __ TestAndBranchIfAnySet(x10, (1 << Map::kIsAccessCheckNeeded), &slow);
364
365 // Check if the object is a JS array or not.
366 Register instance_type = x10;
367 __ CompareInstanceType(receiver_map, instance_type, JS_ARRAY_TYPE);
368 __ B(eq, &array);
369 // Check that the object is some kind of JS object EXCEPT JS Value type. In
370 // the case that the object is a value-wrapper object, we enter the runtime
371 // system to make sure that indexing into string objects works as intended.
372 STATIC_ASSERT(JS_VALUE_TYPE < JS_OBJECT_TYPE);
373 __ Cmp(instance_type, JS_OBJECT_TYPE);
374 __ B(lo, &slow);
375
376 // Object case: Check key against length in the elements array.
377 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
378 // Check array bounds. Both the key and the length of FixedArray are smis.
379 __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
380 __ Cmp(x10, Operand::UntagSmi(key));
// Unsigned compare: in-bounds (length > key) stores take the fast path.
381 __ B(hi, &fast_object);
382
383
384 __ Bind(&slow);
385 // Slow case, handle jump to runtime.
386 // Live values:
387 // x0: value
388 // x1: key
389 // x2: receiver
390 PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
391 // Never returns to here.
392
// Non-smi key: only unique names can be served from the stub cache.
393 __ bind(&maybe_name_key);
394 __ Ldr(x10, FieldMemOperand(key, HeapObject::kMapOffset));
395 __ Ldrb(x10, FieldMemOperand(x10, Map::kInstanceTypeOffset));
396 __ JumpIfNotUniqueNameInstanceType(x10, &slow);
397
398 // The handlers in the stub cache expect a vector and slot. Since we won't
399 // change the IC from any downstream misses, a dummy vector can be used.
400 Register vector = StoreWithVectorDescriptor::VectorRegister();
401 Register slot = StoreWithVectorDescriptor::SlotRegister();
402 DCHECK(!AreAliased(vector, slot, x5, x6, x7, x8));
403 Handle<TypeFeedbackVector> dummy_vector =
404 TypeFeedbackVector::DummyVector(masm->isolate());
405 int slot_index = dummy_vector->GetIndex(
406 FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
407 __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
408 __ Mov(slot, Operand(Smi::FromInt(slot_index)));
409
// A stub-cache hit jumps directly into the matching handler; only a
// probe miss falls through to the branch below.
410 masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, x5,
411 x6, x7, x8);
412 // Cache miss.
413 __ B(&miss);
414
415 __ Bind(&extra);
416 // Extra capacity case: Check if there is extra capacity to
417 // perform the store and update the length. Used for adding one
418 // element to the array by writing to array[array.length].
419
420 // Check for room in the elements backing store.
421 // Both the key and the length of FixedArray are smis.
422 __ Ldrsw(x10, UntagSmiFieldMemOperand(elements, FixedArray::kLengthOffset));
423 __ Cmp(x10, Operand::UntagSmi(key));
// No spare capacity (capacity <= key): give up and go to the runtime.
424 __ B(ls, &slow);
425
// Dispatch on the backing-store kind to the matching grow path.
426 __ Ldr(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
427 __ Cmp(elements_map, Operand(masm->isolate()->factory()->fixed_array_map()));
428 __ B(eq, &fast_object_grow);
429 __ Cmp(elements_map,
430 Operand(masm->isolate()->factory()->fixed_double_array_map()));
431 __ B(eq, &fast_double_grow);
432 __ B(&slow);
433
434
435 __ Bind(&array);
436 // Array case: Get the length and the elements array from the JS
437 // array. Check that the array is in fast mode (and writable); if it
438 // is the length is always a smi.
439
440 __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
441
442 // Check the key against the length in the array.
443 __ Ldrsw(x10, UntagSmiFieldMemOperand(receiver, JSArray::kLengthOffset));
444 __ Cmp(x10, Operand::UntagSmi(key));
445 __ B(eq, &extra); // We can handle the case where we are appending 1 element.
// key > length would leave a gap: defer to the runtime.
446 __ B(lo, &slow);
447
// In-bounds stores: map check needed, length unchanged.
448 KeyedStoreGenerateMegamorphicHelper(
449 masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
450 value, key, receiver, receiver_map, elements_map, elements);
// Grow-by-one stores: map already dispatched above, length incremented.
451 KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
452 &fast_double_grow, &slow, kDontCheckMap,
453 kIncrementLength, value, key, receiver,
454 receiver_map, elements_map, elements);
455
456 __ bind(&miss);
457 GenerateMiss(masm);
458 }
459
// Miss handler for StoreIC: pushes the store arguments via
// StoreIC_PushArgs and tail-calls the StoreIC_Miss runtime entry, which
// updates the IC's feedback before completing the store.
460 void StoreIC::GenerateMiss(MacroAssembler* masm) { 200 void StoreIC::GenerateMiss(MacroAssembler* masm) {
461 StoreIC_PushArgs(masm); 201 StoreIC_PushArgs(masm);
462 202
463 // Tail call to the entry. 203 // Tail call to the entry.
464 __ TailCallRuntime(Runtime::kStoreIC_Miss); 204 __ TailCallRuntime(Runtime::kStoreIC_Miss);
465 } 205 }
466 206
467 207
468 void StoreIC::GenerateNormal(MacroAssembler* masm) { 208 void StoreIC::GenerateNormal(MacroAssembler* masm) {
469 Label miss; 209 Label miss;
(...skipping 103 matching lines...) Expand 10 before | Expand all | Expand 10 after
573 } else { 313 } else {
574 DCHECK(to_patch->Mask(TestBranchMask) == TBNZ); 314 DCHECK(to_patch->Mask(TestBranchMask) == TBNZ);
575 // This is JumpIfSmi(smi_reg, branch_imm). 315 // This is JumpIfSmi(smi_reg, branch_imm).
576 patcher.tbz(smi_reg, 0, branch_imm); 316 patcher.tbz(smi_reg, 0, branch_imm);
577 } 317 }
578 } 318 }
579 } // namespace internal 319 } // namespace internal
580 } // namespace v8 320 } // namespace v8
581 321
582 #endif // V8_TARGET_ARCH_ARM64 322 #endif // V8_TARGET_ARCH_ARM64
OLDNEW
« no previous file with comments | « src/ic/arm/stub-cache-arm.cc ('k') | src/ic/arm64/ic-compiler-arm64.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698