Chromium Code Reviews
Diff: src/ic/mips64/ic-mips64.cc

Issue 2523473002: [cleanup] Drop handwritten KeyedStoreIC code (Closed)
Patch Set: rebased (created 4 years, 1 month ago)
 // Copyright 2012 the V8 project authors. All rights reserved.
 // Use of this source code is governed by a BSD-style license that can be
 // found in the LICENSE file.

 #if V8_TARGET_ARCH_MIPS64

 #include "src/codegen.h"
 #include "src/ic/ic.h"
 #include "src/ic/ic-compiler.h"
 #include "src/ic/stub-cache.h"
(...skipping 180 matching lines...)

 void KeyedLoadIC::GenerateRuntimeGetProperty(MacroAssembler* masm) {
   // The return address is in ra.

   __ Push(LoadDescriptor::ReceiverRegister(), LoadDescriptor::NameRegister());

   // Do tail-call to runtime routine.
   __ TailCallRuntime(Runtime::kKeyedGetProperty);
 }

-static void KeyedStoreGenerateMegamorphicHelper(
-    MacroAssembler* masm, Label* fast_object, Label* fast_double, Label* slow,
-    KeyedStoreCheckMap check_map, KeyedStoreIncrementLength increment_length,
-    Register value, Register key, Register receiver, Register receiver_map,
-    Register elements_map, Register elements) {
-  Label transition_smi_elements;
-  Label finish_object_store, non_double_value, transition_double_elements;
-  Label fast_double_without_map_check;
-
-  // Fast case: Do the store, could be either Object or double.
-  __ bind(fast_object);
-  Register scratch = a4;
-  Register scratch2 = t0;
-  Register address = a5;
-  DCHECK(!AreAliased(value, key, receiver, receiver_map, elements_map, elements,
-                     scratch, scratch2, address));
-
-  if (check_map == kCheckMap) {
-    __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-    __ Branch(fast_double, ne, elements_map,
-              Operand(masm->isolate()->factory()->fixed_array_map()));
-  }
-
-  // HOLECHECK: guards "A[i] = V"
-  // We have to go to the runtime if the current value is the hole because
-  // there may be a callback on the element.
-  Label holecheck_passed1;
-  __ Daddu(address, elements, FixedArray::kHeaderSize - kHeapObjectTag);
-  __ SmiScale(at, key, kPointerSizeLog2);
-  __ daddu(address, address, at);
-  __ ld(scratch, MemOperand(address));
-
-  __ Branch(&holecheck_passed1, ne, scratch,
-            Operand(masm->isolate()->factory()->the_hole_value()));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&holecheck_passed1);
-
-  // Smi stores don't require further checks.
-  Label non_smi_value;
-  __ JumpIfNotSmi(value, &non_smi_value);
-
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  // It's irrelevant whether array is smi-only or not when writing a smi.
-  __ Daddu(address, elements,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiScale(scratch, key, kPointerSizeLog2);
-  __ Daddu(address, address, scratch);
-  __ sd(value, MemOperand(address));
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&non_smi_value);
-  // Escape to elements kind transition case.
-  __ CheckFastObjectElements(receiver_map, scratch, &transition_smi_elements);
-
-  // Fast elements array, store the value to the elements backing store.
-  __ bind(&finish_object_store);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Daddu(address, elements,
-           Operand(FixedArray::kHeaderSize - kHeapObjectTag));
-  __ SmiScale(scratch, key, kPointerSizeLog2);
-  __ Daddu(address, address, scratch);
-  __ sd(value, MemOperand(address));
-  // Update write barrier for the elements array address.
-  __ mov(scratch, value);  // Preserve the value which is returned.
-  __ RecordWrite(elements, address, scratch, kRAHasNotBeenSaved,
-                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(fast_double);
-  if (check_map == kCheckMap) {
-    // Check for fast double array case. If this fails, call through to the
-    // runtime.
-    __ LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
-    __ Branch(slow, ne, elements_map, Operand(at));
-  }
-
-  // HOLECHECK: guards "A[i] double hole?"
-  // We have to see if the double version of the hole is present. If so
-  // go to the runtime.
-  __ Daddu(address, elements,
-           Operand(FixedDoubleArray::kHeaderSize + Register::kExponentOffset -
-                   kHeapObjectTag));
-  __ SmiScale(at, key, kPointerSizeLog2);
-  __ daddu(address, address, at);
-  __ lw(scratch, MemOperand(address));
-  __ Branch(&fast_double_without_map_check, ne, scratch,
-            Operand(static_cast<int32_t>(kHoleNanUpper32)));
-  __ JumpIfDictionaryInPrototypeChain(receiver, elements_map, scratch, slow);
-
-  __ bind(&fast_double_without_map_check);
-  __ StoreNumberToDoubleElements(value, key, elements, scratch, scratch2,
-                                 &transition_double_elements);
-  if (increment_length == kIncrementLength) {
-    // Add 1 to receiver->length.
-    __ Daddu(scratch, key, Operand(Smi::FromInt(1)));
-    __ sd(scratch, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  }
-  __ Ret(USE_DELAY_SLOT);
-  __ Move(v0, value);  // Ensure the stub returns correct value.
-
-  __ bind(&transition_smi_elements);
-  // Transition the array appropriately depending on the value type.
-  __ ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
-  __ LoadRoot(at, Heap::kHeapNumberMapRootIndex);
-  __ Branch(&non_double_value, ne, scratch, Operand(at));
-
-  // Value is a double. Transition FAST_SMI_ELEMENTS ->
-  // FAST_DOUBLE_ELEMENTS and complete the store.
-  __ LoadTransitionedArrayMapConditional(
-      FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS, receiver_map, scratch, slow);
-  AllocationSiteMode mode =
-      AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_DOUBLE_ELEMENTS);
-  ElementsTransitionGenerator::GenerateSmiToDouble(masm, receiver, key, value,
-                                                   receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&fast_double_without_map_check);
-
-  __ bind(&non_double_value);
-  // Value is not a double, FAST_SMI_ELEMENTS -> FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_SMI_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_SMI_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-
-  __ bind(&transition_double_elements);
-  // Elements are FAST_DOUBLE_ELEMENTS, but value is an Object that's not a
-  // HeapNumber. Make sure that the receiver is a Array with FAST_ELEMENTS and
-  // transition array from FAST_DOUBLE_ELEMENTS to FAST_ELEMENTS
-  __ LoadTransitionedArrayMapConditional(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS,
-                                         receiver_map, scratch, slow);
-  mode = AllocationSite::GetMode(FAST_DOUBLE_ELEMENTS, FAST_ELEMENTS);
-  ElementsTransitionGenerator::GenerateDoubleToObject(
-      masm, receiver, key, value, receiver_map, mode, slow);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  __ jmp(&finish_object_store);
-}
-
-
-void KeyedStoreIC::GenerateMegamorphic(MacroAssembler* masm,
-                                       LanguageMode language_mode) {
-  // ---------- S t a t e --------------
-  //  -- a0     : value
-  //  -- a1     : key
-  //  -- a2     : receiver
-  //  -- ra     : return address
-  // -----------------------------------
-  Label slow, fast_object, fast_object_grow;
-  Label fast_double, fast_double_grow;
-  Label array, extra, check_if_double_array, maybe_name_key, miss;
-
-  // Register usage.
-  Register value = StoreDescriptor::ValueRegister();
-  Register key = StoreDescriptor::NameRegister();
-  Register receiver = StoreDescriptor::ReceiverRegister();
-  DCHECK(value.is(a0));
-  Register receiver_map = a3;
-  Register elements_map = a6;
-  Register elements = a7;  // Elements array of the receiver.
-  // a4 and a5 are used as general scratch registers.
-
-  // Check that the key is a smi.
-  __ JumpIfNotSmi(key, &maybe_name_key);
-  // Check that the object isn't a smi.
-  __ JumpIfSmi(receiver, &slow);
-  // Get the map of the object.
-  __ ld(receiver_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
-  // Check that the receiver does not require access checks.
-  // The generic stub does not perform map checks.
-  __ lbu(a4, FieldMemOperand(receiver_map, Map::kBitFieldOffset));
-  __ And(a4, a4, Operand(1 << Map::kIsAccessCheckNeeded));
-  __ Branch(&slow, ne, a4, Operand(zero_reg));
-  // Check if the object is a JS array or not.
-  __ lbu(a4, FieldMemOperand(receiver_map, Map::kInstanceTypeOffset));
-  __ Branch(&array, eq, a4, Operand(JS_ARRAY_TYPE));
-  // Check that the object is some kind of JSObject.
-  __ Branch(&slow, lt, a4, Operand(FIRST_JS_OBJECT_TYPE));
-
-  // Object case: Check key against length in the elements array.
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-  // Check array bounds. Both the key and the length of FixedArray are smis.
-  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&fast_object, lo, key, Operand(a4));
-
-  // Slow case, handle jump to runtime.
-  __ bind(&slow);
-  // Entry registers are intact.
-  // a0: value.
-  // a1: key.
-  // a2: receiver.
-  PropertyICCompiler::GenerateRuntimeSetProperty(masm, language_mode);
-  // Never returns to here.
-
-  __ bind(&maybe_name_key);
-  __ ld(a4, FieldMemOperand(key, HeapObject::kMapOffset));
-  __ lb(a4, FieldMemOperand(a4, Map::kInstanceTypeOffset));
-  __ JumpIfNotUniqueNameInstanceType(a4, &slow);
-
-  // The handlers in the stub cache expect a vector and slot. Since we won't
-  // change the IC from any downstream misses, a dummy vector can be used.
-  Register vector = StoreWithVectorDescriptor::VectorRegister();
-  Register slot = StoreWithVectorDescriptor::SlotRegister();
-
-  DCHECK(!AreAliased(vector, slot, a5, a6, a7, t0));
-  Handle<TypeFeedbackVector> dummy_vector =
-      TypeFeedbackVector::DummyVector(masm->isolate());
-  int slot_index = dummy_vector->GetIndex(
-      FeedbackVectorSlot(TypeFeedbackVector::kDummyKeyedStoreICSlot));
-  __ LoadRoot(vector, Heap::kDummyVectorRootIndex);
-  __ li(slot, Operand(Smi::FromInt(slot_index)));
-
-  masm->isolate()->store_stub_cache()->GenerateProbe(masm, receiver, key, a5,
-                                                     a6, a7, t0);
-  // Cache miss.
-  __ Branch(&miss);
-
-  // Extra capacity case: Check if there is extra capacity to
-  // perform the store and update the length. Used for adding one
-  // element to the array by writing to array[array.length].
-  __ bind(&extra);
-  // Condition code from comparing key and array length is still available.
-  // Only support writing to array[array.length].
-  __ Branch(&slow, ne, key, Operand(a4));
-  // Check for room in the elements backing store.
-  // Both the key and the length of FixedArray are smis.
-  __ ld(a4, FieldMemOperand(elements, FixedArray::kLengthOffset));
-  __ Branch(&slow, hs, key, Operand(a4));
-  __ ld(elements_map, FieldMemOperand(elements, HeapObject::kMapOffset));
-  __ Branch(&check_if_double_array, ne, elements_map,
-            Heap::kFixedArrayMapRootIndex);
-
-  __ jmp(&fast_object_grow);
-
-  __ bind(&check_if_double_array);
-  __ Branch(&slow, ne, elements_map, Heap::kFixedDoubleArrayMapRootIndex);
-  __ jmp(&fast_double_grow);
-
-  // Array case: Get the length and the elements array from the JS
-  // array. Check that the array is in fast mode (and writable); if it
-  // is the length is always a smi.
-  __ bind(&array);
-  __ ld(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
-
-  // Check the key against the length in the array.
-  __ ld(a4, FieldMemOperand(receiver, JSArray::kLengthOffset));
-  __ Branch(&extra, hs, key, Operand(a4));
-
-  KeyedStoreGenerateMegamorphicHelper(
-      masm, &fast_object, &fast_double, &slow, kCheckMap, kDontIncrementLength,
-      value, key, receiver, receiver_map, elements_map, elements);
-  KeyedStoreGenerateMegamorphicHelper(masm, &fast_object_grow,
-                                      &fast_double_grow, &slow, kDontCheckMap,
-                                      kIncrementLength, value, key, receiver,
-                                      receiver_map, elements_map, elements);
-
-  __ bind(&miss);
-  GenerateMiss(masm);
-}
-
-
 static void StoreIC_PushArgs(MacroAssembler* masm) {
   __ Push(StoreWithVectorDescriptor::ValueRegister(),
           StoreWithVectorDescriptor::SlotRegister(),
           StoreWithVectorDescriptor::VectorRegister(),
           StoreWithVectorDescriptor::ReceiverRegister(),
           StoreWithVectorDescriptor::NameRegister());
 }


 void KeyedStoreIC::GenerateMiss(MacroAssembler* masm) {
(...skipping 165 matching lines...)
       break;
     default:
       UNIMPLEMENTED();
   }
   patcher.ChangeBranchCondition(branch_instr, opcode);
 }
 }  // namespace internal
 }  // namespace v8

 #endif  // V8_TARGET_ARCH_MIPS64