OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 325 matching lines...)
336 // Need to collect. Call into runtime system. | 336 // Need to collect. Call into runtime system. |
337 __ bind(&gc); | 337 __ bind(&gc); |
338 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); | 338 __ TailCallRuntime(Runtime::kPushBlockContext, 2, 1); |
339 } | 339 } |
340 | 340 |
341 | 341 |
342 static void GenerateFastCloneShallowArrayCommon( | 342 static void GenerateFastCloneShallowArrayCommon( |
343 MacroAssembler* masm, | 343 MacroAssembler* masm, |
344 int length, | 344 int length, |
345 FastCloneShallowArrayStub::Mode mode, | 345 FastCloneShallowArrayStub::Mode mode, |
346 AllocationSiteInfoMode allocation_site_info_mode, | 346 AllocationSiteMode allocation_site_mode, |
347 Label* fail) { | 347 Label* fail) { |
348 // Registers on entry: | 348 // Registers on entry: |
349 // a3: boilerplate literal array. | 349 // a3: boilerplate literal array. |
350 ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS); | 350 ASSERT(mode != FastCloneShallowArrayStub::CLONE_ANY_ELEMENTS); |
351 | 351 |
352 // All sizes here are multiples of kPointerSize. | 352 // All sizes here are multiples of kPointerSize. |
353 int elements_size = 0; | 353 int elements_size = 0; |
354 if (length > 0) { | 354 if (length > 0) { |
355 elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS | 355 elements_size = mode == FastCloneShallowArrayStub::CLONE_DOUBLE_ELEMENTS |
356 ? FixedDoubleArray::SizeFor(length) | 356 ? FixedDoubleArray::SizeFor(length) |
357 : FixedArray::SizeFor(length); | 357 : FixedArray::SizeFor(length); |
358 } | 358 } |
| 359 |
359 int size = JSArray::kSize; | 360 int size = JSArray::kSize; |
360 int allocation_info_start = size; | 361 int allocation_info_start = size; |
361 if (allocation_site_info_mode == TRACK_ALLOCATION_SITE_INFO) { | 362 if (allocation_site_mode == TRACK_ALLOCATION_SITE) { |
362 size += AllocationSiteInfo::kSize; | 363 size += AllocationSiteInfo::kSize; |
363 } | 364 } |
364 size += elements_size; | 365 size += elements_size; |
365 | 366 |
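For concreteness, the size arithmetic above can be collected into one plain C++ helper. This is a sketch only; the byte counts in the comments assume a 32-bit MIPS target (kPointerSize == 4) and the usual header layouts of this era, and are not taken from this CL.

    // Sketch: total size of the single new-space allocation (assumptions
    // noted above; TotalCloneSize is a hypothetical name, not stub code).
    static int TotalCloneSize(int length, bool double_elements,
                              bool track_allocation_site) {
      int elements_size = 0;
      if (length > 0) {
        elements_size = double_elements
            ? FixedDoubleArray::SizeFor(length)  // header + length * 8 bytes
            : FixedArray::SizeFor(length);       // header + length * 4 bytes
      }
      int size = JSArray::kSize;                 // the clone's own fields
      if (track_allocation_site) {
        size += AllocationSiteInfo::kSize;       // map word + payload word
      }
      return size + elements_size;               // one contiguous block
    }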
366 // Allocate both the JS array and the elements array in one big | 367 // Allocate both the JS array and the elements array in one big |
367 // allocation. This avoids multiple limit checks. | 368 // allocation. This avoids multiple limit checks. |
368 __ AllocateInNewSpace(size, | 369 __ AllocateInNewSpace(size, |
369 v0, | 370 v0, |
370 a1, | 371 a1, |
371 a2, | 372 a2, |
372 fail, | 373 fail, |
373 TAG_OBJECT); | 374 TAG_OBJECT); |
374 | 375 |
375 if (allocation_site_info_mode == TRACK_ALLOCATION_SITE_INFO) { | 376 if (allocation_site_mode == TRACK_ALLOCATION_SITE) { |
376 __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()-> | 377 __ li(a2, Operand(Handle<Map>(masm->isolate()->heap()-> |
377 allocation_site_info_map()))); | 378 allocation_site_info_map()))); |
378 __ sw(a2, FieldMemOperand(v0, allocation_info_start)); | 379 __ sw(a2, FieldMemOperand(v0, allocation_info_start)); |
379 __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize)); | 380 __ sw(a3, FieldMemOperand(v0, allocation_info_start + kPointerSize)); |
380 } | 381 } |
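When TRACK_ALLOCATION_SITE is on, the two stores above fill in the middle of the block. The resulting layout of the single allocation, sketched with offsets relative to the untagged start (v0 holds the tagged pointer, hence the FieldMemOperand accesses):

    //   +0                               JSArray header (JSArray::kSize bytes)
    //   +JSArray::kSize                  AllocationSiteInfo map
    //   +JSArray::kSize + kPointerSize   AllocationSiteInfo payload: the
    //                                    boilerplate (a3)
    //   +JSArray::kSize + AllocationSiteInfo::kSize
    //                                    elements store, if length > 0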
381 | 382 |
382 // Copy the JS array part. | 383 // Copy the JS array part. |
383 for (int i = 0; i < JSArray::kSize; i += kPointerSize) { | 384 for (int i = 0; i < JSArray::kSize; i += kPointerSize) { |
384 if ((i != JSArray::kElementsOffset) || (length == 0)) { | 385 if ((i != JSArray::kElementsOffset) || (length == 0)) { |
385 __ lw(a1, FieldMemOperand(a3, i)); | 386 __ lw(a1, FieldMemOperand(a3, i)); |
386 __ sw(a1, FieldMemOperand(v0, i)); | 387 __ sw(a1, FieldMemOperand(v0, i)); |
387 } | 388 } |
388 } | 389 } |
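One subtlety worth noting in the copy loop: when length > 0 the slot at JSArray::kElementsOffset is deliberately skipped, since the clone gets its own elements store and the pointer is rewritten below; when length == 0 both arrays share the canonical empty fixed array, so copying the boilerplate's pointer verbatim is correct.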
389 | 390 |
390 if (length > 0) { | 391 if (length > 0) { |
391 // Get hold of the elements array of the boilerplate and setup the | 392 // Get hold of the elements array of the boilerplate and setup the |
392 // elements pointer in the resulting object. | 393 // elements pointer in the resulting object. |
393 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset)); | 394 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset)); |
394 if (allocation_site_info_mode == TRACK_ALLOCATION_SITE_INFO) { | 395 if (allocation_site_mode == TRACK_ALLOCATION_SITE) { |
395 __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize)); | 396 __ Addu(a2, v0, Operand(JSArray::kSize + AllocationSiteInfo::kSize)); |
396 } else { | 397 } else { |
397 __ Addu(a2, v0, Operand(JSArray::kSize)); | 398 __ Addu(a2, v0, Operand(JSArray::kSize)); |
398 } | 399 } |
399 __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset)); | 400 __ sw(a2, FieldMemOperand(v0, JSArray::kElementsOffset)); |
400 | 401 |
401 // Copy the elements array. | 402 // Copy the elements array. |
402 ASSERT((elements_size % kPointerSize) == 0); | 403 ASSERT((elements_size % kPointerSize) == 0); |
403 __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize); | 404 __ CopyFields(a2, a3, a1.bit(), elements_size / kPointerSize); |
404 } | 405 } |
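The ASSERT above is what makes the word-wise CopyFields safe for both element kinds: on a 32-bit target each FixedDoubleArray entry occupies two machine words, so elements_size remains a multiple of kPointerSize and the doubles are moved bit-for-bit without involving the FPU.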
(...skipping 12 matching lines...)
417 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); | 418 __ lw(a3, MemOperand(sp, 2 * kPointerSize)); |
418 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); | 419 __ lw(a0, MemOperand(sp, 1 * kPointerSize)); |
419 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 420 __ Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
420 __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize); | 421 __ sll(t0, a0, kPointerSizeLog2 - kSmiTagSize); |
421 __ Addu(t0, a3, t0); | 422 __ Addu(t0, a3, t0); |
422 __ lw(a3, MemOperand(t0)); | 423 __ lw(a3, MemOperand(t0)); |
423 __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); | 424 __ LoadRoot(t1, Heap::kUndefinedValueRootIndex); |
424 __ Branch(&slow_case, eq, a3, Operand(t1)); | 425 __ Branch(&slow_case, eq, a3, Operand(t1)); |
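The index computation above folds the smi untag into the address scale: on a 32-bit target a smi is the value shifted left by kSmiTagSize (1), so shifting the tagged index left by kPointerSizeLog2 - kSmiTagSize (2 - 1 = 1) produces the byte offset directly. A minimal sketch in C++, with hypothetical variable names:

    // literal_index arrives as a tagged smi: (index << 1) on MIPS32.
    uint32_t tagged_index = index << 1;              // smi encoding
    uint32_t byte_offset = tagged_index << (2 - 1);  // == index * kPointerSize
    // The lw above then reads the boilerplate at
    //   literals_start + byte_offset,
    // where literals_start already includes FixedArray::kHeaderSize minus
    // the heap-object tag (the Addu two lines earlier).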
425 | 426 |
426 FastCloneShallowArrayStub::Mode mode = mode_; | 427 FastCloneShallowArrayStub::Mode mode = mode_; |
427 AllocationSiteInfoMode allocation_site_info_mode = | |
428 DONT_TRACK_ALLOCATION_SITE_INFO; | |
429 if (mode == CLONE_ANY_ELEMENTS_WITH_ALLOCATION_SITE_INFO) { | |
430 mode = CLONE_ANY_ELEMENTS; | |
431 allocation_site_info_mode = TRACK_ALLOCATION_SITE_INFO; | |
432 } | |
433 if (mode == CLONE_ANY_ELEMENTS) { | 428 if (mode == CLONE_ANY_ELEMENTS) { |
434 Label double_elements, check_fast_elements; | 429 Label double_elements, check_fast_elements; |
435 __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset)); | 430 __ lw(v0, FieldMemOperand(a3, JSArray::kElementsOffset)); |
436 __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); | 431 __ lw(v0, FieldMemOperand(v0, HeapObject::kMapOffset)); |
437 __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex); | 432 __ LoadRoot(t1, Heap::kFixedCOWArrayMapRootIndex); |
438 __ Branch(&check_fast_elements, ne, v0, Operand(t1)); | 433 __ Branch(&check_fast_elements, ne, v0, Operand(t1)); |
439 GenerateFastCloneShallowArrayCommon(masm, 0, | 434 GenerateFastCloneShallowArrayCommon(masm, 0, COPY_ON_WRITE_ELEMENTS, |
440 COPY_ON_WRITE_ELEMENTS, | 435 allocation_site_mode_, |
441 allocation_site_info_mode, | |
442 &slow_case); | 436 &slow_case); |
443 // Return and remove the on-stack parameters. | 437 // Return and remove the on-stack parameters. |
444 __ DropAndRet(3); | 438 __ DropAndRet(3); |
445 | 439 |
446 __ bind(&check_fast_elements); | 440 __ bind(&check_fast_elements); |
447 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex); | 441 __ LoadRoot(t1, Heap::kFixedArrayMapRootIndex); |
448 __ Branch(&double_elements, ne, v0, Operand(t1)); | 442 __ Branch(&double_elements, ne, v0, Operand(t1)); |
449 GenerateFastCloneShallowArrayCommon(masm, length_, | 443 GenerateFastCloneShallowArrayCommon(masm, length_, CLONE_ELEMENTS, |
450 CLONE_ELEMENTS, | 444 allocation_site_mode_, |
451 allocation_site_info_mode, | |
452 &slow_case); | 445 &slow_case); |
453 // Return and remove the on-stack parameters. | 446 // Return and remove the on-stack parameters. |
454 __ DropAndRet(3); | 447 __ DropAndRet(3); |
455 | 448 |
456 __ bind(&double_elements); | 449 __ bind(&double_elements); |
457 mode = CLONE_DOUBLE_ELEMENTS; | 450 mode = CLONE_DOUBLE_ELEMENTS; |
458 // Fall through to generate the code to handle double elements. | 451 // Fall through to generate the code to handle double elements. |
459 } | 452 } |
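To summarize the dispatch above: the clone mode is chosen from the boilerplate's elements map, roughly as follows (a restatement of the branches, not new behavior):

    // elements->map == FixedCOWArrayMap    -> COPY_ON_WRITE_ELEMENTS, cloned
    //                                         with length 0: the COW backing
    //                                         store is shared, not copied
    // elements->map == FixedArrayMap       -> CLONE_ELEMENTS (fast elements)
    // anything else (FixedDoubleArrayMap)  -> CLONE_DOUBLE_ELEMENTS, via the
    //                                         fall-through below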
460 | 453 |
461 if (FLAG_debug_code) { | 454 if (FLAG_debug_code) { |
(...skipping 12 matching lines...)
474 } | 467 } |
475 __ push(a3); | 468 __ push(a3); |
476 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset)); | 469 __ lw(a3, FieldMemOperand(a3, JSArray::kElementsOffset)); |
477 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset)); | 470 __ lw(a3, FieldMemOperand(a3, HeapObject::kMapOffset)); |
478 __ LoadRoot(at, expected_map_index); | 471 __ LoadRoot(at, expected_map_index); |
479 __ Assert(eq, message, a3, Operand(at)); | 472 __ Assert(eq, message, a3, Operand(at)); |
480 __ pop(a3); | 473 __ pop(a3); |
481 } | 474 } |
482 | 475 |
483 GenerateFastCloneShallowArrayCommon(masm, length_, mode, | 476 GenerateFastCloneShallowArrayCommon(masm, length_, mode, |
484 allocation_site_info_mode, &slow_case); | 477 allocation_site_mode_, |
| 478 &slow_case); |
485 | 479 |
486 // Return and remove the on-stack parameters. | 480 // Return and remove the on-stack parameters. |
487 __ DropAndRet(3); | 481 __ DropAndRet(3); |
488 | 482 |
489 __ bind(&slow_case); | 483 __ bind(&slow_case); |
490 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); | 484 __ TailCallRuntime(Runtime::kCreateArrayLiteralShallow, 3, 1); |
491 } | 485 } |
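For reference, the slow case tail-calls the runtime with the same three stack slots the stub was entered with (literals array, literal index, constant elements); in the TailCallRuntime signature of this era the trailing arguments are the argument count (3) and the result size in words (1), with the resulting clone returned in v0.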
492 | 486 |
493 | 487 |
494 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { | 488 void FastCloneShallowObjectStub::Generate(MacroAssembler* masm) { |
(...skipping 7386 matching lines...)
7881 __ Pop(ra, t1, a1); | 7875 __ Pop(ra, t1, a1); |
7882 __ Ret(); | 7876 __ Ret(); |
7883 } | 7877 } |
7884 | 7878 |
7885 | 7879 |
7886 #undef __ | 7880 #undef __ |
7887 | 7881 |
7888 } } // namespace v8::internal | 7882 } } // namespace v8::internal |
7889 | 7883 |
7890 #endif // V8_TARGET_ARCH_MIPS | 7884 #endif // V8_TARGET_ARCH_MIPS |