OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #include "src/v8.h" | 7 #include "src/v8.h" |
8 | 8 |
9 #if V8_TARGET_ARCH_ARM | 9 #if V8_TARGET_ARCH_ARM |
10 | 10 |
(...skipping 435 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
446 | 446 |
447 | 447 |
448 void MacroAssembler::RecordWriteField( | 448 void MacroAssembler::RecordWriteField( |
449 Register object, | 449 Register object, |
450 int offset, | 450 int offset, |
451 Register value, | 451 Register value, |
452 Register dst, | 452 Register dst, |
453 LinkRegisterStatus lr_status, | 453 LinkRegisterStatus lr_status, |
454 SaveFPRegsMode save_fp, | 454 SaveFPRegsMode save_fp, |
455 RememberedSetAction remembered_set_action, | 455 RememberedSetAction remembered_set_action, |
456 SmiCheck smi_check) { | 456 SmiCheck smi_check, |
| 457 PointersToHereCheck pointers_to_here_check_for_value) { |
457 // First, check if a write barrier is even needed. The tests below | 458 // First, check if a write barrier is even needed. The tests below |
458 // catch stores of Smis. | 459 // catch stores of Smis. |
459 Label done; | 460 Label done; |
460 | 461 |
461 // Skip barrier if writing a smi. | 462 // Skip barrier if writing a smi. |
462 if (smi_check == INLINE_SMI_CHECK) { | 463 if (smi_check == INLINE_SMI_CHECK) { |
463 JumpIfSmi(value, &done); | 464 JumpIfSmi(value, &done); |
464 } | 465 } |
465 | 466 |
466 // Although the object register is tagged, the offset is relative to the start | 467 // Although the object register is tagged, the offset is relative to the start |
467 // of the object, so the offset must be a multiple of kPointerSize. | 468 // of the object, so the offset must be a multiple of kPointerSize. |
468 ASSERT(IsAligned(offset, kPointerSize)); | 469 ASSERT(IsAligned(offset, kPointerSize)); |
469 | 470 |
470 add(dst, object, Operand(offset - kHeapObjectTag)); | 471 add(dst, object, Operand(offset - kHeapObjectTag)); |
471 if (emit_debug_code()) { | 472 if (emit_debug_code()) { |
472 Label ok; | 473 Label ok; |
473 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); | 474 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); |
474 b(eq, &ok); | 475 b(eq, &ok); |
475 stop("Unaligned cell in write barrier"); | 476 stop("Unaligned cell in write barrier"); |
476 bind(&ok); | 477 bind(&ok); |
477 } | 478 } |
478 | 479 |
479 RecordWrite(object, | 480 RecordWrite(object, |
480 dst, | 481 dst, |
481 value, | 482 value, |
482 lr_status, | 483 lr_status, |
483 save_fp, | 484 save_fp, |
484 remembered_set_action, | 485 remembered_set_action, |
485 OMIT_SMI_CHECK); | 486 OMIT_SMI_CHECK, |
| 487 pointers_to_here_check_for_value); |
486 | 488 |
487 bind(&done); | 489 bind(&done); |
488 | 490 |
489 // Clobber clobbered input registers when running with the debug-code flag | 491 // Clobber clobbered input registers when running with the debug-code flag |
490 // turned on to provoke errors. | 492 // turned on to provoke errors. |
491 if (emit_debug_code()) { | 493 if (emit_debug_code()) { |
492 mov(value, Operand(BitCast<int32_t>(kZapValue + 4))); | 494 mov(value, Operand(BitCast<int32_t>(kZapValue + 4))); |
493 mov(dst, Operand(BitCast<int32_t>(kZapValue + 8))); | 495 mov(dst, Operand(BitCast<int32_t>(kZapValue + 8))); |
494 } | 496 } |
495 } | 497 } |
496 | 498 |
497 | 499 |
| 500 // Will clobber 4 registers: object, map, dst, ip. The |
| 501 // register 'object' contains a heap object pointer. |
| 502 void MacroAssembler::RecordWriteForMap(Register object, |
| 503 Register map, |
| 504 Register dst, |
| 505 LinkRegisterStatus lr_status, |
| 506 SaveFPRegsMode fp_mode) { |
| 507 if (emit_debug_code()) { |
| 508 ldr(dst, FieldMemOperand(map, HeapObject::kMapOffset)); |
| 509 cmp(dst, Operand(isolate()->factory()->meta_map())); |
| 510 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
| 511 } |
| 512 |
| 513 if (!FLAG_incremental_marking) { |
| 514 return; |
| 515 } |
| 516 |
| 517 // Count number of write barriers in generated code. |
| 518 isolate()->counters()->write_barriers_static()->Increment(); |
| 519 // TODO(mstarzinger): Dynamic counter missing. |
| 520 |
| 521 if (emit_debug_code()) { |
| 522 ldr(ip, FieldMemOperand(object, HeapObject::kMapOffset)); |
| 523 cmp(ip, map); |
| 524 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
| 525 } |
| 526 |
| 527 Label done; |
| 528 |
| 529 // A single check of the map's pages interesting flag suffices, since it is |
| 530 // only set during incremental collection, and then it's also guaranteed that |
| 531 // the from object's page's interesting flag is also set. This optimization |
| 532 // relies on the fact that maps can never be in new space. |
| 533 CheckPageFlag(map, |
| 534 map, // Used as scratch. |
| 535 MemoryChunk::kPointersToHereAreInterestingMask, |
| 536 eq, |
| 537 &done); |
| 538 |
| 539 add(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); |
| 540 if (emit_debug_code()) { |
| 541 Label ok; |
| 542 tst(dst, Operand((1 << kPointerSizeLog2) - 1)); |
| 543 b(eq, &ok); |
| 544 stop("Unaligned cell in write barrier"); |
| 545 bind(&ok); |
| 546 } |
| 547 |
| 548 // Record the actual write. |
| 549 if (lr_status == kLRHasNotBeenSaved) { |
| 550 push(lr); |
| 551 } |
| 552 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, |
| 553 fp_mode); |
| 554 CallStub(&stub); |
| 555 if (lr_status == kLRHasNotBeenSaved) { |
| 556 pop(lr); |
| 557 } |
| 558 |
| 559 bind(&done); |
| 560 |
| 561 // Clobber clobbered registers when running with the debug-code flag |
| 562 // turned on to provoke errors. |
| 563 if (emit_debug_code()) { |
| 564 mov(dst, Operand(BitCast<int32_t>(kZapValue + 12))); |
| 565 mov(map, Operand(BitCast<int32_t>(kZapValue + 16))); |
| 566 } |
| 567 } |
| 568 |
| 569 |
498 // Will clobber 4 registers: object, address, scratch, ip. The | 570 // Will clobber 4 registers: object, address, scratch, ip. The |
499 // register 'object' contains a heap object pointer. The heap object | 571 // register 'object' contains a heap object pointer. The heap object |
500 // tag is shifted away. | 572 // tag is shifted away. |
501 void MacroAssembler::RecordWrite(Register object, | 573 void MacroAssembler::RecordWrite( |
502 Register address, | 574 Register object, |
503 Register value, | 575 Register address, |
504 LinkRegisterStatus lr_status, | 576 Register value, |
505 SaveFPRegsMode fp_mode, | 577 LinkRegisterStatus lr_status, |
506 RememberedSetAction remembered_set_action, | 578 SaveFPRegsMode fp_mode, |
507 SmiCheck smi_check) { | 579 RememberedSetAction remembered_set_action, |
| 580 SmiCheck smi_check, |
| 581 PointersToHereCheck pointers_to_here_check_for_value) { |
508 ASSERT(!object.is(value)); | 582 ASSERT(!object.is(value)); |
509 if (emit_debug_code()) { | 583 if (emit_debug_code()) { |
510 ldr(ip, MemOperand(address)); | 584 ldr(ip, MemOperand(address)); |
511 cmp(ip, value); | 585 cmp(ip, value); |
512 Check(eq, kWrongAddressOrValuePassedToRecordWrite); | 586 Check(eq, kWrongAddressOrValuePassedToRecordWrite); |
513 } | 587 } |
514 | 588 |
| 589 if (remembered_set_action == OMIT_REMEMBERED_SET && |
| 590 !FLAG_incremental_marking) { |
| 591 return; |
| 592 } |
| 593 |
515 // Count number of write barriers in generated code. | 594 // Count number of write barriers in generated code. |
516 isolate()->counters()->write_barriers_static()->Increment(); | 595 isolate()->counters()->write_barriers_static()->Increment(); |
517 // TODO(mstarzinger): Dynamic counter missing. | 596 // TODO(mstarzinger): Dynamic counter missing. |
518 | 597 |
519 // First, check if a write barrier is even needed. The tests below | 598 // First, check if a write barrier is even needed. The tests below |
520 // catch stores of smis and stores into the young generation. | 599 // catch stores of smis and stores into the young generation. |
521 Label done; | 600 Label done; |
522 | 601 |
523 if (smi_check == INLINE_SMI_CHECK) { | 602 if (smi_check == INLINE_SMI_CHECK) { |
524 JumpIfSmi(value, &done); | 603 JumpIfSmi(value, &done); |
525 } | 604 } |
526 | 605 |
527 CheckPageFlag(value, | 606 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) { |
528 value, // Used as scratch. | 607 CheckPageFlag(value, |
529 MemoryChunk::kPointersToHereAreInterestingMask, | 608 value, // Used as scratch. |
530 eq, | 609 MemoryChunk::kPointersToHereAreInterestingMask, |
531 &done); | 610 eq, |
| 611 &done); |
| 612 } |
532 CheckPageFlag(object, | 613 CheckPageFlag(object, |
533 value, // Used as scratch. | 614 value, // Used as scratch. |
534 MemoryChunk::kPointersFromHereAreInterestingMask, | 615 MemoryChunk::kPointersFromHereAreInterestingMask, |
535 eq, | 616 eq, |
536 &done); | 617 &done); |
537 | 618 |
538 // Record the actual write. | 619 // Record the actual write. |
539 if (lr_status == kLRHasNotBeenSaved) { | 620 if (lr_status == kLRHasNotBeenSaved) { |
540 push(lr); | 621 push(lr); |
541 } | 622 } |
(...skipping 3452 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
3994 sub(result, result, Operand(dividend)); | 4075 sub(result, result, Operand(dividend)); |
3995 } | 4076 } |
3996 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); | 4077 if (ms.shift() > 0) mov(result, Operand(result, ASR, ms.shift())); |
3997 add(result, result, Operand(dividend, LSR, 31)); | 4078 add(result, result, Operand(dividend, LSR, 31)); |
3998 } | 4079 } |
3999 | 4080 |
4000 | 4081 |
4001 } } // namespace v8::internal | 4082 } } // namespace v8::internal |
4002 | 4083 |
4003 #endif // V8_TARGET_ARCH_ARM | 4084 #endif // V8_TARGET_ARCH_ARM |
OLD | NEW |