OLD | NEW |
1 // Copyright 2009 the V8 project authors. All rights reserved. | 1 // Copyright 2009 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 78 matching lines...)
89 Abort("new-space object passed to RecordWriteHelper"); | 89 Abort("new-space object passed to RecordWriteHelper"); |
90 bind(¬_in_new_space); | 90 bind(¬_in_new_space); |
91 } | 91 } |
92 | 92 |
93 // Compute the page start address from the heap object pointer, and reuse | 93 // Compute the page start address from the heap object pointer, and reuse |
94 // the 'object' register for it. | 94 // the 'object' register for it. |
95 and_(object, Immediate(~Page::kPageAlignmentMask)); | 95 and_(object, Immediate(~Page::kPageAlignmentMask)); |
96 | 96 |
97 // Compute number of region covering addr. See Page::GetRegionNumberForAddress | 97 // Compute number of region covering addr. See Page::GetRegionNumberForAddress |
98 // method for more details. | 98 // method for more details. |
99 and_(addr, Immediate(Page::kPageAlignmentMask)); | |
100 shrl(addr, Immediate(Page::kRegionSizeLog2)); | 99 shrl(addr, Immediate(Page::kRegionSizeLog2)); |
| 100 andl(addr, Immediate(Page::kPageAlignmentMask >> Page::kRegionSizeLog2)); |
101 | 101 |
102 // Set dirty mark for region. | 102 // Set dirty mark for region. |
103 bts(Operand(object, Page::kDirtyFlagOffset), addr); | 103 bts(Operand(object, Page::kDirtyFlagOffset), addr); |
104 } | 104 } |
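The reordering above is a pure identity: for a low-bit mask M and shift s, (x & M) >> s == (x >> s) & (M >> s), so shifting first and masking with the pre-shifted mask selects the same region number. A minimal standalone check of that equivalence, using hypothetical stand-ins for Page::kPageAlignmentMask and Page::kRegionSizeLog2:

    #include <cassert>
    #include <cstdint>

    int main() {
      // Hypothetical stand-ins; the identity holds for any low-bit mask.
      const uint32_t kPageAlignmentMask = (1u << 13) - 1;  // assume 8KB pages
      const int kRegionSizeLog2 = 8;                       // assume 256-byte regions

      for (uint32_t addr = 0; addr < (1u << 16); addr += 7) {
        uint32_t mask_then_shift = (addr & kPageAlignmentMask) >> kRegionSizeLog2;
        uint32_t shift_then_mask =
            (addr >> kRegionSizeLog2) & (kPageAlignmentMask >> kRegionSizeLog2);
        assert(mask_then_shift == shift_then_mask);  // old form == new form
      }
      return 0;
    }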
105 | 105 |
106 | 106 |
107 // For page containing |object| mark region covering [object+offset] dirty. | 107 // For page containing |object| mark region covering [object+offset] dirty. |
108 // object is the object being stored into, value is the object being stored. | 108 // object is the object being stored into, value is the object being stored. |
109 // If offset is zero, then the smi_index register contains the array index into | 109 // If offset is zero, then the index register contains the array index into |
110 // the elements array represented as a smi. Otherwise it can be used as a | 110 // the elements array represented as a zero-extended int32. Otherwise it can be |
111 // scratch register. | 111 // used as a scratch register. |
112 // All registers are clobbered by the operation. | 112 // All registers are clobbered by the operation. |
113 void MacroAssembler::RecordWrite(Register object, | 113 void MacroAssembler::RecordWrite(Register object, |
114 int offset, | 114 int offset, |
115 Register value, | 115 Register value, |
116 Register smi_index) { | 116 Register index) { |
117 // The compiled code assumes that record write doesn't change the | 117 // The compiled code assumes that record write doesn't change the |
118 // context register, so we check that none of the clobbered | 118 // context register, so we check that none of the clobbered |
119 // registers are rsi. | 119 // registers are rsi. |
120 ASSERT(!object.is(rsi) && !value.is(rsi) && !smi_index.is(rsi)); | 120 ASSERT(!object.is(rsi) && !value.is(rsi) && !index.is(rsi)); |
121 | 121 |
122 // First, check if a write barrier is even needed. The tests below | 122 // First, check if a write barrier is even needed. The tests below |
123 // catch stores of Smis and stores into young gen. | 123 // catch stores of Smis and stores into young gen. |
124 Label done; | 124 Label done; |
125 JumpIfSmi(value, &done); | 125 JumpIfSmi(value, &done); |
126 | 126 |
127 RecordWriteNonSmi(object, offset, value, smi_index); | 127 RecordWriteNonSmi(object, offset, value, index); |
128 bind(&done); | 128 bind(&done); |
129 | 129 |
130 // Clobber all input registers when running with the debug-code flag | 130 // Clobber all input registers when running with the debug-code flag |
131 // turned on to provoke errors. This clobbering repeats the | 131 // turned on to provoke errors. This clobbering repeats the |
132 // clobbering done inside RecordWriteNonSmi but it's necessary to | 132 // clobbering done inside RecordWriteNonSmi but it's necessary to |
133 // avoid having the fast case for smis leave the registers | 133 // avoid having the fast case for smis leave the registers |
134 // unchanged. | 134 // unchanged. |
135 if (FLAG_debug_code) { | 135 if (FLAG_debug_code) { |
136 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 136 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
137 movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 137 movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
138 movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 138 movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
139 } | 139 } |
140 } | 140 } |
141 | 141 |
142 | 142 |
143 void MacroAssembler::RecordWriteNonSmi(Register object, | 143 void MacroAssembler::RecordWriteNonSmi(Register object, |
144 int offset, | 144 int offset, |
145 Register scratch, | 145 Register scratch, |
146 Register smi_index) { | 146 Register index) { |
147 Label done; | 147 Label done; |
148 | 148 |
149 if (FLAG_debug_code) { | 149 if (FLAG_debug_code) { |
150 Label okay; | 150 Label okay; |
151 JumpIfNotSmi(object, &okay); | 151 JumpIfNotSmi(object, &okay); |
152 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); | 152 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); |
153 bind(&okay); | 153 bind(&okay); |
| 154 |
| 155 if (offset == 0) { |
| 156 // index must be int32. |
| 157 Register tmp = index.is(rax) ? rbx : rax; |
| 158 push(tmp); |
| 159 movl(tmp, index); |
| 160 cmpq(tmp, index); |
| 161 Check(equal, "Index register for RecordWrite must be untagged int32."); |
| 162 pop(tmp); |
| 163 } |
154 } | 164 } |
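The new debug check relies on movl zero-extending its 32-bit result into the full 64-bit register, so the cmpq against the original value succeeds exactly when the upper 32 bits were already zero. A minimal sketch of the same predicate in C++ (illustrative only):

    #include <cassert>
    #include <cstdint>

    // True iff the value is a zero-extended int32, i.e. the upper 32 bits
    // are clear -- the property the movl/cmpq pair verifies.
    bool IsZeroExtendedInt32(uint64_t index) {
      uint64_t tmp = static_cast<uint32_t>(index);  // movl(tmp, index)
      return tmp == index;                          // cmpq(tmp, index)
    }

    int main() {
      assert(IsZeroExtendedInt32(0x00000000FFFFFFFFull));
      assert(!IsZeroExtendedInt32(0x0000000100000000ull));  // stray upper bits
      return 0;
    }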
155 | 165 |
156 // Test that the object address is not in the new space. We cannot | 166 // Test that the object address is not in the new space. We cannot |
157 // update page dirty marks for new space pages. | 167 // update page dirty marks for new space pages. |
158 InNewSpace(object, scratch, equal, &done); | 168 InNewSpace(object, scratch, equal, &done); |
159 | 169 |
160 // The offset is relative to a tagged or untagged HeapObject pointer, | 170 // The offset is relative to a tagged or untagged HeapObject pointer, |
161 // so either offset or offset + kHeapObjectTag must be a | 171 // so either offset or offset + kHeapObjectTag must be a |
162 // multiple of kPointerSize. | 172 // multiple of kPointerSize. |
163 ASSERT(IsAligned(offset, kPointerSize) || | 173 ASSERT(IsAligned(offset, kPointerSize) || |
164 IsAligned(offset + kHeapObjectTag, kPointerSize)); | 174 IsAligned(offset + kHeapObjectTag, kPointerSize)); |
165 | 175 |
166 Register dst = smi_index; | 176 Register dst = index; |
167 if (offset != 0) { | 177 if (offset != 0) { |
168 lea(dst, Operand(object, offset)); | 178 lea(dst, Operand(object, offset)); |
169 } else { | 179 } else { |
170 // array access: calculate the destination address in the same manner as | 180 // array access: calculate the destination address in the same manner as |
171 // KeyedStoreIC::GenerateGeneric. | 181 // KeyedStoreIC::GenerateGeneric. |
172 SmiIndex index = SmiToIndex(smi_index, smi_index, kPointerSizeLog2); | |
173 lea(dst, FieldOperand(object, | 182 lea(dst, FieldOperand(object, |
174 index.reg, | 183 index, |
175 index.scale, | 184 times_pointer_size, |
176 FixedArray::kHeaderSize)); | 185 FixedArray::kHeaderSize)); |
177 } | 186 } |
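With index now an untagged int32, the smi untagging step (SmiToIndex) drops out and the element address can be formed with a plain times_pointer_size scale. A sketch of the address arithmetic the new lea performs; kHeapObjectTag and kPointerSize match V8's actual x64 values, while kFixedArrayHeaderSize is an assumed stand-in for FixedArray::kHeaderSize:

    #include <cstdint>

    const int kHeapObjectTag = 1;           // V8 heap pointers carry tag 1
    const int kPointerSize = 8;             // x64
    const int kFixedArrayHeaderSize = 16;   // assumed stand-in

    // Address computed by
    // lea(dst, FieldOperand(object, index, times_pointer_size, header)).
    uintptr_t ElementSlot(uintptr_t tagged_object, uint32_t index) {
      return tagged_object - kHeapObjectTag + kFixedArrayHeaderSize +
             static_cast<uintptr_t>(index) * kPointerSize;
    }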
178 RecordWriteHelper(object, dst, scratch); | 187 RecordWriteHelper(object, dst, scratch); |
179 | 188 |
180 bind(&done); | 189 bind(&done); |
181 | 190 |
182 // Clobber all input registers when running with the debug-code flag | 191 // Clobber all input registers when running with the debug-code flag |
183 // turned on to provoke errors. | 192 // turned on to provoke errors. |
184 if (FLAG_debug_code) { | 193 if (FLAG_debug_code) { |
185 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 194 movq(object, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
186 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 195 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
187 movq(smi_index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 196 movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
188 } | 197 } |
189 } | 198 } |
190 | 199 |
191 | 200 |
192 void MacroAssembler::InNewSpace(Register object, | 201 void MacroAssembler::InNewSpace(Register object, |
193 Register scratch, | 202 Register scratch, |
194 Condition cc, | 203 Condition cc, |
195 Label* branch) { | 204 Label* branch) { |
196 if (Serializer::enabled()) { | 205 if (Serializer::enabled()) { |
197 // Can't do arithmetic on external references if it might get serialized. | 206 // Can't do arithmetic on external references if it might get serialized. |
(...skipping 280 matching lines...)
478 Label* on_overflow) { | 487 Label* on_overflow) { |
479 ASSERT_EQ(0, kSmiTag); | 488 ASSERT_EQ(0, kSmiTag); |
480 // 32-bit integer always fits in a long smi. | 489 // 32-bit integer always fits in a long smi. |
481 if (!dst.is(src)) { | 490 if (!dst.is(src)) { |
482 movl(dst, src); | 491 movl(dst, src); |
483 } | 492 } |
484 shl(dst, Immediate(kSmiShift)); | 493 shl(dst, Immediate(kSmiShift)); |
485 } | 494 } |
486 | 495 |
487 | 496 |
| 497 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { |
| 498 if (FLAG_debug_code) { |
| 499 testb(dst, Immediate(0x01)); |
| 500 Label ok; |
| 501 j(zero, &ok); |
| 502 if (allow_stub_calls()) { |
| 503 Abort("Integer32ToSmiField writing to non-smi location"); |
| 504 } else { |
| 505 int3(); |
| 506 } |
| 507 bind(&ok); |
| 508 } |
| 509 ASSERT(kSmiShift % kBitsPerByte == 0); |
| 510 movl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| 511 } |
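Integer32ToSmiField works because an x64 smi is the 32-bit value shifted left by kSmiShift (32), so the payload occupies the upper half of the word and the lower half of any valid smi is all zero. A 4-byte store at offset kSmiShift / kBitsPerByte therefore yields a well-formed smi, provided the field already held one (which the debug code asserts via the tag bit). A little-endian sketch of that layout:

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    // x64 smi encoding: value << 32, lower 32 bits (tag included) all zero.
    int64_t MakeSmi(int32_t value) { return static_cast<int64_t>(value) << 32; }

    int main() {
      int64_t slot = MakeSmi(7);  // the field already holds a smi
      int32_t new_value = -42;
      // movl(Operand(dst, kSmiShift / kBitsPerByte), src): a 32-bit store at
      // byte offset 4, which on little-endian x64 is the upper half.
      std::memcpy(reinterpret_cast<char*>(&slot) + 4, &new_value,
                  sizeof(new_value));
      assert(slot == MakeSmi(-42));  // lower half stayed zero
      return 0;
    }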
| 512 |
| 513 |
488 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, | 514 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, |
489 Register src, | 515 Register src, |
490 int constant) { | 516 int constant) { |
491 if (dst.is(src)) { | 517 if (dst.is(src)) { |
492 addq(dst, Immediate(constant)); | 518 addq(dst, Immediate(constant)); |
493 } else { | 519 } else { |
494 lea(dst, Operand(src, constant)); | 520 lea(dst, Operand(src, constant)); |
495 } | 521 } |
496 shl(dst, Immediate(kSmiShift)); | 522 shl(dst, Immediate(kSmiShift)); |
497 } | 523 } |
(...skipping 15 matching lines...)
513 | 539 |
514 void MacroAssembler::SmiToInteger64(Register dst, Register src) { | 540 void MacroAssembler::SmiToInteger64(Register dst, Register src) { |
515 ASSERT_EQ(0, kSmiTag); | 541 ASSERT_EQ(0, kSmiTag); |
516 if (!dst.is(src)) { | 542 if (!dst.is(src)) { |
517 movq(dst, src); | 543 movq(dst, src); |
518 } | 544 } |
519 sar(dst, Immediate(kSmiShift)); | 545 sar(dst, Immediate(kSmiShift)); |
520 } | 546 } |
521 | 547 |
522 | 548 |
| 549 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { |
| 550 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| 551 } |
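The Operand overload is the load-side counterpart: instead of loading the full word and doing sar(dst, kSmiShift) as the register version above does, it sign-extend-loads just the 32-bit payload at byte offset kSmiShift / kBitsPerByte. A sketch of the equivalence (little-endian, kSmiShift == 32 as on x64):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int64_t slot = static_cast<int64_t>(-5) << 32;  // smi -5
      // movsxlq(dst, Operand(src, 4)): sign-extending 32-bit load of the
      // payload half of the word.
      int32_t payload;
      std::memcpy(&payload, reinterpret_cast<char*>(&slot) + 4, sizeof(payload));
      assert(static_cast<int64_t>(payload) == slot >> 32);  // == sar by 32
      return 0;
    }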
| 552 |
| 553 |
523 void MacroAssembler::SmiTest(Register src) { | 554 void MacroAssembler::SmiTest(Register src) { |
524 testq(src, src); | 555 testq(src, src); |
525 } | 556 } |
526 | 557 |
527 | 558 |
528 void MacroAssembler::SmiCompare(Register dst, Register src) { | 559 void MacroAssembler::SmiCompare(Register dst, Register src) { |
529 cmpq(dst, src); | 560 cmpq(dst, src); |
530 } | 561 } |
531 | 562 |
532 | 563 |
(...skipping 16 matching lines...)
549 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { | 580 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { |
550 cmpq(dst, src); | 581 cmpq(dst, src); |
551 } | 582 } |
552 | 583 |
553 | 584 |
554 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { | 585 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { |
555 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); | 586 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); |
556 } | 587 } |
557 | 588 |
558 | 589 |
| 590 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) { |
| 591 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| 592 } |
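SmiCompareInteger32 applies the same layout to comparison: like the SmiCompare(const Operand&, Smi*) overload above, it compares only the 32-bit payload with cmpl, here against an untagged int32 in a register, sparing a tag/untag round trip. A tiny little-endian sketch (kSmiShift == 32 assumed):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int64_t slot = static_cast<int64_t>(123) << 32;  // smi 123
      int32_t untagged = 123;
      int32_t payload;  // cmpl reads the payload half of the field directly
      std::memcpy(&payload, reinterpret_cast<char*>(&slot) + 4, sizeof(payload));
      assert(payload == untagged);
      return 0;
    }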
| 593 |
| 594 |
559 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, | 595 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, |
560 Register src, | 596 Register src, |
561 int power) { | 597 int power) { |
562 ASSERT(power >= 0); | 598 ASSERT(power >= 0); |
563 ASSERT(power < 64); | 599 ASSERT(power < 64); |
564 if (power == 0) { | 600 if (power == 0) { |
565 SmiToInteger64(dst, src); | 601 SmiToInteger64(dst, src); |
566 return; | 602 return; |
567 } | 603 } |
568 if (!dst.is(src)) { | 604 if (!dst.is(src)) { |
(...skipping 2144 matching lines...)
2713 CPU::FlushICache(address_, size_); | 2749 CPU::FlushICache(address_, size_); |
2714 | 2750 |
2715 // Check that the code was patched as expected. | 2751 // Check that the code was patched as expected. |
2716 ASSERT(masm_.pc_ == address_ + size_); | 2752 ASSERT(masm_.pc_ == address_ + size_); |
2717 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 2753 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
2718 } | 2754 } |
2719 | 2755 |
2720 } } // namespace v8::internal | 2756 } } // namespace v8::internal |
2721 | 2757 |
2722 #endif // V8_TARGET_ARCH_X64 | 2758 #endif // V8_TARGET_ARCH_X64 |