OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 63 matching lines...)
74 | 74 |
75 void MacroAssembler::StoreRoot(Register source, | 75 void MacroAssembler::StoreRoot(Register source, |
76 Heap::RootListIndex index, | 76 Heap::RootListIndex index, |
77 Condition cond, | 77 Condition cond, |
78 Register src1, const Operand& src2) { | 78 Register src1, const Operand& src2) { |
79 Branch(2, NegateCondition(cond), src1, src2); | 79 Branch(2, NegateCondition(cond), src1, src2); |
80 sw(source, MemOperand(s6, index << kPointerSizeLog2)); | 80 sw(source, MemOperand(s6, index << kPointerSizeLog2)); |
81 } | 81 } |
82 | 82 |
83 | 83 |
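StoreRoot above is a conditional store into the isolate's root-list table, which the MIPS port keeps pinned in register s6; Branch(2, NegateCondition(cond), ...) skips over the following sw when the condition fails, so the store only lands when the condition holds. A minimal C++ sketch of the intended semantics, with a plain array standing in for the roots table:

    #include <cstdint>

    // Sketch only, not V8 code: `roots` stands in for the table addressed
    // through s6; kPointerSizeLog2 == 2 for 4-byte pointers on MIPS32.
    void StoreRootSketch(uintptr_t* roots, int index, uintptr_t source,
                         bool cond) {
      // Branch(2, NegateCondition(cond), ...) jumps past the sw when !cond,
      // i.e. the store happens only when `cond` is true.
      if (cond) {
        // MemOperand(s6, index << kPointerSizeLog2) is byte addressing;
        // indexing a uintptr_t array scales by the same factor.
        roots[index] = source;
      }
    }
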
84 void MacroAssembler::RecordWriteHelper(Register object, | |
85 Register address, | |
86 Register scratch) { | |
87 if (emit_debug_code()) { | |
88 // Check that the object is not in new space. | |
89 Label not_in_new_space; | |
90 InNewSpace(object, scratch, ne, &not_in_new_space); | 
91 Abort("new-space object passed to RecordWriteHelper"); | |
92 bind(&not_in_new_space); | 
93 } | |
94 | |
95 // Calculate page address: Clear bits from 0 to kPageSizeBits. | |
96 if (mips32r2) { | |
97 Ins(object, zero_reg, 0, kPageSizeBits); | |
98 } else { | |
99 // The Ins macro is slow on r1, so use shifts instead. | |
100 srl(object, object, kPageSizeBits); | |
101 sll(object, object, kPageSizeBits); | |
102 } | |
103 | |
104 // Calculate region number. | |
105 Ext(address, address, Page::kRegionSizeLog2, | |
106 kPageSizeBits - Page::kRegionSizeLog2); | |
107 | |
108 // Mark region dirty. | |
109 lw(scratch, MemOperand(object, Page::kDirtyFlagOffset)); | |
110 li(at, Operand(1)); | |
111 sllv(at, at, address); | |
112 or_(scratch, scratch, at); | |
113 sw(scratch, MemOperand(object, Page::kDirtyFlagOffset)); | |
114 } | |
115 | |
116 | |
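The deleted RecordWriteHelper implemented the old region-marking write barrier: clear the low kPageSizeBits of the object address to reach the page header, extract the region number from the middle bits of the store address, and set that region's bit in the page's dirty-flag word. A sketch of the arithmetic, with illustrative constants (the real kPageSizeBits and kRegionSizeLog2 came from the old heap layout):

    #include <cstdint>

    // Sketch of the retired region-marking barrier, not V8 code.
    const int kPageSizeBits = 13;    // assumed value, for illustration only
    const int kRegionSizeLog2 = 8;   // assumed value, for illustration only

    uint32_t MarkRegionDirty(uintptr_t object, uintptr_t address,
                             uint32_t dirty_flags) {
      // Ins(object, zero_reg, 0, kPageSizeBits): clear the low page bits
      // (the srl/sll pair on pre-r2 cores computes the same thing).
      uintptr_t page = object & ~((uintptr_t(1) << kPageSizeBits) - 1);
      (void)page;  // the real code loads/stores dirty_flags through this page
      // Ext(address, address, kRegionSizeLog2, kPageSizeBits - kRegionSizeLog2)
      uint32_t region = (address >> kRegionSizeLog2) &
                        ((1u << (kPageSizeBits - kRegionSizeLog2)) - 1);
      return dirty_flags | (1u << region);  // or_ the region's bit in
    }
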
117 // Push and pop all registers that can hold pointers. | 84 // Push and pop all registers that can hold pointers. |
118 void MacroAssembler::PushSafepointRegisters() { | 85 void MacroAssembler::PushSafepointRegisters() { |
119 // Safepoints expect a block of kNumSafepointRegisters values on the | 86 // Safepoints expect a block of kNumSafepointRegisters values on the |
120 // stack, so adjust the stack for unsaved registers. | 87 // stack, so adjust the stack for unsaved registers. |
121 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 88 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
122 ASSERT(num_unsaved >= 0); | 89 ASSERT(num_unsaved >= 0); |
123 if (num_unsaved > 0) { | 90 if (num_unsaved > 0) { |
124 Subu(sp, sp, Operand(num_unsaved * kPointerSize)); | 91 Subu(sp, sp, Operand(num_unsaved * kPointerSize)); |
125 } | 92 } |
126 MultiPush(kSafepointSavedRegisters); | 93 MultiPush(kSafepointSavedRegisters); |
(...skipping 70 matching lines...)
197 Register scratch, | 164 Register scratch, |
198 Condition cc, | 165 Condition cc, |
199 Label* branch) { | 166 Label* branch) { |
200 ASSERT(cc == eq || cc == ne); | 167 ASSERT(cc == eq || cc == ne); |
201 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); | 168 And(scratch, object, Operand(ExternalReference::new_space_mask(isolate()))); |
202 Branch(branch, cc, scratch, | 169 Branch(branch, cc, scratch, |
203 Operand(ExternalReference::new_space_start(isolate()))); | 170 Operand(ExternalReference::new_space_start(isolate()))); |
204 } | 171 } |
205 | 172 |
206 | 173 |
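InNewSpace works because the new space sits in one aligned, power-of-two-sized reservation, so membership reduces to a mask-and-compare; a sketch under that assumption:

    #include <cstdint>

    // Sketch of the InNewSpace test, not V8 code.
    bool InNewSpaceSketch(uintptr_t object, uintptr_t new_space_start,
                          uintptr_t new_space_mask) {
      // And(scratch, object, mask); Branch(cc, scratch, new_space_start):
      // with cc == eq the branch is taken when the object is in new space.
      return (object & new_space_mask) == new_space_start;
    }
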
207 // Will clobber 4 registers: object, scratch0, scratch1, at. The | 174 void MacroAssembler::RecordWriteField( |
208 // register 'object' contains a heap object pointer. The heap object | 175 Register object, |
209 // tag is shifted away. | 176 int offset, |
210 void MacroAssembler::RecordWrite(Register object, | 177 Register value, |
211 Operand offset, | 178 Register dst, |
212 Register scratch0, | 179 RAStatus ra_status, |
213 Register scratch1) { | 180 SaveFPRegsMode save_fp, |
214 // The compiled code assumes that record write doesn't change the | 181 RememberedSetAction remembered_set_action, |
215 // context register, so we check that none of the clobbered | 182 SmiCheck smi_check) { |
216 // registers are cp. | 183 ASSERT(!AreAliased(value, dst, t8, object)); |
217 ASSERT(!object.is(cp) && !scratch0.is(cp) && !scratch1.is(cp)); | 184 // First, check if a write barrier is even needed. The tests below |
218 | 185 // catch stores of Smis. |
219 Label done; | 186 Label done; |
220 | 187 |
221 // First, test that the object is not in the new space. We cannot set | 188 // Skip barrier if writing a smi. |
222 // region marks for new space pages. | 189 if (smi_check == INLINE_SMI_CHECK) { |
223 InNewSpace(object, scratch0, eq, &done); | 190 JumpIfSmi(value, &done); |
| 191 } |
224 | 192 |
225 // Add offset into the object. | 193 // Although the object register is tagged, the offset is relative to the start |
226 Addu(scratch0, object, offset); | 194 // of the object, so the offset must be a multiple of kPointerSize. |
| 195 ASSERT(IsAligned(offset, kPointerSize)); |
227 | 196 |
228 // Record the actual write. | 197 Addu(dst, object, Operand(offset - kHeapObjectTag)); |
229 RecordWriteHelper(object, scratch0, scratch1); | 198 if (emit_debug_code()) { |
| 199 Label ok; |
| 200 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1)); |
| 201 Branch(&ok, eq, t8, Operand(zero_reg)); |
| 202 stop("Unaligned cell in write barrier"); |
| 203 bind(&ok); |
| 204 } |
| 205 |
| 206 RecordWrite(object, |
| 207 dst, |
| 208 value, |
| 209 ra_status, |
| 210 save_fp, |
| 211 remembered_set_action, |
| 212 OMIT_SMI_CHECK); |
230 | 213 |
231 bind(&done); | 214 bind(&done); |
232 | 215 |
233 // Clobber all input registers when running with the debug-code flag | 216 // Clobber clobbered input registers when running with the debug-code flag |
234 // turned on to provoke errors. | 217 // turned on to provoke errors. |
235 if (emit_debug_code()) { | 218 if (emit_debug_code()) { |
236 li(object, Operand(BitCast<int32_t>(kZapValue))); | 219 li(value, Operand(BitCast<int32_t>(kZapValue + 4))); |
237 li(scratch0, Operand(BitCast<int32_t>(kZapValue))); | 220 li(dst, Operand(BitCast<int32_t>(kZapValue + 8))); |
238 li(scratch1, Operand(BitCast<int32_t>(kZapValue))); | |
239 } | 221 } |
240 } | 222 } |
241 | 223 |
242 | 224 |
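RecordWriteField turns a tagged object pointer plus a field offset into an untagged slot address before handing off to RecordWrite: subtracting kHeapObjectTag strips the tag bit, and the emit_debug_code() block re-checks the resulting alignment at run time. A sketch of that computation:

    #include <cassert>
    #include <cstdint>

    // Sketch only, not V8 code. Heap object pointers carry a one-bit tag;
    // kHeapObjectTag == 1 and kPointerSize == 4 on 32-bit V8.
    const uintptr_t kHeapObjectTag = 1;
    const int kPointerSize = 4;

    uintptr_t FieldSlotAddress(uintptr_t tagged_object, int offset) {
      assert(offset % kPointerSize == 0);  // ASSERT(IsAligned(offset, ...))
      uintptr_t dst = tagged_object + offset - kHeapObjectTag;
      // Mirrors the And/Branch/stop("Unaligned cell in write barrier") check.
      assert((dst & (kPointerSize - 1)) == 0);
      return dst;
    }
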
243 // Will clobber 4 registers: object, address, scratch, ip. The | 225 // Will clobber 4 registers: object, address, scratch, ip. The |
244 // register 'object' contains a heap object pointer. The heap object | 226 // register 'object' contains a heap object pointer. The heap object |
245 // tag is shifted away. | 227 // tag is shifted away. |
246 void MacroAssembler::RecordWrite(Register object, | 228 void MacroAssembler::RecordWrite(Register object, |
247 Register address, | 229 Register address, |
248 Register scratch) { | 230 Register value, |
| 231 RAStatus ra_status, |
| 232 SaveFPRegsMode fp_mode, |
| 233 RememberedSetAction remembered_set_action, |
| 234 SmiCheck smi_check) { |
| 235 ASSERT(!AreAliased(object, address, value, t8)); |
| 236 ASSERT(!AreAliased(object, address, value, t9)); |
249 // The compiled code assumes that record write doesn't change the | 237 // The compiled code assumes that record write doesn't change the |
250 // context register, so we check that none of the clobbered | 238 // context register, so we check that none of the clobbered |
251 // registers are cp. | 239 // registers are cp. |
252 ASSERT(!object.is(cp) && !address.is(cp) && !scratch.is(cp)); | 240 ASSERT(!address.is(cp) && !value.is(cp)); |
253 | 241 |
254 Label done; | 242 Label done; |
255 | 243 |
256 // First, test that the object is not in the new space. We cannot set | 244 if (smi_check == INLINE_SMI_CHECK) { |
257 // region marks for new space pages. | 245 ASSERT_EQ(0, kSmiTag); |
258 InNewSpace(object, scratch, eq, &done); | 246 And(t8, value, Operand(kSmiTagMask)); |
| 247 Branch(&done, eq, t8, Operand(zero_reg)); |
| 248 } |
| 249 |
| 250 CheckPageFlag(value, |
| 251 value, // Used as scratch. |
| 252 MemoryChunk::kPointersToHereAreInterestingMask, |
| 253 eq, |
| 254 &done); |
| 255 CheckPageFlag(object, |
| 256 value, // Used as scratch. |
| 257 MemoryChunk::kPointersFromHereAreInterestingMask, |
| 258 eq, |
| 259 &done); |
259 | 260 |
260 // Record the actual write. | 261 // Record the actual write. |
261 RecordWriteHelper(object, address, scratch); | 262 if (ra_status == kRAHasNotBeenSaved) { |
| 263 push(ra); |
| 264 } |
| 265 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); |
| 266 CallStub(&stub); |
| 267 if (ra_status == kRAHasNotBeenSaved) { |
| 268 pop(ra); |
| 269 } |
262 | 270 |
263 bind(&done); | 271 bind(&done); |
264 | 272 |
265 // Clobber all input registers when running with the debug-code flag | 273 // Clobber clobbered registers when running with the debug-code flag |
266 // turned on to provoke errors. | 274 // turned on to provoke errors. |
267 if (emit_debug_code()) { | 275 if (emit_debug_code()) { |
268 li(object, Operand(BitCast<int32_t>(kZapValue))); | 276 li(address, Operand(BitCast<int32_t>(kZapValue + 12))); |
269 li(address, Operand(BitCast<int32_t>(kZapValue))); | 277 li(value, Operand(BitCast<int32_t>(kZapValue + 16))); |
270 li(scratch, Operand(BitCast<int32_t>(kZapValue))); | |
271 } | 278 } |
272 } | 279 } |
273 | 280 |
| 281 |
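The rewritten RecordWrite filters stores before paying for the stub call: skip if the value is a smi, skip if the value's page is not an interesting target, skip if the object's page is not an interesting source. A condensed sketch of the filtering, with the page-flag words passed in rather than loaded through CheckPageFlag, and stand-in mask values:

    #include <cstdint>

    // Sketch of RecordWrite's fast-path filtering, not V8 code. The two mask
    // constants below are stand-ins for MemoryChunk's real flag bits.
    const uintptr_t kSmiTagMask = 1;  // kSmiTag == 0: smis have a clear low bit
    const uint32_t kPointersToHereAreInterestingMask = 1u << 0;    // stand-in
    const uint32_t kPointersFromHereAreInterestingMask = 1u << 1;  // stand-in

    bool NeedsWriteBarrier(uintptr_t value, uint32_t value_page_flags,
                           uint32_t object_page_flags) {
      if ((value & kSmiTagMask) == 0) return false;  // INLINE_SMI_CHECK branch
      if ((value_page_flags & kPointersToHereAreInterestingMask) == 0)
        return false;   // first CheckPageFlag(..., eq, &done)
      if ((object_page_flags & kPointersFromHereAreInterestingMask) == 0)
        return false;   // second CheckPageFlag(..., eq, &done)
      return true;      // only now does the code call RecordWriteStub
    }
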
| 282 void MacroAssembler::RememberedSetHelper(Register address, |
| 283 Register scratch, |
| 284 SaveFPRegsMode fp_mode, |
| 285 RememberedSetFinalAction and_then) { |
| 286 Label done; |
| 287 // Load store buffer top. |
| 288 ExternalReference store_buffer = |
| 289 ExternalReference::store_buffer_top(isolate()); |
| 290 li(t8, Operand(store_buffer)); |
| 291 lw(scratch, MemOperand(t8)); |
| 292 // Store pointer to buffer and increment buffer top. |
| 293 sw(address, MemOperand(scratch)); |
| 294 Addu(scratch, scratch, kPointerSize); |
| 295 // Write back new top of buffer. |
| 296 sw(scratch, MemOperand(t8)); |
| 297 // Call stub on end of buffer. |
| 298 // Check for end of buffer. |
| 299 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); |
| 300 if (and_then == kFallThroughAtEnd) { |
| 301 Branch(&done, eq, t8, Operand(zero_reg)); |
| 302 } else { |
| 303 ASSERT(and_then == kReturnAtEnd); |
| 304 Ret(ne, t8, Operand(zero_reg)); |
| 305 } |
| 306 push(ra); |
| 307 StoreBufferOverflowStub store_buffer_overflow = |
| 308 StoreBufferOverflowStub(fp_mode); |
| 309 CallStub(&store_buffer_overflow); |
| 310 pop(ra); |
| 311 bind(&done); |
| 312 if (and_then == kReturnAtEnd) { |
| 313 Ret(); |
| 314 } |
| 315 } |
| 316 |
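RememberedSetHelper is a bump-pointer append into the store buffer, with the overflow test folded into the new top address itself: the buffer is placed so that running off its end sets kStoreBufferOverflowBit in the pointer. A sketch assuming that address encoding (the bit position is illustrative):

    #include <cstdint>

    // Sketch of the store-buffer append, not V8 code.
    const uintptr_t kStoreBufferOverflowBit = uintptr_t(1) << 16;  // assumed

    bool StoreBufferAppend(uintptr_t** top_cell, uintptr_t slot_address) {
      uintptr_t* top = *top_cell;  // lw(scratch, MemOperand(t8))
      *top = slot_address;         // sw(address, MemOperand(scratch))
      top += 1;                    // Addu(scratch, scratch, kPointerSize)
      *top_cell = top;             // sw(scratch, MemOperand(t8))
      // And(t8, scratch, kStoreBufferOverflowBit): nonzero means the buffer
      // is full and StoreBufferOverflowStub must drain it.
      return (reinterpret_cast<uintptr_t>(top) & kStoreBufferOverflowBit) != 0;
    }
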
274 | 317 |
275 // ----------------------------------------------------------------------------- | 318 // ----------------------------------------------------------------------------- |
276 // Allocation support. | 319 // Allocation support. |
277 | 320 |
278 | 321 |
279 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 322 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
280 Register scratch, | 323 Register scratch, |
281 Label* miss) { | 324 Label* miss) { |
282 Label same_contexts; | 325 Label same_contexts; |
283 | 326 |
(...skipping 3588 matching lines...)
3872 li(a1, Operand(ExternalReference(f, isolate()))); | 3915 li(a1, Operand(ExternalReference(f, isolate()))); |
3873 CEntryStub stub(1); | 3916 CEntryStub stub(1); |
3874 CallStub(&stub); | 3917 CallStub(&stub); |
3875 } | 3918 } |
3876 | 3919 |
3877 | 3920 |
3878 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { | 3921 void MacroAssembler::CallRuntimeSaveDoubles(Runtime::FunctionId id) { |
3879 const Runtime::Function* function = Runtime::FunctionForId(id); | 3922 const Runtime::Function* function = Runtime::FunctionForId(id); |
3880 li(a0, Operand(function->nargs)); | 3923 li(a0, Operand(function->nargs)); |
3881 li(a1, Operand(ExternalReference(function, isolate()))); | 3924 li(a1, Operand(ExternalReference(function, isolate()))); |
3882 CEntryStub stub(1); | 3925 CEntryStub stub(1, kSaveFPRegs); |
3883 stub.SaveDoubles(); | |
3884 CallStub(&stub); | 3926 CallStub(&stub); |
3885 } | 3927 } |
3886 | 3928 |
3887 | 3929 |
3888 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { | 3930 void MacroAssembler::CallRuntime(Runtime::FunctionId fid, int num_arguments) { |
3889 CallRuntime(Runtime::FunctionForId(fid), num_arguments); | 3931 CallRuntime(Runtime::FunctionForId(fid), num_arguments); |
3890 } | 3932 } |
3891 | 3933 |
3892 | 3934 |
3893 void MacroAssembler::CallExternalReference(const ExternalReference& ext, | 3935 void MacroAssembler::CallExternalReference(const ExternalReference& ext, |
(...skipping 751 matching lines...)
4645 lw(scratch, MemOperand(li_location, kInstrSize)); | 4687 lw(scratch, MemOperand(li_location, kInstrSize)); |
4646 } | 4688 } |
4647 Ins(scratch, new_value, 0, kImm16Bits); | 4689 Ins(scratch, new_value, 0, kImm16Bits); |
4648 sw(scratch, MemOperand(li_location, kInstrSize)); | 4690 sw(scratch, MemOperand(li_location, kInstrSize)); |
4649 | 4691 |
4650 // Update the I-cache so the new lui and ori can be executed. | 4692 // Update the I-cache so the new lui and ori can be executed. |
4651 FlushICache(li_location, 2); | 4693 FlushICache(li_location, 2); |
4652 } | 4694 } |
4653 | 4695 |
4654 | 4696 |
| 4697 void MacroAssembler::CheckPageFlag( |
| 4698 Register object, |
| 4699 Register scratch, |
| 4700 int mask, |
| 4701 Condition cc, |
| 4702 Label* condition_met) { |
| 4703 And(scratch, object, Operand(~Page::kPageAlignmentMask)); |
| 4704 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
| 4705 And(scratch, scratch, Operand(mask)); |
| 4706 Branch(condition_met, cc, scratch, Operand(zero_reg)); |
| 4707 } |
| 4708 |
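CheckPageFlag leans on page alignment: clearing the kPageAlignmentMask bits of any address inside a chunk yields the MemoryChunk header, whose flags word can then be loaded, masked, and tested. A sketch with assumed alignment and header-offset values:

    #include <cstdint>

    // Sketch of CheckPageFlag's arithmetic, not V8 code.
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << 20) - 1;  // assumed
    const int kFlagsOffset = 2 * sizeof(uintptr_t);                 // assumed

    bool PageFlagSet(uintptr_t object, uint32_t mask) {
      // And(scratch, object, ~kPageAlignmentMask): round down to chunk start.
      uintptr_t chunk = object & ~kPageAlignmentMask;
      uint32_t flags = *reinterpret_cast<uint32_t*>(chunk + kFlagsOffset);
      return (flags & mask) != 0;  // Branch(condition_met, cc, scratch, zero)
    }
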
| 4709 |
| 4710 void MacroAssembler::JumpIfBlack(Register object, |
| 4711 Register scratch0, |
| 4712 Register scratch1, |
| 4713 Label* on_black) { |
| 4714 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. |
| 4715 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 4716 } |
| 4717 |
| 4718 |
| 4719 void MacroAssembler::HasColor(Register object, |
| 4720 Register bitmap_scratch, |
| 4721 Register mask_scratch, |
| 4722 Label* has_color, |
| 4723 int first_bit, |
| 4724 int second_bit) { |
| 4725 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); |
| 4726 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); |
| 4727 |
| 4728 GetMarkBits(object, bitmap_scratch, mask_scratch); |
| 4729 |
| 4730 Label other_color, word_boundary; |
| 4731 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 4732 And(t8, t9, Operand(mask_scratch)); |
| 4733 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg)); |
| 4734 // Shift left 1 by adding. |
| 4735 Addu(mask_scratch, mask_scratch, Operand(mask_scratch)); |
| 4736 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg)); |
| 4737 And(t8, t9, Operand(mask_scratch)); |
| 4738 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg)); |
| 4739 jmp(&other_color); |
| 4740 |
| 4741 bind(&word_boundary); |
| 4742 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize)); |
| 4743 And(t9, t9, Operand(1)); |
| 4744 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg)); |
| 4745 bind(&other_color); |
| 4746 } |
| 4747 |
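HasColor reads the two mark bits that every object owns in the page's bitmap: test the first bit against first_bit, shift the mask left by adding it to itself, and test the second bit, with a separate path when the pair straddles a 32-bit cell boundary. A sketch of the in-cell case ("10" is black, "11" grey, "00" white, per the ASSERTs):

    #include <cstdint>

    // Sketch of HasColor's two-bit test for bits within one cell, not V8 code.
    bool HasColorSketch(uint32_t cell, uint32_t mask,
                        int first_bit, int second_bit) {
      if (((cell & mask) != 0) != (first_bit == 1)) return false;
      uint32_t mask2 = mask + mask;  // "Shift left 1 by adding."
      // mask2 == 0 means the second bit lives in the next bitmap cell; the
      // real code handles that in the word_boundary block, elided here.
      if (mask2 == 0) return false;
      return ((cell & mask2) != 0) == (second_bit == 1);
    }

JumpIfBlack is just HasColor with first_bit = 1 and second_bit = 0, matching kBlackBitPattern "10".
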
| 4748 |
| 4749 // Detect some, but not all, common pointer-free objects. This is used by the |
| 4750 // incremental write barrier which doesn't care about oddballs (they are always |
| 4751 // marked black immediately so this code is not hit). |
| 4752 void MacroAssembler::JumpIfDataObject(Register value, |
| 4753 Register scratch, |
| 4754 Label* not_data_object) { |
| 4755 ASSERT(!AreAliased(value, scratch, t8, no_reg)); |
| 4756 Label is_data_object; |
| 4757 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 4758 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); |
| 4759 Branch(&is_data_object, eq, t8, Operand(scratch)); |
| 4760 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
| 4761 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
| 4762 // If it's a string and it's not a cons string then it's an object containing |
| 4763 // no GC pointers. |
| 4764 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
| 4765 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
| 4766 Branch(not_data_object, ne, t8, Operand(zero_reg)); |
| 4767 bind(&is_data_object); |
| 4768 } |
| 4769 |
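The instance-type test in JumpIfDataObject packs two questions into one mask: kIsNotStringMask (0x80) is set for every non-string and kIsIndirectStringMask (1) is set for cons and sliced strings, so a single And against their union is zero exactly for direct, pointer-free strings. A sketch:

    #include <cstdint>

    // Sketch only; the constants match the ASSERTs in the code above.
    const uint32_t kIsIndirectStringMask = 1;
    const uint32_t kIsNotStringMask = 0x80;

    bool IsDirectString(uint32_t instance_type) {
      // Zero iff the value is a string (0x80 clear) and non-indirect (1 clear).
      return (instance_type &
              (kIsIndirectStringMask | kIsNotStringMask)) == 0;
    }
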
| 4770 |
| 4771 void MacroAssembler::GetMarkBits(Register addr_reg, |
| 4772 Register bitmap_reg, |
| 4773 Register mask_reg) { |
| 4774 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); |
| 4775 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); |
| 4776 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); |
| 4777 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; |
| 4778 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits); |
| 4779 sll(t8, t8, kPointerSizeLog2); |
| 4780 Addu(bitmap_reg, bitmap_reg, t8); |
| 4781 li(t8, Operand(1)); |
| 4782 sllv(mask_reg, t8, mask_reg); |
| 4783 } |
| 4784 |
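GetMarkBits splits an address into three fields: the page base (the bitmap sits at a fixed offset from it), the cell index within the bitmap, and the bit index within the 32-bit cell. A sketch of the same field extraction, with an assumed page size and the bitmap taken to start at the page base for simplicity:

    #include <cstdint>

    // Sketch of GetMarkBits' field extraction, not V8 code.
    const int kPointerSizeLog2 = 2;   // 4-byte pointers
    const int kBitsPerCellLog2 = 5;   // 32 mark bits per uint32_t cell
    const int kPageSizeBits = 20;     // assumed: 1MB pages
    const uintptr_t kPageAlignmentMask = (uintptr_t(1) << kPageSizeBits) - 1;

    void GetMarkBitsSketch(uintptr_t addr, uintptr_t* cell_addr,
                           uint32_t* mask) {
      uintptr_t page = addr & ~kPageAlignmentMask;
      // Bit index: low bits of the word index (first Ext in the real code).
      uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
      // Cell index: the remaining word-index bits, scaled to a byte offset
      // (Ext t8, addr, kLowBits, kPageSizeBits - kLowBits; sll; Addu).
      const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;
      uintptr_t cell = (addr >> kLowBits) &
                       ((uintptr_t(1) << (kPageSizeBits - kLowBits)) - 1);
      *cell_addr = page + (cell << kPointerSizeLog2);  // + header offset in V8
      *mask = 1u << bit;  // li(t8, 1); sllv(mask_reg, t8, mask_reg)
    }
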
| 4785 |
| 4786 void MacroAssembler::EnsureNotWhite( |
| 4787 Register value, |
| 4788 Register bitmap_scratch, |
| 4789 Register mask_scratch, |
| 4790 Register load_scratch, |
| 4791 Label* value_is_white_and_not_data) { |
| 4792 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); |
| 4793 GetMarkBits(value, bitmap_scratch, mask_scratch); |
| 4794 |
| 4795 // If the value is black or grey we don't need to do anything. |
| 4796 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| 4797 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 4798 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| 4799 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
| 4800 |
| 4801 Label done; |
| 4802 |
| 4803 // Since both black and grey have a 1 in the first position and white does |
| 4804 // not have a 1 there we only need to check one bit. |
| 4805 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 4806 And(t8, mask_scratch, load_scratch); |
| 4807 Branch(&done, ne, t8, Operand(zero_reg)); |
| 4808 |
| 4809 if (FLAG_debug_code) { |
| 4810 // Check for impossible bit pattern. |
| 4811 Label ok; |
| 4812 // sll may overflow, making the check conservative. |
| 4813 sll(t8, mask_scratch, 1); |
| 4814 And(t8, load_scratch, t8); |
| 4815 Branch(&ok, eq, t8, Operand(zero_reg)); |
| 4816 stop("Impossible marking bit pattern"); |
| 4817 bind(&ok); |
| 4818 } |
| 4819 |
| 4820 // Value is white. We check whether it is data that doesn't need scanning. |
| 4821 // Currently only checks for HeapNumber and non-cons strings. |
| 4822 Register map = load_scratch; // Holds map while checking type. |
| 4823 Register length = load_scratch; // Holds length of object after testing type. |
| 4824 Label is_data_object; |
| 4825 |
| 4826 // Check for heap-number |
| 4827 lw(map, FieldMemOperand(value, HeapObject::kMapOffset)); |
| 4828 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); |
| 4829 { |
| 4830 Label skip; |
| 4831 Branch(&skip, ne, t8, Operand(map)); |
| 4832 li(length, HeapNumber::kSize); |
| 4833 Branch(&is_data_object); |
| 4834 bind(&skip); |
| 4835 } |
| 4836 |
| 4837 // Check for strings. |
| 4838 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
| 4839 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
| 4840 // If it's a string and it's not a cons string then it's an object containing |
| 4841 // no GC pointers. |
| 4842 Register instance_type = load_scratch; |
| 4843 lbu(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 4844 And(t8, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask)); |
| 4845 Branch(value_is_white_and_not_data, ne, t8, Operand(zero_reg)); |
| 4846 // It's a non-indirect (non-cons and non-slice) string. |
| 4847 // If it's external, the length is just ExternalString::kSize. |
| 4848 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). |
| 4849 // External strings are the only ones with the kExternalStringTag bit |
| 4850 // set. |
| 4851 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); |
| 4852 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); |
| 4853 And(t8, instance_type, Operand(kExternalStringTag)); |
| 4854 { |
| 4855 Label skip; |
| 4856 Branch(&skip, eq, t8, Operand(zero_reg)); |
| 4857 li(length, ExternalString::kSize); |
| 4858 Branch(&is_data_object); |
| 4859 bind(&skip); |
| 4860 } |
| 4861 |
| 4862 // Sequential string, either ASCII or UC16. |
| 4863 // For ASCII (char-size of 1) we shift the smi tag away to get the length. |
| 4864 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby |
| 4865 // getting the length multiplied by 2. |
| 4866 ASSERT(kAsciiStringTag == 4 && kStringEncodingMask == 4); |
| 4867 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); |
| 4868 lw(t9, FieldMemOperand(value, String::kLengthOffset)); |
| 4869 And(t8, instance_type, Operand(kStringEncodingMask)); |
| 4870 { |
| 4871 Label skip; |
| 4872 Branch(&skip, eq, t8, Operand(zero_reg)); |
| 4873 srl(t9, t9, 1); |
| 4874 bind(&skip); |
| 4875 } |
| 4876 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); |
| 4877 And(length, length, Operand(~kObjectAlignmentMask)); |
| 4878 |
| 4879 bind(&is_data_object); |
| 4880 // Value is a data object, and it is white. Mark it black. Since we know |
| 4881 // that the object is white we can make it black by flipping one bit. |
| 4882 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 4883 Or(t8, t8, Operand(mask_scratch)); |
| 4884 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 4885 |
| 4886 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); |
| 4887 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
| 4888 Addu(t8, t8, Operand(length)); |
| 4889 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); |
| 4890 |
| 4891 bind(&done); |
| 4892 } |
| 4893 |
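The string-size computation at the end of EnsureNotWhite exploits smi tagging: a smi stores its value shifted left by one, so the raw length word already equals length * 2. For two-byte strings that is the payload size as-is; for ASCII strings one shift right recovers it. Header size and alignment are then added in. A sketch with assumed header and alignment values:

    #include <cstdint>

    // Sketch of the sequential-string size computation, not V8 code.
    const uint32_t kObjectAlignmentMask = 7;   // assumed 8-byte alignment
    const uint32_t kSeqStringHeaderSize = 12;  // assumed header size

    uint32_t SeqStringSize(uint32_t raw_smi_length, bool is_ascii) {
      // ASCII: 1 byte/char, shift the smi tag away (length * 2 / 2).
      // UC16: 2 bytes/char, the raw smi (length * 2) is already the size.
      uint32_t data_bytes = is_ascii ? (raw_smi_length >> 1) : raw_smi_length;
      // Addu(length, t9, header + mask); And(length, length, ~mask):
      // round header + payload up to the object alignment.
      return (data_bytes + kSeqStringHeaderSize + kObjectAlignmentMask) &
             ~kObjectAlignmentMask;
    }
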
| 4894 |
4655 void MacroAssembler::LoadInstanceDescriptors(Register map, | 4895 void MacroAssembler::LoadInstanceDescriptors(Register map, |
4656 Register descriptors) { | 4896 Register descriptors) { |
4657 lw(descriptors, | 4897 lw(descriptors, |
4658 FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset)); | 4898 FieldMemOperand(map, Map::kInstanceDescriptorsOrBitField3Offset)); |
4659 Label not_smi; | 4899 Label not_smi; |
4660 JumpIfNotSmi(descriptors, &not_smi); | 4900 JumpIfNotSmi(descriptors, &not_smi); |
4661 li(descriptors, Operand(FACTORY->empty_descriptor_array())); | 4901 li(descriptors, Operand(FACTORY->empty_descriptor_array())); |
4662 bind(&not_smi); | 4902 bind(&not_smi); |
4663 } | 4903 } |
4664 | 4904 |
(...skipping 34 matching lines...)
4699 Branch(&done); | 4939 Branch(&done); |
4700 | 4940 |
4701 // In 0-255 range, round and truncate. | 4941 // In 0-255 range, round and truncate. |
4702 bind(&in_bounds); | 4942 bind(&in_bounds); |
4703 round_w_d(temp_double_reg, input_reg); | 4943 round_w_d(temp_double_reg, input_reg); |
4704 mfc1(result_reg, temp_double_reg); | 4944 mfc1(result_reg, temp_double_reg); |
4705 bind(&done); | 4945 bind(&done); |
4706 } | 4946 } |
4707 | 4947 |
4708 | 4948 |
| 4949 bool AreAliased(Register r1, Register r2, Register r3, Register r4) { |
| 4950 if (r1.is(r2)) return true; |
| 4951 if (r1.is(r3)) return true; |
| 4952 if (r1.is(r4)) return true; |
| 4953 if (r2.is(r3)) return true; |
| 4954 if (r2.is(r4)) return true; |
| 4955 if (r3.is(r4)) return true; |
| 4956 return false; |
| 4957 } |
| 4958 |
| 4959 |
4709 CodePatcher::CodePatcher(byte* address, int instructions) | 4960 CodePatcher::CodePatcher(byte* address, int instructions) |
4710 : address_(address), | 4961 : address_(address), |
4711 instructions_(instructions), | 4962 instructions_(instructions), |
4712 size_(instructions * Assembler::kInstrSize), | 4963 size_(instructions * Assembler::kInstrSize), |
4713 masm_(Isolate::Current(), address, size_ + Assembler::kGap) { | 4964 masm_(Isolate::Current(), address, size_ + Assembler::kGap) { |
4714 // Create a new macro assembler pointing to the address of the code to patch. | 4965 // Create a new macro assembler pointing to the address of the code to patch. |
4715 // The size is adjusted with kGap in order for the assembler to generate size | 4966 // The size is adjusted with kGap in order for the assembler to generate size |
4716 // bytes of instructions without failing with buffer size constraints. | 4967 // bytes of instructions without failing with buffer size constraints. |
4717 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 4968 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
4718 } | 4969 } |
(...skipping 37 matching lines...)
4756 opcode == BGTZL); | 5007 opcode == BGTZL); |
4757 opcode = (cond == eq) ? BEQ : BNE; | 5008 opcode = (cond == eq) ? BEQ : BNE; |
4758 instr = (instr & ~kOpcodeMask) | opcode; | 5009 instr = (instr & ~kOpcodeMask) | opcode; |
4759 masm_.emit(instr); | 5010 masm_.emit(instr); |
4760 } | 5011 } |
4761 | 5012 |
4762 | 5013 |
4763 } } // namespace v8::internal | 5014 } } // namespace v8::internal |
4764 | 5015 |
4765 #endif // V8_TARGET_ARCH_MIPS | 5016 #endif // V8_TARGET_ARCH_MIPS |