Chromium Code Reviews| OLD | NEW |
|---|---|
| 1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| (...skipping 178 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 189 | 189 |
| 190 void MacroAssembler::CompareRoot(const Operand& with, | 190 void MacroAssembler::CompareRoot(const Operand& with, |
| 191 Heap::RootListIndex index) { | 191 Heap::RootListIndex index) { |
| 192 ASSERT(root_array_available_); | 192 ASSERT(root_array_available_); |
| 193 ASSERT(!with.AddressUsesRegister(kScratchRegister)); | 193 ASSERT(!with.AddressUsesRegister(kScratchRegister)); |
| 194 LoadRoot(kScratchRegister, index); | 194 LoadRoot(kScratchRegister, index); |
| 195 cmpq(with, kScratchRegister); | 195 cmpq(with, kScratchRegister); |
| 196 } | 196 } |
| 197 | 197 |
| 198 | 198 |
| 199 void MacroAssembler::RecordWriteHelper(Register object, | 199 void MacroAssembler::RememberedSetHelper(Register addr, |
| 200 Register addr, | 200 Register scratch, |
| 201 Register scratch, | 201 SaveFPRegsMode save_fp, |
| 202 SaveFPRegsMode save_fp) { | 202 RememberedSetFinalAction and_then) { |
| 203 if (emit_debug_code()) { | |
| 204 // Check that the object is not in new space. | |
| 205 Label not_in_new_space; | |
| 206 InNewSpace(object, scratch, not_equal, &not_in_new_space, Label::kNear); | |
| 207 Abort("new-space object passed to RecordWriteHelper"); | |
| 208 bind(&not_in_new_space); | |
| 209 } | |
| 210 | |
| 211 // Load store buffer top. | 203 // Load store buffer top. |
| 212 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex); | 204 LoadRoot(scratch, Heap::kStoreBufferTopRootIndex); |
| 213 // Store pointer to buffer. | 205 // Store pointer to buffer. |
| 214 movq(Operand(scratch, 0), addr); | 206 movq(Operand(scratch, 0), addr); |
| 215 // Increment buffer top. | 207 // Increment buffer top. |
| 216 addq(scratch, Immediate(kPointerSize)); | 208 addq(scratch, Immediate(kPointerSize)); |
| 217 // Write back new top of buffer. | 209 // Write back new top of buffer. |
| 218 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex); | 210 StoreRoot(scratch, Heap::kStoreBufferTopRootIndex); |
| 219 // Call stub on end of buffer. | 211 // Call stub on end of buffer. |
| 220 Label no_overflow; | 212 Label done; |
| 221 // Check for end of buffer. | 213 // Check for end of buffer. |
| 222 testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); | 214 testq(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); |
| 223 j(equal, &no_overflow, Label::kNear); | 215 if (and_then == kReturnAtEnd) { |
| 216 Label buffer_overflowed; | |
| 217 j(not_equal, &buffer_overflowed, Label::kNear); | |
| 218 ret(0); | |
| 219 bind(&buffer_overflowed); | |
| 220 } else { | |
| 221 ASSERT(and_then == kFallThroughAtEnd); | |
| 222 j(equal, &done, Label::kNear); | |
| 223 } | |
| 224 StoreBufferOverflowStub store_buffer_overflow = | 224 StoreBufferOverflowStub store_buffer_overflow = |
| 225 StoreBufferOverflowStub(save_fp); | 225 StoreBufferOverflowStub(save_fp); |
| 226 CallStub(&store_buffer_overflow); | 226 CallStub(&store_buffer_overflow); |
| 227 bind(&no_overflow); | 227 if (and_then == kReturnAtEnd) { |
| 228 ret(0); | |
| 229 } else { | |
| 230 ASSERT(and_then == kFallThroughAtEnd); | |
| 231 bind(&done); | |
| 232 } | |
| 228 } | 233 } |
| 229 | 234 |
| 230 | 235 |
| 231 void MacroAssembler::InNewSpace(Register object, | 236 void MacroAssembler::InNewSpace(Register object, |
| 232 Register scratch, | 237 Register scratch, |
| 233 Condition cc, | 238 Condition cc, |
| 234 Label* branch, | 239 Label* branch, |
| 235 Label::Distance near_jump) { | 240 Label::Distance near_jump) { |
| 236 if (Serializer::enabled()) { | 241 if (Serializer::enabled()) { |
| 237 // Can't do arithmetic on external references if it might get serialized. | 242 // Can't do arithmetic on external references if it might get serialized. |
| (...skipping 19 matching lines...) Expand all Loading... | |
| 257 addq(scratch, kScratchRegister); | 262 addq(scratch, kScratchRegister); |
| 258 } else { | 263 } else { |
| 259 lea(scratch, Operand(object, kScratchRegister, times_1, 0)); | 264 lea(scratch, Operand(object, kScratchRegister, times_1, 0)); |
| 260 } | 265 } |
| 261 and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask()))); | 266 and_(scratch, Immediate(static_cast<int32_t>(HEAP->NewSpaceMask()))); |
| 262 j(cc, branch, near_jump); | 267 j(cc, branch, near_jump); |
| 263 } | 268 } |
| 264 } | 269 } |
| 265 | 270 |
| 266 | 271 |
| 267 void MacroAssembler::RecordWrite(Register object, | 272 void MacroAssembler::RecordWriteField( |
| 268 int offset, | 273 Register object, |
| 269 Register value, | 274 int offset, |
| 270 Register index, | 275 Register value, |
| 271 SaveFPRegsMode save_fp) { | 276 Register dst, |
| 277 SaveFPRegsMode save_fp, | |
| 278 RememberedSetAction remembered_set_action, | |
| 279 SmiCheck smi_check) { | |
| 272 // The compiled code assumes that record write doesn't change the | 280 // The compiled code assumes that record write doesn't change the |
| 273 // context register, so we check that none of the clobbered | 281 // context register, so we check that none of the clobbered |
| 274 // registers are rsi. | 282 // registers are rsi. |
| 275 ASSERT(!value.is(rsi) && !index.is(rsi)); | 283 ASSERT(!value.is(rsi) && !dst.is(rsi)); |
| 276 | 284 |
| 277 // First, check if a write barrier is even needed. The tests below | 285 // First, check if a write barrier is even needed. The tests below |
| 278 // catch stores of smis and stores into the young generation. | 286 // catch stores of Smis. |
| 279 Label done; | 287 Label done; |
| 280 JumpIfSmi(value, &done); | |
| 281 | 288 |
| 282 RecordWriteNonSmi(object, offset, value, index, save_fp); | 289 // Skip barrier if writing a smi. |
| 290 if (smi_check == INLINE_SMI_CHECK) { | |
| 291 JumpIfSmi(value, &done); | |
| 292 } | |
| 293 | |
| 294 // Although the object register is tagged, the offset is relative to the start | |
| 295 // of the object, so so offset must be a multiple of kPointerSize. | |
| 296 ASSERT(IsAligned(offset, kPointerSize)); | |
| 297 | |
| 298 lea(dst, FieldOperand(object, offset)); | |
| 299 if (emit_debug_code()) { | |
| 300 Label ok; | |
| 301 testb(dst, Immediate((1 << kPointerSizeLog2) - 1)); | |
| 302 j(zero, &ok, Label::kNear); | |
| 303 int3(); | |
| 304 bind(&ok); | |
| 305 } | |
| 306 | |
| 307 RecordWrite( | |
| 308 object, dst, value, save_fp, remembered_set_action, OMIT_SMI_CHECK); | |
| 309 | |
| 283 bind(&done); | 310 bind(&done); |
| 284 | 311 |
| 285 // Clobber all input registers when running with the debug-code flag | 312 // Clobber clobbered input registers when running with the debug-code flag |
| 286 // turned on to provoke errors. This clobbering repeats the | 313 // turned on to provoke errors. |
| 287 // clobbering done inside RecordWriteNonSmi but it's necessary to | |
| 288 // avoid having the fast case for smis leave the registers | |
| 289 // unchanged. | |
| 290 if (emit_debug_code()) { | 314 if (emit_debug_code()) { |
| 291 movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 315 movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
| 292 movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 316 movq(dst, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
| 293 } | 317 } |
| 294 } | 318 } |
| 295 | 319 |
| 296 | 320 |
| 297 void MacroAssembler::RecordWrite(Register object, | 321 void MacroAssembler::RecordWrite(Register object, |
| 298 Register address, | 322 Register address, |
| 299 Register value, | 323 Register value, |
| 300 SaveFPRegsMode save_fp) { | 324 SaveFPRegsMode fp_mode, |
| 325 RememberedSetAction remembered_set_action, | |
| 326 SmiCheck smi_check) { | |
| 301 // The compiled code assumes that record write doesn't change the | 327 // The compiled code assumes that record write doesn't change the |
| 302 // context register, so we check that none of the clobbered | 328 // context register, so we check that none of the clobbered |
| 303 // registers are rsi. | 329 // registers are rsi. |
| 304 ASSERT(!object.is(rsi) && !value.is(rsi) && !address.is(rsi)); | 330 ASSERT(!value.is(rsi) && !address.is(rsi)); |
| 331 if (remembered_set_action == OMIT_REMEMBERED_SET && | |
| 332 FLAG_incremental_marking == false) { | |
| 333 return; | |
| 334 } | |
| 335 | |
| 336 ASSERT(!object.is(value)); | |
| 337 ASSERT(!object.is(address)); | |
| 338 ASSERT(!value.is(address)); | |
| 339 if (emit_debug_code()) { | |
| 340 AbortIfSmi(object); | |
| 341 } | |
| 342 | |
| 343 if (remembered_set_action == OMIT_REMEMBERED_SET && | |
| 344 FLAG_incremental_marking == false) { | |
|
Vyacheslav Egorov (Chromium)
2011/06/07 14:01:27
== false is just too strange if we have the same i
Erik Corry
2011/06/08 10:38:55
Done.
| |
| 345 return; | |
| 346 } | |
| 347 | |
| 348 if (FLAG_debug_code) { | |
| 349 Label ok; | |
| 350 cmpq(value, Operand(address, 0)); | |
| 351 j(equal, &ok, Label::kNear); | |
| 352 int3(); | |
| 353 bind(&ok); | |
| 354 } | |
| 305 | 355 |
| 306 // First, check if a write barrier is even needed. The tests below | 356 // First, check if a write barrier is even needed. The tests below |
| 307 // catch stores of smis and stores into the young generation. | 357 // catch stores of smis and stores into the young generation. |
| 308 Label done; | 358 Label done; |
| 309 JumpIfSmi(value, &done); | |
| 310 | 359 |
| 311 InNewSpace(object, value, equal, &done); | 360 if (smi_check == INLINE_SMI_CHECK) { |
| 361 // Skip barrier if writing a smi. | |
| 362 JumpIfSmi(value, &done); | |
| 363 } | |
| 312 | 364 |
| 313 RecordWriteHelper(object, address, value, save_fp); | 365 CheckPageFlag(value, |
| 366 value, // Used as scratch. | |
| 367 MemoryChunk::POINTERS_TO_HERE_ARE_INTERESTING, | |
| 368 zero, | |
| 369 &done, | |
| 370 Label::kNear); | |
| 371 | |
| 372 CheckPageFlag(object, | |
| 373 value, // Used as scratch. | |
| 374 MemoryChunk::POINTERS_FROM_HERE_ARE_INTERESTING, | |
| 375 zero, | |
| 376 &done, | |
| 377 Label::kNear); | |
| 378 | |
| 379 RecordWriteStub stub(object, value, address, remembered_set_action, fp_mode); | |
| 380 CallStub(&stub); | |
| 314 | 381 |
| 315 bind(&done); | 382 bind(&done); |
| 316 | 383 |
| 317 // Clobber all input registers when running with the debug-code flag | 384 // Clobber clobbered registers when running with the debug-code flag |
| 318 // turned on to provoke errors. | 385 // turned on to provoke errors. |
| 319 if (emit_debug_code()) { | 386 if (emit_debug_code()) { |
| 320 movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 387 movq(address, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
| 321 movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | 388 movq(value, BitCast<int64_t>(kZapValue), RelocInfo::NONE); |
| 322 } | 389 } |
| 323 } | 390 } |
| 324 | 391 |
| 325 | |
| 326 void MacroAssembler::RecordWriteNonSmi(Register object, | |
| 327 int offset, | |
| 328 Register scratch, | |
| 329 Register index, | |
| 330 SaveFPRegsMode save_fp) { | |
| 331 Label done; | |
| 332 | |
| 333 if (emit_debug_code()) { | |
| 334 Label okay; | |
| 335 JumpIfNotSmi(object, &okay, Label::kNear); | |
| 336 Abort("MacroAssembler::RecordWriteNonSmi cannot deal with smis"); | |
| 337 bind(&okay); | |
| 338 | |
| 339 if (offset == 0) { | |
| 340 // index must be int32. | |
| 341 Register tmp = index.is(rax) ? rbx : rax; | |
| 342 push(tmp); | |
| 343 movl(tmp, index); | |
| 344 cmpq(tmp, index); | |
| 345 Check(equal, "Index register for RecordWrite must be untagged int32."); | |
| 346 pop(tmp); | |
| 347 } | |
| 348 } | |
| 349 | |
| 350 // Test that the object address is not in the new space. We cannot | |
| 351 // update page dirty marks for new space pages. | |
| 352 InNewSpace(object, scratch, equal, &done); | |
| 353 | |
| 354 // The offset is relative to a tagged or untagged HeapObject pointer, | |
| 355 // so either offset or offset + kHeapObjectTag must be a | |
| 356 // multiple of kPointerSize. | |
| 357 ASSERT(IsAligned(offset, kPointerSize) || | |
| 358 IsAligned(offset + kHeapObjectTag, kPointerSize)); | |
| 359 | |
| 360 Register dst = index; | |
| 361 if (offset != 0) { | |
| 362 lea(dst, Operand(object, offset)); | |
| 363 } else { | |
| 364 // array access: calculate the destination address in the same manner as | |
| 365 // KeyedStoreIC::GenerateGeneric. | |
| 366 lea(dst, FieldOperand(object, | |
| 367 index, | |
| 368 times_pointer_size, | |
| 369 FixedArray::kHeaderSize)); | |
| 370 } | |
| 371 RecordWriteHelper(object, dst, scratch, save_fp); | |
| 372 | |
| 373 bind(&done); | |
| 374 | |
| 375 // Clobber all input registers when running with the debug-code flag | |
| 376 // turned on to provoke errors. | |
| 377 if (emit_debug_code()) { | |
| 378 movq(scratch, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | |
| 379 movq(index, BitCast<int64_t>(kZapValue), RelocInfo::NONE); | |
| 380 } | |
| 381 } | |
| 382 | |
| 383 | 392 |
| 384 void MacroAssembler::Assert(Condition cc, const char* msg) { | 393 void MacroAssembler::Assert(Condition cc, const char* msg) { |
| 385 if (emit_debug_code()) Check(cc, msg); | 394 if (emit_debug_code()) Check(cc, msg); |
| 386 } | 395 } |
| 387 | 396 |
| 388 | 397 |
| 389 void MacroAssembler::AssertFastElements(Register elements) { | 398 void MacroAssembler::AssertFastElements(Register elements) { |
| 390 if (emit_debug_code()) { | 399 if (emit_debug_code()) { |
| 391 Label ok; | 400 Label ok; |
| 392 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset), | 401 CompareRoot(FieldOperand(elements, HeapObject::kMapOffset), |
| (...skipping 3329 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... | |
| 3722 | 3731 |
| 3723 call(function); | 3732 call(function); |
| 3724 ASSERT(OS::ActivationFrameAlignment() != 0); | 3733 ASSERT(OS::ActivationFrameAlignment() != 0); |
| 3725 ASSERT(num_arguments >= 0); | 3734 ASSERT(num_arguments >= 0); |
| 3726 int argument_slots_on_stack = | 3735 int argument_slots_on_stack = |
| 3727 ArgumentStackSlotsForCFunctionCall(num_arguments); | 3736 ArgumentStackSlotsForCFunctionCall(num_arguments); |
| 3728 movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize)); | 3737 movq(rsp, Operand(rsp, argument_slots_on_stack * kPointerSize)); |
| 3729 } | 3738 } |
| 3730 | 3739 |
| 3731 | 3740 |
| 3741 bool Aliasing(Register r1, Register r2, Register r3, Register r4) { | |
|
Vyacheslav Egorov (Chromium)
2011/06/07 14:01:27
Aliasing is a very strange name.
Erik Corry
2011/06/08 10:38:55
AreAliased
| |
| 3742 if (r1.is(r2)) return true; | |
| 3743 if (r1.is(r3)) return true; | |
| 3744 if (r1.is(r4)) return true; | |
| 3745 if (r2.is(r3)) return true; | |
| 3746 if (r2.is(r4)) return true; | |
| 3747 if (r3.is(r4)) return true; | |
| 3748 return false; | |
| 3749 } | |
| 3750 | |
| 3751 | |
| 3732 CodePatcher::CodePatcher(byte* address, int size) | 3752 CodePatcher::CodePatcher(byte* address, int size) |
| 3733 : address_(address), | 3753 : address_(address), |
| 3734 size_(size), | 3754 size_(size), |
| 3735 masm_(Isolate::Current(), address, size + Assembler::kGap) { | 3755 masm_(Isolate::Current(), address, size + Assembler::kGap) { |
| 3736 // Create a new macro assembler pointing to the address of the code to patch. | 3756 // Create a new macro assembler pointing to the address of the code to patch. |
| 3737 // The size is adjusted with kGap on order for the assembler to generate size | 3757 // The size is adjusted with kGap on order for the assembler to generate size |
| 3738 // bytes of instructions without failing with buffer size constraints. | 3758 // bytes of instructions without failing with buffer size constraints. |
| 3739 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 3759 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 3740 } | 3760 } |
| 3741 | 3761 |
| 3742 | 3762 |
| 3743 CodePatcher::~CodePatcher() { | 3763 CodePatcher::~CodePatcher() { |
| 3744 // Indicate that code has changed. | 3764 // Indicate that code has changed. |
| 3745 CPU::FlushICache(address_, size_); | 3765 CPU::FlushICache(address_, size_); |
| 3746 | 3766 |
| 3747 // Check that the code was patched as expected. | 3767 // Check that the code was patched as expected. |
| 3748 ASSERT(masm_.pc_ == address_ + size_); | 3768 ASSERT(masm_.pc_ == address_ + size_); |
| 3749 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 3769 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 3750 } | 3770 } |
| 3751 | 3771 |
| 3772 | |
| 3773 void MacroAssembler::CheckPageFlag( | |
| 3774 Register object, | |
| 3775 Register scratch, | |
| 3776 MemoryChunk::MemoryChunkFlags flag, | |
| 3777 Condition cc, | |
| 3778 Label* condition_met, | |
| 3779 Label::Distance condition_met_near) { | |
| 3780 ASSERT(cc == zero || cc == not_zero); | |
| 3781 if (scratch.is(object)) { | |
| 3782 and_(scratch, Immediate(~Page::kPageAlignmentMask)); | |
| 3783 } else { | |
| 3784 movq(scratch, Immediate(~Page::kPageAlignmentMask)); | |
| 3785 and_(scratch, object); | |
| 3786 } | |
| 3787 if (flag < kBitsPerByte) { | |
| 3788 testb(Operand(scratch, MemoryChunk::kFlagsOffset), | |
| 3789 Immediate(static_cast<uint8_t>(1u << flag))); | |
| 3790 } else { | |
| 3791 testl(Operand(scratch, MemoryChunk::kFlagsOffset), Immediate(1 << flag)); | |
| 3792 } | |
| 3793 j(cc, condition_met, condition_met_near); | |
|
Vyacheslav Egorov (Chromium)
2011/06/07 14:01:27
why conditions_met_near is called *_near not *_dis
Erik Corry
2011/06/08 10:38:55
Done.
| |
| 3794 } | |
| 3795 | |
| 3752 } } // namespace v8::internal | 3796 } } // namespace v8::internal |
| 3753 | 3797 |
| 3754 #endif // V8_TARGET_ARCH_X64 | 3798 #endif // V8_TARGET_ARCH_X64 |
| OLD | NEW |