| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
| 2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
| 3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
| 4 | 4 |
| 5 #include "src/v8.h" | 5 #include "src/v8.h" |
| 6 | 6 |
| 7 #if V8_TARGET_ARCH_X64 | 7 #if V8_TARGET_ARCH_X64 |
| 8 | 8 |
| 9 #include "src/bootstrapper.h" | 9 #include "src/bootstrapper.h" |
| 10 #include "src/codegen.h" | 10 #include "src/codegen.h" |
| (...skipping 139 matching lines...) |
| 150 } | 150 } |
| 151 Push(Immediate(static_cast<int32_t>(address))); | 151 Push(Immediate(static_cast<int32_t>(address))); |
| 152 return; | 152 return; |
| 153 } | 153 } |
| 154 LoadAddress(kScratchRegister, source); | 154 LoadAddress(kScratchRegister, source); |
| 155 Push(kScratchRegister); | 155 Push(kScratchRegister); |
| 156 } | 156 } |
| 157 | 157 |
| 158 | 158 |
| 159 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { | 159 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index) { |
| 160 ASSERT(root_array_available_); | 160 DCHECK(root_array_available_); |
| 161 movp(destination, Operand(kRootRegister, | 161 movp(destination, Operand(kRootRegister, |
| 162 (index << kPointerSizeLog2) - kRootRegisterBias)); | 162 (index << kPointerSizeLog2) - kRootRegisterBias)); |
| 163 } | 163 } |
| 164 | 164 |
| 165 | 165 |
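
All of the root accessors here share the `(index << kPointerSizeLog2) - kRootRegisterBias` addressing. Biasing `kRootRegister` keeps the hottest root-list entries within a signed one-byte displacement. A minimal, runnable sketch of the arithmetic, assuming constants that mirror the x64 port (8-byte pointers, 128-byte bias):

```cpp
// Sketch of the displacement arithmetic in LoadRoot/StoreRoot/PushRoot.
// kPointerSizeLog2 = 3 and kRootRegisterBias = 128 are assumed values;
// treat them as illustrative, not authoritative.
#include <cstdint>
#include <cstdio>

constexpr int kPointerSizeLog2 = 3;     // 8-byte pointers
constexpr int kRootRegisterBias = 128;  // kRootRegister points past the list
                                        // start so small indices land in the
                                        // signed 8-bit displacement range

int32_t RootDisplacement(int index) {
  return (index << kPointerSizeLog2) - kRootRegisterBias;
}

int main() {
  for (int index : {0, 15, 31, 32}) {
    int32_t disp = RootDisplacement(index);
    // Displacements in [-128, 127] encode in one byte in x64 ModRM form.
    std::printf("index %2d -> disp %4d (%s)\n", index, static_cast<int>(disp),
                disp >= -128 && disp <= 127 ? "8-bit" : "32-bit");
  }
  return 0;
}
```
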
| 166 void MacroAssembler::LoadRootIndexed(Register destination, | 166 void MacroAssembler::LoadRootIndexed(Register destination, |
| 167 Register variable_offset, | 167 Register variable_offset, |
| 168 int fixed_offset) { | 168 int fixed_offset) { |
| 169 ASSERT(root_array_available_); | 169 DCHECK(root_array_available_); |
| 170 movp(destination, | 170 movp(destination, |
| 171 Operand(kRootRegister, | 171 Operand(kRootRegister, |
| 172 variable_offset, times_pointer_size, | 172 variable_offset, times_pointer_size, |
| 173 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias)); | 173 (fixed_offset << kPointerSizeLog2) - kRootRegisterBias)); |
| 174 } | 174 } |
| 175 | 175 |
| 176 | 176 |
| 177 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) { | 177 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index) { |
| 178 ASSERT(root_array_available_); | 178 DCHECK(root_array_available_); |
| 179 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias), | 179 movp(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias), |
| 180 source); | 180 source); |
| 181 } | 181 } |
| 182 | 182 |
| 183 | 183 |
| 184 void MacroAssembler::PushRoot(Heap::RootListIndex index) { | 184 void MacroAssembler::PushRoot(Heap::RootListIndex index) { |
| 185 ASSERT(root_array_available_); | 185 DCHECK(root_array_available_); |
| 186 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias)); | 186 Push(Operand(kRootRegister, (index << kPointerSizeLog2) - kRootRegisterBias)); |
| 187 } | 187 } |
| 188 | 188 |
| 189 | 189 |
| 190 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { | 190 void MacroAssembler::CompareRoot(Register with, Heap::RootListIndex index) { |
| 191 ASSERT(root_array_available_); | 191 DCHECK(root_array_available_); |
| 192 cmpp(with, Operand(kRootRegister, | 192 cmpp(with, Operand(kRootRegister, |
| 193 (index << kPointerSizeLog2) - kRootRegisterBias)); | 193 (index << kPointerSizeLog2) - kRootRegisterBias)); |
| 194 } | 194 } |
| 195 | 195 |
| 196 | 196 |
| 197 void MacroAssembler::CompareRoot(const Operand& with, | 197 void MacroAssembler::CompareRoot(const Operand& with, |
| 198 Heap::RootListIndex index) { | 198 Heap::RootListIndex index) { |
| 199 ASSERT(root_array_available_); | 199 DCHECK(root_array_available_); |
| 200 ASSERT(!with.AddressUsesRegister(kScratchRegister)); | 200 DCHECK(!with.AddressUsesRegister(kScratchRegister)); |
| 201 LoadRoot(kScratchRegister, index); | 201 LoadRoot(kScratchRegister, index); |
| 202 cmpp(with, kScratchRegister); | 202 cmpp(with, kScratchRegister); |
| 203 } | 203 } |
| 204 | 204 |
| 205 | 205 |
| 206 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. | 206 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. |
| 207 Register addr, | 207 Register addr, |
| 208 Register scratch, | 208 Register scratch, |
| 209 SaveFPRegsMode save_fp, | 209 SaveFPRegsMode save_fp, |
| 210 RememberedSetFinalAction and_then) { | 210 RememberedSetFinalAction and_then) { |
| (...skipping 14 matching lines...) |
| 225 // Call stub on end of buffer. | 225 // Call stub on end of buffer. |
| 226 Label done; | 226 Label done; |
| 227 // Check for end of buffer. | 227 // Check for end of buffer. |
| 228 testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); | 228 testp(scratch, Immediate(StoreBuffer::kStoreBufferOverflowBit)); |
| 229 if (and_then == kReturnAtEnd) { | 229 if (and_then == kReturnAtEnd) { |
| 230 Label buffer_overflowed; | 230 Label buffer_overflowed; |
| 231 j(not_equal, &buffer_overflowed, Label::kNear); | 231 j(not_equal, &buffer_overflowed, Label::kNear); |
| 232 ret(0); | 232 ret(0); |
| 233 bind(&buffer_overflowed); | 233 bind(&buffer_overflowed); |
| 234 } else { | 234 } else { |
| 235 ASSERT(and_then == kFallThroughAtEnd); | 235 DCHECK(and_then == kFallThroughAtEnd); |
| 236 j(equal, &done, Label::kNear); | 236 j(equal, &done, Label::kNear); |
| 237 } | 237 } |
| 238 StoreBufferOverflowStub store_buffer_overflow = | 238 StoreBufferOverflowStub store_buffer_overflow = |
| 239 StoreBufferOverflowStub(isolate(), save_fp); | 239 StoreBufferOverflowStub(isolate(), save_fp); |
| 240 CallStub(&store_buffer_overflow); | 240 CallStub(&store_buffer_overflow); |
| 241 if (and_then == kReturnAtEnd) { | 241 if (and_then == kReturnAtEnd) { |
| 242 ret(0); | 242 ret(0); |
| 243 } else { | 243 } else { |
| 244 ASSERT(and_then == kFallThroughAtEnd); | 244 DCHECK(and_then == kFallThroughAtEnd); |
| 245 bind(&done); | 245 bind(&done); |
| 246 } | 246 } |
| 247 } | 247 } |
| 248 | 248 |
| 249 | 249 |
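
The tail of RememberedSetHelper tests the store-buffer overflow bit and only calls the stub when the buffer is full; kReturnAtEnd then returns, while kFallThroughAtEnd continues at &done. A control-flow sketch in plain C++, with the bit position and the stub body as placeholders rather than V8's values:

```cpp
// Control-flow sketch of the tail of RememberedSetHelper above.
#include <cstdint>
#include <cstdio>

enum RememberedSetFinalAction { kReturnAtEnd, kFallThroughAtEnd };
constexpr uintptr_t kStoreBufferOverflowBit = uintptr_t{1} << 4;  // assumed

void StoreBufferOverflowStub() { std::puts("drain store buffer"); }

void RememberedSetTail(uintptr_t scratch, RememberedSetFinalAction and_then) {
  if ((scratch & kStoreBufferOverflowBit) != 0) {  // testp + j(not_equal, ...)
    StoreBufferOverflowStub();                     // CallStub(&store_buffer_overflow)
  }
  if (and_then == kReturnAtEnd) {
    return;  // ret(0) in the generated code
  }
  // kFallThroughAtEnd: execution continues at bind(&done).
}

int main() {
  RememberedSetTail(uintptr_t{1} << 4, kReturnAtEnd);  // full: stub runs
  RememberedSetTail(0, kFallThroughAtEnd);             // room left: no stub
  return 0;
}
```
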
| 250 void MacroAssembler::InNewSpace(Register object, | 250 void MacroAssembler::InNewSpace(Register object, |
| 251 Register scratch, | 251 Register scratch, |
| 252 Condition cc, | 252 Condition cc, |
| 253 Label* branch, | 253 Label* branch, |
| 254 Label::Distance distance) { | 254 Label::Distance distance) { |
| 255 if (serializer_enabled()) { | 255 if (serializer_enabled()) { |
| 256 // Can't do arithmetic on external references if it might get serialized. | 256 // Can't do arithmetic on external references if it might get serialized. |
| 257 // The mask isn't really an address. We load it as an external reference in | 257 // The mask isn't really an address. We load it as an external reference in |
| 258 // case the size of the new space is different between the snapshot maker | 258 // case the size of the new space is different between the snapshot maker |
| 259 // and the running system. | 259 // and the running system. |
| 260 if (scratch.is(object)) { | 260 if (scratch.is(object)) { |
| 261 Move(kScratchRegister, ExternalReference::new_space_mask(isolate())); | 261 Move(kScratchRegister, ExternalReference::new_space_mask(isolate())); |
| 262 andp(scratch, kScratchRegister); | 262 andp(scratch, kScratchRegister); |
| 263 } else { | 263 } else { |
| 264 Move(scratch, ExternalReference::new_space_mask(isolate())); | 264 Move(scratch, ExternalReference::new_space_mask(isolate())); |
| 265 andp(scratch, object); | 265 andp(scratch, object); |
| 266 } | 266 } |
| 267 Move(kScratchRegister, ExternalReference::new_space_start(isolate())); | 267 Move(kScratchRegister, ExternalReference::new_space_start(isolate())); |
| 268 cmpp(scratch, kScratchRegister); | 268 cmpp(scratch, kScratchRegister); |
| 269 j(cc, branch, distance); | 269 j(cc, branch, distance); |
| 270 } else { | 270 } else { |
| 271 ASSERT(kPointerSize == kInt64Size | 271 DCHECK(kPointerSize == kInt64Size |
| 272 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())) | 272 ? is_int32(static_cast<int64_t>(isolate()->heap()->NewSpaceMask())) |
| 273 : kPointerSize == kInt32Size); | 273 : kPointerSize == kInt32Size); |
| 274 intptr_t new_space_start = | 274 intptr_t new_space_start = |
| 275 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart()); | 275 reinterpret_cast<intptr_t>(isolate()->heap()->NewSpaceStart()); |
| 276 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start), | 276 Move(kScratchRegister, reinterpret_cast<Address>(-new_space_start), |
| 277 Assembler::RelocInfoNone()); | 277 Assembler::RelocInfoNone()); |
| 278 if (scratch.is(object)) { | 278 if (scratch.is(object)) { |
| 279 addp(scratch, kScratchRegister); | 279 addp(scratch, kScratchRegister); |
| 280 } else { | 280 } else { |
| 281 leap(scratch, Operand(object, kScratchRegister, times_1, 0)); | 281 leap(scratch, Operand(object, kScratchRegister, times_1, 0)); |
| (...skipping 18 matching lines...) |
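
The non-serializer branch of InNewSpace avoids external-reference arithmetic by folding the check into integer math: add -new_space_start to the object address, then mask (in the lines skipped above) to see whether any bit outside the new-space range is set. A sketch with a made-up base address and size:

```cpp
// Arithmetic sketch of the non-serializer InNewSpace path. The base and
// size below are invented for illustration; only the shape of the check
// matches the assembler sequence.
#include <cstdint>
#include <cstdio>

bool InNewSpace(uintptr_t object, uintptr_t new_space_start,
                uintptr_t new_space_mask) {
  // leap(scratch, Operand(object, kScratchRegister, times_1, 0)) computes
  // object + (-new_space_start); masking keeps only the out-of-range bits.
  uintptr_t biased = object - new_space_start;
  return (biased & new_space_mask) == 0;
}

int main() {
  const uintptr_t start = uintptr_t{1} << 32;  // assumed aligned base
  const uintptr_t size = uintptr_t{1} << 25;   // assumed 32 MiB new space
  const uintptr_t mask = ~(size - 1);          // bits that must be zero
  std::printf("%d %d\n", InNewSpace(start + 0x1000, start, mask),
              InNewSpace(start + size + 0x1000, start, mask));
  return 0;
}
```
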
| 300 // catch stores of Smis. | 300 // catch stores of Smis. |
| 301 Label done; | 301 Label done; |
| 302 | 302 |
| 303 // Skip barrier if writing a smi. | 303 // Skip barrier if writing a smi. |
| 304 if (smi_check == INLINE_SMI_CHECK) { | 304 if (smi_check == INLINE_SMI_CHECK) { |
| 305 JumpIfSmi(value, &done); | 305 JumpIfSmi(value, &done); |
| 306 } | 306 } |
| 307 | 307 |
| 308 // Although the object register is tagged, the offset is relative to the start | 308 // Although the object register is tagged, the offset is relative to the start |
| 309 // of the object, so the offset must be a multiple of kPointerSize. | 309 // of the object, so the offset must be a multiple of kPointerSize. |
| 310 ASSERT(IsAligned(offset, kPointerSize)); | 310 DCHECK(IsAligned(offset, kPointerSize)); |
| 311 | 311 |
| 312 leap(dst, FieldOperand(object, offset)); | 312 leap(dst, FieldOperand(object, offset)); |
| 313 if (emit_debug_code()) { | 313 if (emit_debug_code()) { |
| 314 Label ok; | 314 Label ok; |
| 315 testb(dst, Immediate((1 << kPointerSizeLog2) - 1)); | 315 testb(dst, Immediate((1 << kPointerSizeLog2) - 1)); |
| 316 j(zero, &ok, Label::kNear); | 316 j(zero, &ok, Label::kNear); |
| 317 int3(); | 317 int3(); |
| 318 bind(&ok); | 318 bind(&ok); |
| 319 } | 319 } |
| 320 | 320 |
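
The emit_debug_code() block above traps if the slot address computed by leap is not pointer aligned. A one-function stand-in for the testb/int3 pair, with the pointer width assumed:

```cpp
// Sketch of the debug-mode alignment check: a field slot must have its low
// kPointerSizeLog2 bits clear. Plain C++ stand-in for testb/int3.
#include <cassert>
#include <cstdint>

constexpr int kPointerSizeLog2 = 3;  // 8-byte pointers assumed

bool IsPointerAligned(uintptr_t field_address) {
  // Mirrors testb(dst, Immediate((1 << kPointerSizeLog2) - 1)).
  return (field_address & ((uintptr_t{1} << kPointerSizeLog2) - 1)) == 0;
}

int main() {
  assert(IsPointerAligned(0x1000));
  assert(!IsPointerAligned(0x1004));
  return 0;
}
```
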
| (...skipping 44 matching lines...) |
| 365 Move(value, kZapValue, Assembler::RelocInfoNone()); | 365 Move(value, kZapValue, Assembler::RelocInfoNone()); |
| 366 Move(index, kZapValue, Assembler::RelocInfoNone()); | 366 Move(index, kZapValue, Assembler::RelocInfoNone()); |
| 367 } | 367 } |
| 368 } | 368 } |
| 369 | 369 |
| 370 | 370 |
| 371 void MacroAssembler::RecordWriteForMap(Register object, | 371 void MacroAssembler::RecordWriteForMap(Register object, |
| 372 Register map, | 372 Register map, |
| 373 Register dst, | 373 Register dst, |
| 374 SaveFPRegsMode fp_mode) { | 374 SaveFPRegsMode fp_mode) { |
| 375 ASSERT(!object.is(kScratchRegister)); | 375 DCHECK(!object.is(kScratchRegister)); |
| 376 ASSERT(!object.is(map)); | 376 DCHECK(!object.is(map)); |
| 377 ASSERT(!object.is(dst)); | 377 DCHECK(!object.is(dst)); |
| 378 ASSERT(!map.is(dst)); | 378 DCHECK(!map.is(dst)); |
| 379 AssertNotSmi(object); | 379 AssertNotSmi(object); |
| 380 | 380 |
| 381 if (emit_debug_code()) { | 381 if (emit_debug_code()) { |
| 382 Label ok; | 382 Label ok; |
| 383 if (map.is(kScratchRegister)) pushq(map); | 383 if (map.is(kScratchRegister)) pushq(map); |
| 384 CompareMap(map, isolate()->factory()->meta_map()); | 384 CompareMap(map, isolate()->factory()->meta_map()); |
| 385 if (map.is(kScratchRegister)) popq(map); | 385 if (map.is(kScratchRegister)) popq(map); |
| 386 j(equal, &ok, Label::kNear); | 386 j(equal, &ok, Label::kNear); |
| 387 int3(); | 387 int3(); |
| 388 bind(&ok); | 388 bind(&ok); |
| (...skipping 51 matching lines...) |
| 440 | 440 |
| 441 | 441 |
| 442 void MacroAssembler::RecordWrite( | 442 void MacroAssembler::RecordWrite( |
| 443 Register object, | 443 Register object, |
| 444 Register address, | 444 Register address, |
| 445 Register value, | 445 Register value, |
| 446 SaveFPRegsMode fp_mode, | 446 SaveFPRegsMode fp_mode, |
| 447 RememberedSetAction remembered_set_action, | 447 RememberedSetAction remembered_set_action, |
| 448 SmiCheck smi_check, | 448 SmiCheck smi_check, |
| 449 PointersToHereCheck pointers_to_here_check_for_value) { | 449 PointersToHereCheck pointers_to_here_check_for_value) { |
| 450 ASSERT(!object.is(value)); | 450 DCHECK(!object.is(value)); |
| 451 ASSERT(!object.is(address)); | 451 DCHECK(!object.is(address)); |
| 452 ASSERT(!value.is(address)); | 452 DCHECK(!value.is(address)); |
| 453 AssertNotSmi(object); | 453 AssertNotSmi(object); |
| 454 | 454 |
| 455 if (remembered_set_action == OMIT_REMEMBERED_SET && | 455 if (remembered_set_action == OMIT_REMEMBERED_SET && |
| 456 !FLAG_incremental_marking) { | 456 !FLAG_incremental_marking) { |
| 457 return; | 457 return; |
| 458 } | 458 } |
| 459 | 459 |
| 460 if (emit_debug_code()) { | 460 if (emit_debug_code()) { |
| 461 Label ok; | 461 Label ok; |
| 462 cmpp(value, Operand(address, 0)); | 462 cmpp(value, Operand(address, 0)); |
| (...skipping 75 matching lines...) |
| 538 Abort(reason); | 538 Abort(reason); |
| 539 // Control will not return here. | 539 // Control will not return here. |
| 540 bind(&L); | 540 bind(&L); |
| 541 } | 541 } |
| 542 | 542 |
| 543 | 543 |
| 544 void MacroAssembler::CheckStackAlignment() { | 544 void MacroAssembler::CheckStackAlignment() { |
| 545 int frame_alignment = base::OS::ActivationFrameAlignment(); | 545 int frame_alignment = base::OS::ActivationFrameAlignment(); |
| 546 int frame_alignment_mask = frame_alignment - 1; | 546 int frame_alignment_mask = frame_alignment - 1; |
| 547 if (frame_alignment > kPointerSize) { | 547 if (frame_alignment > kPointerSize) { |
| 548 ASSERT(IsPowerOf2(frame_alignment)); | 548 DCHECK(IsPowerOf2(frame_alignment)); |
| 549 Label alignment_as_expected; | 549 Label alignment_as_expected; |
| 550 testp(rsp, Immediate(frame_alignment_mask)); | 550 testp(rsp, Immediate(frame_alignment_mask)); |
| 551 j(zero, &alignment_as_expected, Label::kNear); | 551 j(zero, &alignment_as_expected, Label::kNear); |
| 552 // Abort if stack is not aligned. | 552 // Abort if stack is not aligned. |
| 553 int3(); | 553 int3(); |
| 554 bind(&alignment_as_expected); | 554 bind(&alignment_as_expected); |
| 555 } | 555 } |
| 556 } | 556 } |
| 557 | 557 |
| 558 | 558 |
| (...skipping 34 matching lines...) |
| 593 CallRuntime(Runtime::kAbort, 1); | 593 CallRuntime(Runtime::kAbort, 1); |
| 594 } else { | 594 } else { |
| 595 CallRuntime(Runtime::kAbort, 1); | 595 CallRuntime(Runtime::kAbort, 1); |
| 596 } | 596 } |
| 597 // Control will not return here. | 597 // Control will not return here. |
| 598 int3(); | 598 int3(); |
| 599 } | 599 } |
| 600 | 600 |
| 601 | 601 |
| 602 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { | 602 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id) { |
| 603 ASSERT(AllowThisStubCall(stub)); // Calls are not allowed in some stubs | 603 DCHECK(AllowThisStubCall(stub)); // Calls are not allowed in some stubs |
| 604 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); | 604 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id); |
| 605 } | 605 } |
| 606 | 606 |
| 607 | 607 |
| 608 void MacroAssembler::TailCallStub(CodeStub* stub) { | 608 void MacroAssembler::TailCallStub(CodeStub* stub) { |
| 609 Jump(stub->GetCode(), RelocInfo::CODE_TARGET); | 609 Jump(stub->GetCode(), RelocInfo::CODE_TARGET); |
| 610 } | 610 } |
| 611 | 611 |
| 612 | 612 |
| 613 void MacroAssembler::StubReturn(int argc) { | 613 void MacroAssembler::StubReturn(int argc) { |
| 614 ASSERT(argc >= 1 && generating_stub()); | 614 DCHECK(argc >= 1 && generating_stub()); |
| 615 ret((argc - 1) * kPointerSize); | 615 ret((argc - 1) * kPointerSize); |
| 616 } | 616 } |
| 617 | 617 |
| 618 | 618 |
| 619 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { | 619 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { |
| 620 return has_frame_ || !stub->SometimesSetsUpAFrame(); | 620 return has_frame_ || !stub->SometimesSetsUpAFrame(); |
| 621 } | 621 } |
| 622 | 622 |
| 623 | 623 |
| 624 void MacroAssembler::IndexFromHash(Register hash, Register index) { | 624 void MacroAssembler::IndexFromHash(Register hash, Register index) { |
| 625 // The assert checks that the constants for the maximum number of digits | 625 // The assert checks that the constants for the maximum number of digits |
| 626 // for an array index cached in the hash field and the number of bits | 626 // for an array index cached in the hash field and the number of bits |
| 627 // reserved for it do not conflict. | 627 // reserved for it do not conflict. |
| 628 ASSERT(TenToThe(String::kMaxCachedArrayIndexLength) < | 628 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) < |
| 629 (1 << String::kArrayIndexValueBits)); | 629 (1 << String::kArrayIndexValueBits)); |
| 630 if (!hash.is(index)) { | 630 if (!hash.is(index)) { |
| 631 movl(index, hash); | 631 movl(index, hash); |
| 632 } | 632 } |
| 633 DecodeFieldToSmi<String::ArrayIndexValueBits>(index); | 633 DecodeFieldToSmi<String::ArrayIndexValueBits>(index); |
| 634 } | 634 } |
| 635 | 635 |
| 636 | 636 |
| 637 void MacroAssembler::CallRuntime(const Runtime::Function* f, | 637 void MacroAssembler::CallRuntime(const Runtime::Function* f, |
| 638 int num_arguments, | 638 int num_arguments, |
| (...skipping 48 matching lines...) |
| 687 int result_size) { | 687 int result_size) { |
| 688 TailCallExternalReference(ExternalReference(fid, isolate()), | 688 TailCallExternalReference(ExternalReference(fid, isolate()), |
| 689 num_arguments, | 689 num_arguments, |
| 690 result_size); | 690 result_size); |
| 691 } | 691 } |
| 692 | 692 |
| 693 | 693 |
| 694 static int Offset(ExternalReference ref0, ExternalReference ref1) { | 694 static int Offset(ExternalReference ref0, ExternalReference ref1) { |
| 695 int64_t offset = (ref0.address() - ref1.address()); | 695 int64_t offset = (ref0.address() - ref1.address()); |
| 696 // Check that the offset fits into an int. | 696 // Check that the offset fits into an int. |
| 697 ASSERT(static_cast<int>(offset) == offset); | 697 DCHECK(static_cast<int>(offset) == offset); |
| 698 return static_cast<int>(offset); | 698 return static_cast<int>(offset); |
| 699 } | 699 } |
| 700 | 700 |
| 701 | 701 |
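
Offset() narrows a 64-bit pointer difference to int, and the DCHECK guards exactly that narrowing. A sketch of the round-trip check:

```cpp
// Sketch of the truncation check in Offset() above: the 64-bit distance
// between two external references must survive the narrowing cast to int.
#include <cassert>
#include <cstdint>

int CheckedOffset(int64_t offset) {
  // Mirrors DCHECK(static_cast<int>(offset) == offset).
  assert(static_cast<int64_t>(static_cast<int>(offset)) == offset);
  return static_cast<int>(offset);
}

int main() {
  assert(CheckedOffset(-72) == -72);
  // CheckedOffset(int64_t{1} << 40) would trip the assert in a debug build.
  return 0;
}
```
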
| 702 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) { | 702 void MacroAssembler::PrepareCallApiFunction(int arg_stack_space) { |
| 703 EnterApiExitFrame(arg_stack_space); | 703 EnterApiExitFrame(arg_stack_space); |
| 704 } | 704 } |
| 705 | 705 |
| 706 | 706 |
| 707 void MacroAssembler::CallApiFunctionAndReturn( | 707 void MacroAssembler::CallApiFunctionAndReturn( |
| (...skipping 16 matching lines...) |
| 724 const int kNextOffset = 0; | 724 const int kNextOffset = 0; |
| 725 const int kLimitOffset = Offset( | 725 const int kLimitOffset = Offset( |
| 726 ExternalReference::handle_scope_limit_address(isolate()), | 726 ExternalReference::handle_scope_limit_address(isolate()), |
| 727 next_address); | 727 next_address); |
| 728 const int kLevelOffset = Offset( | 728 const int kLevelOffset = Offset( |
| 729 ExternalReference::handle_scope_level_address(isolate()), | 729 ExternalReference::handle_scope_level_address(isolate()), |
| 730 next_address); | 730 next_address); |
| 731 ExternalReference scheduled_exception_address = | 731 ExternalReference scheduled_exception_address = |
| 732 ExternalReference::scheduled_exception_address(isolate()); | 732 ExternalReference::scheduled_exception_address(isolate()); |
| 733 | 733 |
| 734 ASSERT(rdx.is(function_address) || r8.is(function_address)); | 734 DCHECK(rdx.is(function_address) || r8.is(function_address)); |
| 735 // Allocate HandleScope in callee-save registers. | 735 // Allocate HandleScope in callee-save registers. |
| 736 Register prev_next_address_reg = r14; | 736 Register prev_next_address_reg = r14; |
| 737 Register prev_limit_reg = rbx; | 737 Register prev_limit_reg = rbx; |
| 738 Register base_reg = r15; | 738 Register base_reg = r15; |
| 739 Move(base_reg, next_address); | 739 Move(base_reg, next_address); |
| 740 movp(prev_next_address_reg, Operand(base_reg, kNextOffset)); | 740 movp(prev_next_address_reg, Operand(base_reg, kNextOffset)); |
| 741 movp(prev_limit_reg, Operand(base_reg, kLimitOffset)); | 741 movp(prev_limit_reg, Operand(base_reg, kLimitOffset)); |
| 742 addl(Operand(base_reg, kLevelOffset), Immediate(1)); | 742 addl(Operand(base_reg, kLevelOffset), Immediate(1)); |
| 743 | 743 |
| 744 if (FLAG_log_timer_events) { | 744 if (FLAG_log_timer_events) { |
| (...skipping 121 matching lines...) |
| 866 LoadAddress(rbx, ext); | 866 LoadAddress(rbx, ext); |
| 867 CEntryStub ces(isolate(), result_size); | 867 CEntryStub ces(isolate(), result_size); |
| 868 jmp(ces.GetCode(), RelocInfo::CODE_TARGET); | 868 jmp(ces.GetCode(), RelocInfo::CODE_TARGET); |
| 869 } | 869 } |
| 870 | 870 |
| 871 | 871 |
| 872 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, | 872 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, |
| 873 InvokeFlag flag, | 873 InvokeFlag flag, |
| 874 const CallWrapper& call_wrapper) { | 874 const CallWrapper& call_wrapper) { |
| 875 // You can't call a builtin without a valid frame. | 875 // You can't call a builtin without a valid frame. |
| 876 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 876 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
| 877 | 877 |
| 878 // Rely on the assertion to check that the number of provided | 878 // Rely on the assertion to check that the number of provided |
| 879 // arguments match the expected number of arguments. Fake a | 879 // arguments match the expected number of arguments. Fake a |
| 880 // parameter count to avoid emitting code to do the check. | 880 // parameter count to avoid emitting code to do the check. |
| 881 ParameterCount expected(0); | 881 ParameterCount expected(0); |
| 882 GetBuiltinEntry(rdx, id); | 882 GetBuiltinEntry(rdx, id); |
| 883 InvokeCode(rdx, expected, expected, flag, call_wrapper); | 883 InvokeCode(rdx, expected, expected, flag, call_wrapper); |
| 884 } | 884 } |
| 885 | 885 |
| 886 | 886 |
| 887 void MacroAssembler::GetBuiltinFunction(Register target, | 887 void MacroAssembler::GetBuiltinFunction(Register target, |
| 888 Builtins::JavaScript id) { | 888 Builtins::JavaScript id) { |
| 889 // Load the builtins object into target register. | 889 // Load the builtins object into target register. |
| 890 movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); | 890 movp(target, Operand(rsi, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); |
| 891 movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset)); | 891 movp(target, FieldOperand(target, GlobalObject::kBuiltinsOffset)); |
| 892 movp(target, FieldOperand(target, | 892 movp(target, FieldOperand(target, |
| 893 JSBuiltinsObject::OffsetOfFunctionWithId(id))); | 893 JSBuiltinsObject::OffsetOfFunctionWithId(id))); |
| 894 } | 894 } |
| 895 | 895 |
| 896 | 896 |
| 897 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { | 897 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { |
| 898 ASSERT(!target.is(rdi)); | 898 DCHECK(!target.is(rdi)); |
| 899 // Load the JavaScript builtin function from the builtins object. | 899 // Load the JavaScript builtin function from the builtins object. |
| 900 GetBuiltinFunction(rdi, id); | 900 GetBuiltinFunction(rdi, id); |
| 901 movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); | 901 movp(target, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); |
| 902 } | 902 } |
| 903 | 903 |
| 904 | 904 |
| 905 #define REG(Name) { kRegister_ ## Name ## _Code } | 905 #define REG(Name) { kRegister_ ## Name ## _Code } |
| 906 | 906 |
| 907 static const Register saved_regs[] = { | 907 static const Register saved_regs[] = { |
| 908 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8), | 908 REG(rax), REG(rcx), REG(rdx), REG(rbx), REG(rbp), REG(rsi), REG(rdi), REG(r8), |
| (...skipping 55 matching lines...) |
| 964 } | 964 } |
| 965 | 965 |
| 966 | 966 |
| 967 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) { | 967 void MacroAssembler::Cvtlsi2sd(XMMRegister dst, const Operand& src) { |
| 968 xorps(dst, dst); | 968 xorps(dst, dst); |
| 969 cvtlsi2sd(dst, src); | 969 cvtlsi2sd(dst, src); |
| 970 } | 970 } |
| 971 | 971 |
| 972 | 972 |
| 973 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { | 973 void MacroAssembler::Load(Register dst, const Operand& src, Representation r) { |
| 974 ASSERT(!r.IsDouble()); | 974 DCHECK(!r.IsDouble()); |
| 975 if (r.IsInteger8()) { | 975 if (r.IsInteger8()) { |
| 976 movsxbq(dst, src); | 976 movsxbq(dst, src); |
| 977 } else if (r.IsUInteger8()) { | 977 } else if (r.IsUInteger8()) { |
| 978 movzxbl(dst, src); | 978 movzxbl(dst, src); |
| 979 } else if (r.IsInteger16()) { | 979 } else if (r.IsInteger16()) { |
| 980 movsxwq(dst, src); | 980 movsxwq(dst, src); |
| 981 } else if (r.IsUInteger16()) { | 981 } else if (r.IsUInteger16()) { |
| 982 movzxwl(dst, src); | 982 movzxwl(dst, src); |
| 983 } else if (r.IsInteger32()) { | 983 } else if (r.IsInteger32()) { |
| 984 movl(dst, src); | 984 movl(dst, src); |
| 985 } else { | 985 } else { |
| 986 movp(dst, src); | 986 movp(dst, src); |
| 987 } | 987 } |
| 988 } | 988 } |
| 989 | 989 |
| 990 | 990 |
| 991 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) { | 991 void MacroAssembler::Store(const Operand& dst, Register src, Representation r) { |
| 992 ASSERT(!r.IsDouble()); | 992 DCHECK(!r.IsDouble()); |
| 993 if (r.IsInteger8() || r.IsUInteger8()) { | 993 if (r.IsInteger8() || r.IsUInteger8()) { |
| 994 movb(dst, src); | 994 movb(dst, src); |
| 995 } else if (r.IsInteger16() || r.IsUInteger16()) { | 995 } else if (r.IsInteger16() || r.IsUInteger16()) { |
| 996 movw(dst, src); | 996 movw(dst, src); |
| 997 } else if (r.IsInteger32()) { | 997 } else if (r.IsInteger32()) { |
| 998 movl(dst, src); | 998 movl(dst, src); |
| 999 } else { | 999 } else { |
| 1000 if (r.IsHeapObject()) { | 1000 if (r.IsHeapObject()) { |
| 1001 AssertNotSmi(src); | 1001 AssertNotSmi(src); |
| 1002 } else if (r.IsSmi()) { | 1002 } else if (r.IsSmi()) { |
| (...skipping 34 matching lines...) |
| 1037 // ---------------------------------------------------------------------------- | 1037 // ---------------------------------------------------------------------------- |
| 1038 // Smi tagging, untagging and tag detection. | 1038 // Smi tagging, untagging and tag detection. |
| 1039 | 1039 |
| 1040 bool MacroAssembler::IsUnsafeInt(const int32_t x) { | 1040 bool MacroAssembler::IsUnsafeInt(const int32_t x) { |
| 1041 static const int kMaxBits = 17; | 1041 static const int kMaxBits = 17; |
| 1042 return !is_intn(x, kMaxBits); | 1042 return !is_intn(x, kMaxBits); |
| 1043 } | 1043 } |
| 1044 | 1044 |
| 1045 | 1045 |
| 1046 void MacroAssembler::SafeMove(Register dst, Smi* src) { | 1046 void MacroAssembler::SafeMove(Register dst, Smi* src) { |
| 1047 ASSERT(!dst.is(kScratchRegister)); | 1047 DCHECK(!dst.is(kScratchRegister)); |
| 1048 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { | 1048 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| 1049 if (SmiValuesAre32Bits()) { | 1049 if (SmiValuesAre32Bits()) { |
| 1050 // JIT cookie can be converted to Smi. | 1050 // JIT cookie can be converted to Smi. |
| 1051 Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); | 1051 Move(dst, Smi::FromInt(src->value() ^ jit_cookie())); |
| 1052 Move(kScratchRegister, Smi::FromInt(jit_cookie())); | 1052 Move(kScratchRegister, Smi::FromInt(jit_cookie())); |
| 1053 xorp(dst, kScratchRegister); | 1053 xorp(dst, kScratchRegister); |
| 1054 } else { | 1054 } else { |
| 1055 ASSERT(SmiValuesAre31Bits()); | 1055 DCHECK(SmiValuesAre31Bits()); |
| 1056 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src)); | 1056 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src)); |
| 1057 movp(dst, Immediate(value ^ jit_cookie())); | 1057 movp(dst, Immediate(value ^ jit_cookie())); |
| 1058 xorp(dst, Immediate(jit_cookie())); | 1058 xorp(dst, Immediate(jit_cookie())); |
| 1059 } | 1059 } |
| 1060 } else { | 1060 } else { |
| 1061 Move(dst, src); | 1061 Move(dst, src); |
| 1062 } | 1062 } |
| 1063 } | 1063 } |
| 1064 | 1064 |
| 1065 | 1065 |
| 1066 void MacroAssembler::SafePush(Smi* src) { | 1066 void MacroAssembler::SafePush(Smi* src) { |
| 1067 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { | 1067 if (IsUnsafeInt(src->value()) && jit_cookie() != 0) { |
| 1068 if (SmiValuesAre32Bits()) { | 1068 if (SmiValuesAre32Bits()) { |
| 1069 // JIT cookie can be converted to Smi. | 1069 // JIT cookie can be converted to Smi. |
| 1070 Push(Smi::FromInt(src->value() ^ jit_cookie())); | 1070 Push(Smi::FromInt(src->value() ^ jit_cookie())); |
| 1071 Move(kScratchRegister, Smi::FromInt(jit_cookie())); | 1071 Move(kScratchRegister, Smi::FromInt(jit_cookie())); |
| 1072 xorp(Operand(rsp, 0), kScratchRegister); | 1072 xorp(Operand(rsp, 0), kScratchRegister); |
| 1073 } else { | 1073 } else { |
| 1074 ASSERT(SmiValuesAre31Bits()); | 1074 DCHECK(SmiValuesAre31Bits()); |
| 1075 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src)); | 1075 int32_t value = static_cast<int32_t>(reinterpret_cast<intptr_t>(src)); |
| 1076 Push(Immediate(value ^ jit_cookie())); | 1076 Push(Immediate(value ^ jit_cookie())); |
| 1077 xorp(Operand(rsp, 0), Immediate(jit_cookie())); | 1077 xorp(Operand(rsp, 0), Immediate(jit_cookie())); |
| 1078 } | 1078 } |
| 1079 } else { | 1079 } else { |
| 1080 Push(src); | 1080 Push(src); |
| 1081 } | 1081 } |
| 1082 } | 1082 } |
| 1083 | 1083 |
| 1084 | 1084 |
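
SafeMove and SafePush defend against JIT spraying: an immediate with more than 17 controllable bits (per IsUnsafeInt) is emitted XOR-ed with a secret cookie and un-XOR-ed at run time, so the raw bytes never appear in the code stream. A sketch with a made-up cookie and value:

```cpp
// Sketch of the SafeMove/SafePush idea. The cookie and the immediate below
// are invented; only the XOR round-trip mirrors the code above.
#include <cstdint>
#include <cstdio>

bool IsUnsafeInt(int32_t x) {
  // Mirrors !is_intn(x, 17): true when x needs more than 17 signed bits.
  const int32_t limit = 1 << 16;
  return x < -limit || x >= limit;
}

int main() {
  int32_t value = 0x41414141;                  // immediate never emitted as-is
  int32_t cookie = 0x5A5A0F0F;                 // assumed jit_cookie() value
  int32_t in_code_stream = value ^ cookie;     // what lands in generated code
  int32_t at_runtime = in_code_stream ^ cookie;  // xorp with the cookie again
  std::printf("unsafe=%d round-trip ok=%d\n", IsUnsafeInt(value),
              at_runtime == value);
  return 0;
}
```
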
| (...skipping 79 matching lines...) |
| 1164 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { | 1164 void MacroAssembler::Integer32ToSmiField(const Operand& dst, Register src) { |
| 1165 if (emit_debug_code()) { | 1165 if (emit_debug_code()) { |
| 1166 testb(dst, Immediate(0x01)); | 1166 testb(dst, Immediate(0x01)); |
| 1167 Label ok; | 1167 Label ok; |
| 1168 j(zero, &ok, Label::kNear); | 1168 j(zero, &ok, Label::kNear); |
| 1169 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation); | 1169 Abort(kInteger32ToSmiFieldWritingToNonSmiLocation); |
| 1170 bind(&ok); | 1170 bind(&ok); |
| 1171 } | 1171 } |
| 1172 | 1172 |
| 1173 if (SmiValuesAre32Bits()) { | 1173 if (SmiValuesAre32Bits()) { |
| 1174 ASSERT(kSmiShift % kBitsPerByte == 0); | 1174 DCHECK(kSmiShift % kBitsPerByte == 0); |
| 1175 movl(Operand(dst, kSmiShift / kBitsPerByte), src); | 1175 movl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| 1176 } else { | 1176 } else { |
| 1177 ASSERT(SmiValuesAre31Bits()); | 1177 DCHECK(SmiValuesAre31Bits()); |
| 1178 Integer32ToSmi(kScratchRegister, src); | 1178 Integer32ToSmi(kScratchRegister, src); |
| 1179 movp(dst, kScratchRegister); | 1179 movp(dst, kScratchRegister); |
| 1180 } | 1180 } |
| 1181 } | 1181 } |
| 1182 | 1182 |
| 1183 | 1183 |
| 1184 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, | 1184 void MacroAssembler::Integer64PlusConstantToSmi(Register dst, |
| 1185 Register src, | 1185 Register src, |
| 1186 int constant) { | 1186 int constant) { |
| 1187 if (dst.is(src)) { | 1187 if (dst.is(src)) { |
| 1188 addl(dst, Immediate(constant)); | 1188 addl(dst, Immediate(constant)); |
| 1189 } else { | 1189 } else { |
| 1190 leal(dst, Operand(src, constant)); | 1190 leal(dst, Operand(src, constant)); |
| 1191 } | 1191 } |
| 1192 shlp(dst, Immediate(kSmiShift)); | 1192 shlp(dst, Immediate(kSmiShift)); |
| 1193 } | 1193 } |
| 1194 | 1194 |
| 1195 | 1195 |
| 1196 void MacroAssembler::SmiToInteger32(Register dst, Register src) { | 1196 void MacroAssembler::SmiToInteger32(Register dst, Register src) { |
| 1197 STATIC_ASSERT(kSmiTag == 0); | 1197 STATIC_ASSERT(kSmiTag == 0); |
| 1198 if (!dst.is(src)) { | 1198 if (!dst.is(src)) { |
| 1199 movp(dst, src); | 1199 movp(dst, src); |
| 1200 } | 1200 } |
| 1201 | 1201 |
| 1202 if (SmiValuesAre32Bits()) { | 1202 if (SmiValuesAre32Bits()) { |
| 1203 shrp(dst, Immediate(kSmiShift)); | 1203 shrp(dst, Immediate(kSmiShift)); |
| 1204 } else { | 1204 } else { |
| 1205 ASSERT(SmiValuesAre31Bits()); | 1205 DCHECK(SmiValuesAre31Bits()); |
| 1206 sarl(dst, Immediate(kSmiShift)); | 1206 sarl(dst, Immediate(kSmiShift)); |
| 1207 } | 1207 } |
| 1208 } | 1208 } |
| 1209 | 1209 |
| 1210 | 1210 |
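
These branches all dispatch on the smi layout: kSmiShift is 32 when smi payloads fill the upper word (so a field store can touch just the upper 4 bytes) and 1 when payload and tag share the low 32 bits. A sketch of tagging and untagging under both layouts, with an arithmetic right shift assumed, as with sarp/sarl:

```cpp
// Sketch of the two smi layouts. Unsigned shifts sidestep UB on negative
// values; the >> on int64_t assumes an arithmetic shift, like the assembler.
#include <cstdint>
#include <cstdio>

int64_t Tag(int32_t value, int shift) {
  return static_cast<int64_t>(static_cast<uint64_t>(int64_t{value}) << shift);
}

int32_t Untag(int64_t smi, int shift) {
  return static_cast<int32_t>(smi >> shift);  // arithmetic shift, like sarp
}

int main() {
  for (int shift : {32, 1}) {  // SmiValuesAre32Bits() vs SmiValuesAre31Bits()
    int64_t tagged = Tag(-7, shift);
    std::printf("shift %2d: tagged=%#llx untagged=%d\n", shift,
                static_cast<unsigned long long>(tagged),
                static_cast<int>(Untag(tagged, shift)));
  }
  return 0;
}
```
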
| 1211 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) { | 1211 void MacroAssembler::SmiToInteger32(Register dst, const Operand& src) { |
| 1212 if (SmiValuesAre32Bits()) { | 1212 if (SmiValuesAre32Bits()) { |
| 1213 movl(dst, Operand(src, kSmiShift / kBitsPerByte)); | 1213 movl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| 1214 } else { | 1214 } else { |
| 1215 ASSERT(SmiValuesAre31Bits()); | 1215 DCHECK(SmiValuesAre31Bits()); |
| 1216 movl(dst, src); | 1216 movl(dst, src); |
| 1217 sarl(dst, Immediate(kSmiShift)); | 1217 sarl(dst, Immediate(kSmiShift)); |
| 1218 } | 1218 } |
| 1219 } | 1219 } |
| 1220 | 1220 |
| 1221 | 1221 |
| 1222 void MacroAssembler::SmiToInteger64(Register dst, Register src) { | 1222 void MacroAssembler::SmiToInteger64(Register dst, Register src) { |
| 1223 STATIC_ASSERT(kSmiTag == 0); | 1223 STATIC_ASSERT(kSmiTag == 0); |
| 1224 if (!dst.is(src)) { | 1224 if (!dst.is(src)) { |
| 1225 movp(dst, src); | 1225 movp(dst, src); |
| 1226 } | 1226 } |
| 1227 sarp(dst, Immediate(kSmiShift)); | 1227 sarp(dst, Immediate(kSmiShift)); |
| 1228 if (kPointerSize == kInt32Size) { | 1228 if (kPointerSize == kInt32Size) { |
| 1229 // Sign extend to 64-bit. | 1229 // Sign extend to 64-bit. |
| 1230 movsxlq(dst, dst); | 1230 movsxlq(dst, dst); |
| 1231 } | 1231 } |
| 1232 } | 1232 } |
| 1233 | 1233 |
| 1234 | 1234 |
| 1235 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { | 1235 void MacroAssembler::SmiToInteger64(Register dst, const Operand& src) { |
| 1236 if (SmiValuesAre32Bits()) { | 1236 if (SmiValuesAre32Bits()) { |
| 1237 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); | 1237 movsxlq(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| 1238 } else { | 1238 } else { |
| 1239 ASSERT(SmiValuesAre31Bits()); | 1239 DCHECK(SmiValuesAre31Bits()); |
| 1240 movp(dst, src); | 1240 movp(dst, src); |
| 1241 SmiToInteger64(dst, dst); | 1241 SmiToInteger64(dst, dst); |
| 1242 } | 1242 } |
| 1243 } | 1243 } |
| 1244 | 1244 |
| 1245 | 1245 |
| 1246 void MacroAssembler::SmiTest(Register src) { | 1246 void MacroAssembler::SmiTest(Register src) { |
| 1247 AssertSmi(src); | 1247 AssertSmi(src); |
| 1248 testp(src, src); | 1248 testp(src, src); |
| 1249 } | 1249 } |
| 1250 | 1250 |
| 1251 | 1251 |
| 1252 void MacroAssembler::SmiCompare(Register smi1, Register smi2) { | 1252 void MacroAssembler::SmiCompare(Register smi1, Register smi2) { |
| 1253 AssertSmi(smi1); | 1253 AssertSmi(smi1); |
| 1254 AssertSmi(smi2); | 1254 AssertSmi(smi2); |
| 1255 cmpp(smi1, smi2); | 1255 cmpp(smi1, smi2); |
| 1256 } | 1256 } |
| 1257 | 1257 |
| 1258 | 1258 |
| 1259 void MacroAssembler::SmiCompare(Register dst, Smi* src) { | 1259 void MacroAssembler::SmiCompare(Register dst, Smi* src) { |
| 1260 AssertSmi(dst); | 1260 AssertSmi(dst); |
| 1261 Cmp(dst, src); | 1261 Cmp(dst, src); |
| 1262 } | 1262 } |
| 1263 | 1263 |
| 1264 | 1264 |
| 1265 void MacroAssembler::Cmp(Register dst, Smi* src) { | 1265 void MacroAssembler::Cmp(Register dst, Smi* src) { |
| 1266 ASSERT(!dst.is(kScratchRegister)); | 1266 DCHECK(!dst.is(kScratchRegister)); |
| 1267 if (src->value() == 0) { | 1267 if (src->value() == 0) { |
| 1268 testp(dst, dst); | 1268 testp(dst, dst); |
| 1269 } else { | 1269 } else { |
| 1270 Register constant_reg = GetSmiConstant(src); | 1270 Register constant_reg = GetSmiConstant(src); |
| 1271 cmpp(dst, constant_reg); | 1271 cmpp(dst, constant_reg); |
| 1272 } | 1272 } |
| 1273 } | 1273 } |
| 1274 | 1274 |
| 1275 | 1275 |
| 1276 void MacroAssembler::SmiCompare(Register dst, const Operand& src) { | 1276 void MacroAssembler::SmiCompare(Register dst, const Operand& src) { |
| 1277 AssertSmi(dst); | 1277 AssertSmi(dst); |
| 1278 AssertSmi(src); | 1278 AssertSmi(src); |
| 1279 cmpp(dst, src); | 1279 cmpp(dst, src); |
| 1280 } | 1280 } |
| 1281 | 1281 |
| 1282 | 1282 |
| 1283 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { | 1283 void MacroAssembler::SmiCompare(const Operand& dst, Register src) { |
| 1284 AssertSmi(dst); | 1284 AssertSmi(dst); |
| 1285 AssertSmi(src); | 1285 AssertSmi(src); |
| 1286 cmpp(dst, src); | 1286 cmpp(dst, src); |
| 1287 } | 1287 } |
| 1288 | 1288 |
| 1289 | 1289 |
| 1290 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { | 1290 void MacroAssembler::SmiCompare(const Operand& dst, Smi* src) { |
| 1291 AssertSmi(dst); | 1291 AssertSmi(dst); |
| 1292 if (SmiValuesAre32Bits()) { | 1292 if (SmiValuesAre32Bits()) { |
| 1293 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); | 1293 cmpl(Operand(dst, kSmiShift / kBitsPerByte), Immediate(src->value())); |
| 1294 } else { | 1294 } else { |
| 1295 ASSERT(SmiValuesAre31Bits()); | 1295 DCHECK(SmiValuesAre31Bits()); |
| 1296 cmpl(dst, Immediate(src)); | 1296 cmpl(dst, Immediate(src)); |
| 1297 } | 1297 } |
| 1298 } | 1298 } |
| 1299 | 1299 |
| 1300 | 1300 |
| 1301 void MacroAssembler::Cmp(const Operand& dst, Smi* src) { | 1301 void MacroAssembler::Cmp(const Operand& dst, Smi* src) { |
| 1302 // The Operand cannot use the smi register. | 1302 // The Operand cannot use the smi register. |
| 1303 Register smi_reg = GetSmiConstant(src); | 1303 Register smi_reg = GetSmiConstant(src); |
| 1304 ASSERT(!dst.AddressUsesRegister(smi_reg)); | 1304 DCHECK(!dst.AddressUsesRegister(smi_reg)); |
| 1305 cmpp(dst, smi_reg); | 1305 cmpp(dst, smi_reg); |
| 1306 } | 1306 } |
| 1307 | 1307 |
| 1308 | 1308 |
| 1309 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) { | 1309 void MacroAssembler::SmiCompareInteger32(const Operand& dst, Register src) { |
| 1310 if (SmiValuesAre32Bits()) { | 1310 if (SmiValuesAre32Bits()) { |
| 1311 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); | 1311 cmpl(Operand(dst, kSmiShift / kBitsPerByte), src); |
| 1312 } else { | 1312 } else { |
| 1313 ASSERT(SmiValuesAre31Bits()); | 1313 DCHECK(SmiValuesAre31Bits()); |
| 1314 SmiToInteger32(kScratchRegister, dst); | 1314 SmiToInteger32(kScratchRegister, dst); |
| 1315 cmpl(kScratchRegister, src); | 1315 cmpl(kScratchRegister, src); |
| 1316 } | 1316 } |
| 1317 } | 1317 } |
| 1318 | 1318 |
| 1319 | 1319 |
| 1320 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, | 1320 void MacroAssembler::PositiveSmiTimesPowerOfTwoToInteger64(Register dst, |
| 1321 Register src, | 1321 Register src, |
| 1322 int power) { | 1322 int power) { |
| 1323 ASSERT(power >= 0); | 1323 DCHECK(power >= 0); |
| 1324 ASSERT(power < 64); | 1324 DCHECK(power < 64); |
| 1325 if (power == 0) { | 1325 if (power == 0) { |
| 1326 SmiToInteger64(dst, src); | 1326 SmiToInteger64(dst, src); |
| 1327 return; | 1327 return; |
| 1328 } | 1328 } |
| 1329 if (!dst.is(src)) { | 1329 if (!dst.is(src)) { |
| 1330 movp(dst, src); | 1330 movp(dst, src); |
| 1331 } | 1331 } |
| 1332 if (power < kSmiShift) { | 1332 if (power < kSmiShift) { |
| 1333 sarp(dst, Immediate(kSmiShift - power)); | 1333 sarp(dst, Immediate(kSmiShift - power)); |
| 1334 } else if (power > kSmiShift) { | 1334 } else if (power > kSmiShift) { |
| 1335 shlp(dst, Immediate(power - kSmiShift)); | 1335 shlp(dst, Immediate(power - kSmiShift)); |
| 1336 } | 1336 } |
| 1337 } | 1337 } |
| 1338 | 1338 |
| 1339 | 1339 |
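
PositiveSmiTimesPowerOfTwoToInteger64 fuses the untag shift and the multiply-by-2^power into one shift; only the difference of the two exponents matters. A checked sketch under the 32-bit smi layout:

```cpp
// Sketch of the shift fusion above: (smi >> kSmiShift) << power collapses
// into a single shift whose direction depends on which exponent is larger.
#include <cassert>
#include <cstdint>

constexpr int kSmiShift = 32;  // 32-bit smi layout assumed

int64_t PositiveSmiTimesPowerOfTwo(int64_t smi, int power) {
  assert(power >= 0 && power < 64);
  if (power < kSmiShift) return smi >> (kSmiShift - power);  // sarp
  if (power > kSmiShift) return smi << (power - kSmiShift);  // shlp
  return smi;  // power == kSmiShift: the tag shift is exactly the multiply
}

int main() {
  int64_t tagged_five = int64_t{5} << kSmiShift;
  assert(PositiveSmiTimesPowerOfTwo(tagged_five, 3) == 40);  // 5 * 8
  assert(PositiveSmiTimesPowerOfTwo(tagged_five, 35) == (int64_t{5} << 35));
  return 0;
}
```
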
| 1340 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, | 1340 void MacroAssembler::PositiveSmiDivPowerOfTwoToInteger32(Register dst, |
| 1341 Register src, | 1341 Register src, |
| 1342 int power) { | 1342 int power) { |
| 1343 ASSERT((0 <= power) && (power < 32)); | 1343 DCHECK((0 <= power) && (power < 32)); |
| 1344 if (dst.is(src)) { | 1344 if (dst.is(src)) { |
| 1345 shrp(dst, Immediate(power + kSmiShift)); | 1345 shrp(dst, Immediate(power + kSmiShift)); |
| 1346 } else { | 1346 } else { |
| 1347 UNIMPLEMENTED(); // Not used. | 1347 UNIMPLEMENTED(); // Not used. |
| 1348 } | 1348 } |
| 1349 } | 1349 } |
| 1350 | 1350 |
| 1351 | 1351 |
| 1352 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2, | 1352 void MacroAssembler::SmiOrIfSmis(Register dst, Register src1, Register src2, |
| 1353 Label* on_not_smis, | 1353 Label* on_not_smis, |
| 1354 Label::Distance near_jump) { | 1354 Label::Distance near_jump) { |
| 1355 if (dst.is(src1) || dst.is(src2)) { | 1355 if (dst.is(src1) || dst.is(src2)) { |
| 1356 ASSERT(!src1.is(kScratchRegister)); | 1356 DCHECK(!src1.is(kScratchRegister)); |
| 1357 ASSERT(!src2.is(kScratchRegister)); | 1357 DCHECK(!src2.is(kScratchRegister)); |
| 1358 movp(kScratchRegister, src1); | 1358 movp(kScratchRegister, src1); |
| 1359 orp(kScratchRegister, src2); | 1359 orp(kScratchRegister, src2); |
| 1360 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump); | 1360 JumpIfNotSmi(kScratchRegister, on_not_smis, near_jump); |
| 1361 movp(dst, kScratchRegister); | 1361 movp(dst, kScratchRegister); |
| 1362 } else { | 1362 } else { |
| 1363 movp(dst, src1); | 1363 movp(dst, src1); |
| 1364 orp(dst, src2); | 1364 orp(dst, src2); |
| 1365 JumpIfNotSmi(dst, on_not_smis, near_jump); | 1365 JumpIfNotSmi(dst, on_not_smis, near_jump); |
| 1366 } | 1366 } |
| 1367 } | 1367 } |
| (...skipping 25 matching lines...) |
| 1393 | 1393 |
| 1394 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { | 1394 Condition MacroAssembler::CheckBothSmi(Register first, Register second) { |
| 1395 if (first.is(second)) { | 1395 if (first.is(second)) { |
| 1396 return CheckSmi(first); | 1396 return CheckSmi(first); |
| 1397 } | 1397 } |
| 1398 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3); | 1398 STATIC_ASSERT(kSmiTag == 0 && kHeapObjectTag == 1 && kHeapObjectTagMask == 3); |
| 1399 if (SmiValuesAre32Bits()) { | 1399 if (SmiValuesAre32Bits()) { |
| 1400 leal(kScratchRegister, Operand(first, second, times_1, 0)); | 1400 leal(kScratchRegister, Operand(first, second, times_1, 0)); |
| 1401 testb(kScratchRegister, Immediate(0x03)); | 1401 testb(kScratchRegister, Immediate(0x03)); |
| 1402 } else { | 1402 } else { |
| 1403 ASSERT(SmiValuesAre31Bits()); | 1403 DCHECK(SmiValuesAre31Bits()); |
| 1404 movl(kScratchRegister, first); | 1404 movl(kScratchRegister, first); |
| 1405 orl(kScratchRegister, second); | 1405 orl(kScratchRegister, second); |
| 1406 testb(kScratchRegister, Immediate(kSmiTagMask)); | 1406 testb(kScratchRegister, Immediate(kSmiTagMask)); |
| 1407 } | 1407 } |
| 1408 return zero; | 1408 return zero; |
| 1409 } | 1409 } |
| 1410 | 1410 |
| 1411 | 1411 |
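
The 32-bit branch of CheckBothSmi folds two tag tests into one lea: since smis tag as 00 and heap objects as 01 under mask 3, a sum has low bits 00 only when both inputs were smis. The 31-bit branch must OR instead, because adding two heap objects would carry out of the single tag bit and masquerade as a smi. A sketch with made-up tagged words:

```cpp
// Sketch of the two CheckBothSmi strategies above.
#include <cstdint>
#include <cstdio>

bool BothSmi32(uint64_t a, uint64_t b) {
  return ((a + b) & 0x03) == 0;  // leal(scratch, Operand(first, second, ...))
}

bool BothSmi31(uint32_t a, uint32_t b) {
  return ((a | b) & 0x01) == 0;  // orl + testb(..., kSmiTagMask)
}

int main() {
  uint64_t smi = uint64_t{5} << 32;  // tagged smi: low 32 bits clear
  uint64_t obj = 0x12345671;         // heap object: low bit set
  std::printf("%d %d %d\n", BothSmi32(smi, smi), BothSmi32(smi, obj),
              BothSmi32(obj, obj));
  std::printf("%d %d\n", BothSmi31(0x10, 0x20), BothSmi31(0x10, 0x21));
  return 0;
}
```
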
| 1412 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first, | 1412 Condition MacroAssembler::CheckBothNonNegativeSmi(Register first, |
| 1413 Register second) { | 1413 Register second) { |
| (...skipping 21 matching lines...) |
| 1435 movl(scratch, first); | 1435 movl(scratch, first); |
| 1436 } | 1436 } |
| 1437 andl(scratch, second); | 1437 andl(scratch, second); |
| 1438 } | 1438 } |
| 1439 testb(scratch, Immediate(kSmiTagMask)); | 1439 testb(scratch, Immediate(kSmiTagMask)); |
| 1440 return zero; | 1440 return zero; |
| 1441 } | 1441 } |
| 1442 | 1442 |
| 1443 | 1443 |
| 1444 Condition MacroAssembler::CheckIsMinSmi(Register src) { | 1444 Condition MacroAssembler::CheckIsMinSmi(Register src) { |
| 1445 ASSERT(!src.is(kScratchRegister)); | 1445 DCHECK(!src.is(kScratchRegister)); |
| 1446 // If we overflow by subtracting one, it's the minimal smi value. | 1446 // If we overflow by subtracting one, it's the minimal smi value. |
| 1447 cmpp(src, kSmiConstantRegister); | 1447 cmpp(src, kSmiConstantRegister); |
| 1448 return overflow; | 1448 return overflow; |
| 1449 } | 1449 } |
| 1450 | 1450 |
| 1451 | 1451 |
| 1452 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { | 1452 Condition MacroAssembler::CheckInteger32ValidSmiValue(Register src) { |
| 1453 if (SmiValuesAre32Bits()) { | 1453 if (SmiValuesAre32Bits()) { |
| 1454 // A 32-bit integer value can always be converted to a smi. | 1454 // A 32-bit integer value can always be converted to a smi. |
| 1455 return always; | 1455 return always; |
| 1456 } else { | 1456 } else { |
| 1457 ASSERT(SmiValuesAre31Bits()); | 1457 DCHECK(SmiValuesAre31Bits()); |
| 1458 cmpl(src, Immediate(0xc0000000)); | 1458 cmpl(src, Immediate(0xc0000000)); |
| 1459 return positive; | 1459 return positive; |
| 1460 } | 1460 } |
| 1461 } | 1461 } |
| 1462 | 1462 |
| 1463 | 1463 |
| 1464 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { | 1464 Condition MacroAssembler::CheckUInteger32ValidSmiValue(Register src) { |
| 1465 if (SmiValuesAre32Bits()) { | 1465 if (SmiValuesAre32Bits()) { |
| 1466 // An unsigned 32-bit integer value is valid as long as the high bit | 1466 // An unsigned 32-bit integer value is valid as long as the high bit |
| 1467 // is not set. | 1467 // is not set. |
| 1468 testl(src, src); | 1468 testl(src, src); |
| 1469 return positive; | 1469 return positive; |
| 1470 } else { | 1470 } else { |
| 1471 ASSERT(SmiValuesAre31Bits()); | 1471 DCHECK(SmiValuesAre31Bits()); |
| 1472 testl(src, Immediate(0xc0000000)); | 1472 testl(src, Immediate(0xc0000000)); |
| 1473 return zero; | 1473 return zero; |
| 1474 } | 1474 } |
| 1475 } | 1475 } |
| 1476 | 1476 |
| 1477 | 1477 |
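
The 31-bit validity checks read tersely: cmpl against 0xc0000000 with the positive condition accepts exactly [-2^30, 2^30), since the compare effectively adds 2^30 and tests the sign bit, while testl against the same mask with the zero condition accepts unsigned values whose top two bits are clear. A sketch spelling out both predicates:

```cpp
// Sketch of CheckInteger32ValidSmiValue / CheckUInteger32ValidSmiValue for
// the 31-bit smi layout.
#include <cstdint>
#include <cstdio>

bool ValidSigned31(int32_t v) {
  return v >= -(INT32_C(1) << 30) && v < (INT32_C(1) << 30);
}

bool ValidUnsigned31(uint32_t v) {
  return (v & UINT32_C(0xc0000000)) == 0;  // testl + zero
}

int main() {
  std::printf("%d %d\n", ValidSigned31(0x3fffffff), ValidSigned31(0x40000000));
  std::printf("%d %d\n", ValidUnsigned31(UINT32_C(0x3fffffff)),
              ValidUnsigned31(UINT32_C(0x40000000)));
  return 0;
}
```
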
| 1478 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) { | 1478 void MacroAssembler::CheckSmiToIndicator(Register dst, Register src) { |
| 1479 if (dst.is(src)) { | 1479 if (dst.is(src)) { |
| 1480 andl(dst, Immediate(kSmiTagMask)); | 1480 andl(dst, Immediate(kSmiTagMask)); |
| 1481 } else { | 1481 } else { |
| (...skipping 97 matching lines...) |
| 1579 } | 1579 } |
| 1580 | 1580 |
| 1581 | 1581 |
| 1582 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { | 1582 void MacroAssembler::SmiAddConstant(Register dst, Register src, Smi* constant) { |
| 1583 if (constant->value() == 0) { | 1583 if (constant->value() == 0) { |
| 1584 if (!dst.is(src)) { | 1584 if (!dst.is(src)) { |
| 1585 movp(dst, src); | 1585 movp(dst, src); |
| 1586 } | 1586 } |
| 1587 return; | 1587 return; |
| 1588 } else if (dst.is(src)) { | 1588 } else if (dst.is(src)) { |
| 1589 ASSERT(!dst.is(kScratchRegister)); | 1589 DCHECK(!dst.is(kScratchRegister)); |
| 1590 switch (constant->value()) { | 1590 switch (constant->value()) { |
| 1591 case 1: | 1591 case 1: |
| 1592 addp(dst, kSmiConstantRegister); | 1592 addp(dst, kSmiConstantRegister); |
| 1593 return; | 1593 return; |
| 1594 case 2: | 1594 case 2: |
| 1595 leap(dst, Operand(src, kSmiConstantRegister, times_2, 0)); | 1595 leap(dst, Operand(src, kSmiConstantRegister, times_2, 0)); |
| 1596 return; | 1596 return; |
| 1597 case 4: | 1597 case 4: |
| 1598 leap(dst, Operand(src, kSmiConstantRegister, times_4, 0)); | 1598 leap(dst, Operand(src, kSmiConstantRegister, times_4, 0)); |
| 1599 return; | 1599 return; |
| (...skipping 27 matching lines...) |
| 1627 } | 1627 } |
| 1628 } | 1628 } |
| 1629 | 1629 |
| 1630 | 1630 |
| 1631 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { | 1631 void MacroAssembler::SmiAddConstant(const Operand& dst, Smi* constant) { |
| 1632 if (constant->value() != 0) { | 1632 if (constant->value() != 0) { |
| 1633 if (SmiValuesAre32Bits()) { | 1633 if (SmiValuesAre32Bits()) { |
| 1634 addl(Operand(dst, kSmiShift / kBitsPerByte), | 1634 addl(Operand(dst, kSmiShift / kBitsPerByte), |
| 1635 Immediate(constant->value())); | 1635 Immediate(constant->value())); |
| 1636 } else { | 1636 } else { |
| 1637 ASSERT(SmiValuesAre31Bits()); | 1637 DCHECK(SmiValuesAre31Bits()); |
| 1638 addp(dst, Immediate(constant)); | 1638 addp(dst, Immediate(constant)); |
| 1639 } | 1639 } |
| 1640 } | 1640 } |
| 1641 } | 1641 } |
| 1642 | 1642 |
| 1643 | 1643 |
| 1644 void MacroAssembler::SmiAddConstant(Register dst, | 1644 void MacroAssembler::SmiAddConstant(Register dst, |
| 1645 Register src, | 1645 Register src, |
| 1646 Smi* constant, | 1646 Smi* constant, |
| 1647 SmiOperationExecutionMode mode, | 1647 SmiOperationExecutionMode mode, |
| 1648 Label* bailout_label, | 1648 Label* bailout_label, |
| 1649 Label::Distance near_jump) { | 1649 Label::Distance near_jump) { |
| 1650 if (constant->value() == 0) { | 1650 if (constant->value() == 0) { |
| 1651 if (!dst.is(src)) { | 1651 if (!dst.is(src)) { |
| 1652 movp(dst, src); | 1652 movp(dst, src); |
| 1653 } | 1653 } |
| 1654 } else if (dst.is(src)) { | 1654 } else if (dst.is(src)) { |
| 1655 ASSERT(!dst.is(kScratchRegister)); | 1655 DCHECK(!dst.is(kScratchRegister)); |
| 1656 LoadSmiConstant(kScratchRegister, constant); | 1656 LoadSmiConstant(kScratchRegister, constant); |
| 1657 addp(dst, kScratchRegister); | 1657 addp(dst, kScratchRegister); |
| 1658 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { | 1658 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { |
| 1659 j(no_overflow, bailout_label, near_jump); | 1659 j(no_overflow, bailout_label, near_jump); |
| 1660 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); | 1660 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER)); |
| 1661 subp(dst, kScratchRegister); | 1661 subp(dst, kScratchRegister); |
| 1662 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { | 1662 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { |
| 1663 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { | 1663 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { |
| 1664 Label done; | 1664 Label done; |
| 1665 j(no_overflow, &done, Label::kNear); | 1665 j(no_overflow, &done, Label::kNear); |
| 1666 subp(dst, kScratchRegister); | 1666 subp(dst, kScratchRegister); |
| 1667 jmp(bailout_label, near_jump); | 1667 jmp(bailout_label, near_jump); |
| 1668 bind(&done); | 1668 bind(&done); |
| 1669 } else { | 1669 } else { |
| 1670 // Bail out on overflow without preserving src. | 1670 // Bail out on overflow without preserving src. |
| 1671 j(overflow, bailout_label, near_jump); | 1671 j(overflow, bailout_label, near_jump); |
| 1672 } | 1672 } |
| 1673 } else { | 1673 } else { |
| 1674 CHECK(mode.IsEmpty()); | 1674 CHECK(mode.IsEmpty()); |
| 1675 } | 1675 } |
| 1676 } else { | 1676 } else { |
| 1677 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); | 1677 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER)); |
| 1678 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); | 1678 DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW)); |
| 1679 LoadSmiConstant(dst, constant); | 1679 LoadSmiConstant(dst, constant); |
| 1680 addp(dst, src); | 1680 addp(dst, src); |
| 1681 j(overflow, bailout_label, near_jump); | 1681 j(overflow, bailout_label, near_jump); |
| 1682 } | 1682 } |
| 1683 } | 1683 } |
| 1684 | 1684 |
| 1685 | 1685 |
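
In the dst.is(src) branch above, the add is destructive, so PRESERVE_SOURCE_REGISTER obliges the code to subtract the constant back out before taking the overflow bailout. A control-flow sketch in plain C++, using unsigned arithmetic to avoid signed-overflow UB:

```cpp
// Sketch of SmiAddConstant with BAILOUT_ON_OVERFLOW + PRESERVE_SOURCE_REGISTER
// when dst aliases src.
#include <cstdint>
#include <cstdio>

bool AddTaggedPreservingSource(uint64_t* dst_src, uint64_t tagged_constant) {
  uint64_t original = *dst_src;
  uint64_t sum = original + tagged_constant;
  // Signed overflow: operands agree in sign, the result does not.
  bool overflow =
      ((~(original ^ tagged_constant) & (original ^ sum)) >> 63) != 0;
  if (overflow) {
    // subp(dst, kScratchRegister): restore the source before bailing out.
    *dst_src = original;
    return false;
  }
  *dst_src = sum;
  return true;
}

int main() {
  uint64_t v = uint64_t{1} << 62;
  bool ok = AddTaggedPreservingSource(&v, uint64_t{1} << 62);  // overflows
  std::printf("ok=%d preserved=%d\n", ok, v == (uint64_t{1} << 62));
  return 0;
}
```
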
| 1686 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { | 1686 void MacroAssembler::SmiSubConstant(Register dst, Register src, Smi* constant) { |
| 1687 if (constant->value() == 0) { | 1687 if (constant->value() == 0) { |
| 1688 if (!dst.is(src)) { | 1688 if (!dst.is(src)) { |
| 1689 movp(dst, src); | 1689 movp(dst, src); |
| 1690 } | 1690 } |
| 1691 } else if (dst.is(src)) { | 1691 } else if (dst.is(src)) { |
| 1692 ASSERT(!dst.is(kScratchRegister)); | 1692 DCHECK(!dst.is(kScratchRegister)); |
| 1693 Register constant_reg = GetSmiConstant(constant); | 1693 Register constant_reg = GetSmiConstant(constant); |
| 1694 subp(dst, constant_reg); | 1694 subp(dst, constant_reg); |
| 1695 } else { | 1695 } else { |
| 1696 if (constant->value() == Smi::kMinValue) { | 1696 if (constant->value() == Smi::kMinValue) { |
| 1697 LoadSmiConstant(dst, constant); | 1697 LoadSmiConstant(dst, constant); |
| 1698 // Adding and subtracting the min-value gives the same result; it only | 1698 // Adding and subtracting the min-value gives the same result; it only |
| 1699 // differs in the overflow bit, which we don't check here. | 1699 // differs in the overflow bit, which we don't check here. |
| 1700 addp(dst, src); | 1700 addp(dst, src); |
| 1701 } else { | 1701 } else { |
| 1702 // Subtract by adding the negation. | 1702 // Subtract by adding the negation. |
| 1703 LoadSmiConstant(dst, Smi::FromInt(-constant->value())); | 1703 LoadSmiConstant(dst, Smi::FromInt(-constant->value())); |
| 1704 addp(dst, src); | 1704 addp(dst, src); |
| 1705 } | 1705 } |
| 1706 } | 1706 } |
| 1707 } | 1707 } |
| 1708 | 1708 |
| 1709 | 1709 |
| 1710 void MacroAssembler::SmiSubConstant(Register dst, | 1710 void MacroAssembler::SmiSubConstant(Register dst, |
| 1711 Register src, | 1711 Register src, |
| 1712 Smi* constant, | 1712 Smi* constant, |
| 1713 SmiOperationExecutionMode mode, | 1713 SmiOperationExecutionMode mode, |
| 1714 Label* bailout_label, | 1714 Label* bailout_label, |
| 1715 Label::Distance near_jump) { | 1715 Label::Distance near_jump) { |
| 1716 if (constant->value() == 0) { | 1716 if (constant->value() == 0) { |
| 1717 if (!dst.is(src)) { | 1717 if (!dst.is(src)) { |
| 1718 movp(dst, src); | 1718 movp(dst, src); |
| 1719 } | 1719 } |
| 1720 } else if (dst.is(src)) { | 1720 } else if (dst.is(src)) { |
| 1721 ASSERT(!dst.is(kScratchRegister)); | 1721 DCHECK(!dst.is(kScratchRegister)); |
| 1722 LoadSmiConstant(kScratchRegister, constant); | 1722 LoadSmiConstant(kScratchRegister, constant); |
| 1723 subp(dst, kScratchRegister); | 1723 subp(dst, kScratchRegister); |
| 1724 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { | 1724 if (mode.Contains(BAILOUT_ON_NO_OVERFLOW)) { |
| 1725 j(no_overflow, bailout_label, near_jump); | 1725 j(no_overflow, bailout_label, near_jump); |
| 1726 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); | 1726 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER)); |
| 1727 addp(dst, kScratchRegister); | 1727 addp(dst, kScratchRegister); |
| 1728 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { | 1728 } else if (mode.Contains(BAILOUT_ON_OVERFLOW)) { |
| 1729 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { | 1729 if (mode.Contains(PRESERVE_SOURCE_REGISTER)) { |
| 1730 Label done; | 1730 Label done; |
| 1731 j(no_overflow, &done, Label::kNear); | 1731 j(no_overflow, &done, Label::kNear); |
| 1732 addp(dst, kScratchRegister); | 1732 addp(dst, kScratchRegister); |
| 1733 jmp(bailout_label, near_jump); | 1733 jmp(bailout_label, near_jump); |
| 1734 bind(&done); | 1734 bind(&done); |
| 1735 } else { | 1735 } else { |
| 1736 // Bail out on overflow without preserving src. | 1736 // Bail out on overflow without preserving src. |
| 1737 j(overflow, bailout_label, near_jump); | 1737 j(overflow, bailout_label, near_jump); |
| 1738 } | 1738 } |
| 1739 } else { | 1739 } else { |
| 1740 CHECK(mode.IsEmpty()); | 1740 CHECK(mode.IsEmpty()); |
| 1741 } | 1741 } |
| 1742 } else { | 1742 } else { |
| 1743 ASSERT(mode.Contains(PRESERVE_SOURCE_REGISTER)); | 1743 DCHECK(mode.Contains(PRESERVE_SOURCE_REGISTER)); |
| 1744 ASSERT(mode.Contains(BAILOUT_ON_OVERFLOW)); | 1744 DCHECK(mode.Contains(BAILOUT_ON_OVERFLOW)); |
| 1745 if (constant->value() == Smi::kMinValue) { | 1745 if (constant->value() == Smi::kMinValue) { |
| 1746 ASSERT(!dst.is(kScratchRegister)); | 1746 DCHECK(!dst.is(kScratchRegister)); |
| 1747 movp(dst, src); | 1747 movp(dst, src); |
| 1748 LoadSmiConstant(kScratchRegister, constant); | 1748 LoadSmiConstant(kScratchRegister, constant); |
| 1749 subp(dst, kScratchRegister); | 1749 subp(dst, kScratchRegister); |
| 1750 j(overflow, bailout_label, near_jump); | 1750 j(overflow, bailout_label, near_jump); |
| 1751 } else { | 1751 } else { |
| 1752 // Subtract by adding the negation. | 1752 // Subtract by adding the negation. |
| 1753 LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); | 1753 LoadSmiConstant(dst, Smi::FromInt(-(constant->value()))); |
| 1754 addp(dst, src); | 1754 addp(dst, src); |
| 1755 j(overflow, bailout_label, near_jump); | 1755 j(overflow, bailout_label, near_jump); |
| 1756 } | 1756 } |
| 1757 } | 1757 } |
| 1758 } | 1758 } |
| 1759 | 1759 |
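The "subtract by adding the negation" path above has exactly one excluded constant. A minimal host-side sketch of why, assuming the SmiValuesAre32Bits() layout (payload in the upper 32 bits, Smi::kMinValue == INT32_MIN); SmiTag/SmiUntag here are illustrations, not V8's Smi API:

    #include <cassert>
    #include <climits>
    #include <cstdint>

    // Assumed tagging model: payload shifted into the upper 32 bits.
    int64_t SmiTag(int32_t v) {
      return static_cast<int64_t>(static_cast<uint64_t>(v) << 32);
    }
    int32_t SmiUntag(int64_t s) { return static_cast<int32_t>(s >> 32); }

    int main() {
      // For any constant c other than kMinValue, x - c == x + (-c), and the
      // add can run directly on tagged values:
      int32_t x = 100, c = 42;
      assert(SmiUntag(SmiTag(-c) + SmiTag(x)) == x - c);
      // The excluded case: -INT32_MIN does not fit in 32 bits, so no smi
      // constant for it exists; hence the explicit subp path above.
      assert(-static_cast<int64_t>(INT32_MIN) == (INT64_C(1) << 31));
      return 0;
    }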
| 1760 | 1760 |
| 1761 void MacroAssembler::SmiNeg(Register dst, | 1761 void MacroAssembler::SmiNeg(Register dst, |
| 1762 Register src, | 1762 Register src, |
| 1763 Label* on_smi_result, | 1763 Label* on_smi_result, |
| 1764 Label::Distance near_jump) { | 1764 Label::Distance near_jump) { |
| 1765 if (dst.is(src)) { | 1765 if (dst.is(src)) { |
| 1766 ASSERT(!dst.is(kScratchRegister)); | 1766 DCHECK(!dst.is(kScratchRegister)); |
| 1767 movp(kScratchRegister, src); | 1767 movp(kScratchRegister, src); |
| 1768 negp(dst); // Low 32 bits are retained as zero by negation. | 1768 negp(dst); // Low 32 bits are retained as zero by negation. |
| 1769 // Test if result is zero or Smi::kMinValue. | 1769 // Test if result is zero or Smi::kMinValue. |
| 1770 cmpp(dst, kScratchRegister); | 1770 cmpp(dst, kScratchRegister); |
| 1771 j(not_equal, on_smi_result, near_jump); | 1771 j(not_equal, on_smi_result, near_jump); |
| 1772 movp(src, kScratchRegister); | 1772 movp(src, kScratchRegister); |
| 1773 } else { | 1773 } else { |
| 1774 movp(dst, src); | 1774 movp(dst, src); |
| 1775 negp(dst); | 1775 negp(dst); |
| 1776 cmpp(dst, src); | 1776 cmpp(dst, src); |
| (...skipping 24 matching lines...) |
| 1801 masm->j(overflow, on_not_smi_result, near_jump); | 1801 masm->j(overflow, on_not_smi_result, near_jump); |
| 1802 } | 1802 } |
| 1803 } | 1803 } |
| 1804 | 1804 |
| 1805 | 1805 |
| 1806 void MacroAssembler::SmiAdd(Register dst, | 1806 void MacroAssembler::SmiAdd(Register dst, |
| 1807 Register src1, | 1807 Register src1, |
| 1808 Register src2, | 1808 Register src2, |
| 1809 Label* on_not_smi_result, | 1809 Label* on_not_smi_result, |
| 1810 Label::Distance near_jump) { | 1810 Label::Distance near_jump) { |
| 1811 ASSERT_NOT_NULL(on_not_smi_result); | 1811 DCHECK_NOT_NULL(on_not_smi_result); |
| 1812 ASSERT(!dst.is(src2)); | 1812 DCHECK(!dst.is(src2)); |
| 1813 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump); | 1813 SmiAddHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump); |
| 1814 } | 1814 } |
| 1815 | 1815 |
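The overflow bailout in SmiAddHelper has a direct C++ analogue: with 32-bit payloads in the upper half, the tagged add overflows the 64-bit register exactly when the payload add overflows 32 bits. A sketch assuming GCC/Clang's __builtin_add_overflow:

    #include <cassert>
    #include <climits>
    #include <cstdint>

    // Returns false where the assembler would take on_not_smi_result.
    bool SmiAddChecked(int64_t tagged_a, int64_t tagged_b, int64_t* out) {
      return !__builtin_add_overflow(tagged_a, tagged_b, out);
    }

    int main() {
      int64_t out;
      assert(SmiAddChecked(INT64_C(1) << 32, INT64_C(2) << 32, &out));  // 1 + 2
      // INT32_MAX + 1 overflows the payload, so the tagged 64-bit add
      // overflows too; that is what j(overflow, ...) catches:
      assert(!SmiAddChecked(static_cast<int64_t>(INT32_MAX) << 32,
                            INT64_C(1) << 32, &out));
      return 0;
    }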
| 1816 | 1816 |
| 1817 void MacroAssembler::SmiAdd(Register dst, | 1817 void MacroAssembler::SmiAdd(Register dst, |
| 1818 Register src1, | 1818 Register src1, |
| 1819 const Operand& src2, | 1819 const Operand& src2, |
| 1820 Label* on_not_smi_result, | 1820 Label* on_not_smi_result, |
| 1821 Label::Distance near_jump) { | 1821 Label::Distance near_jump) { |
| 1822 ASSERT_NOT_NULL(on_not_smi_result); | 1822 DCHECK_NOT_NULL(on_not_smi_result); |
| 1823 ASSERT(!src2.AddressUsesRegister(dst)); | 1823 DCHECK(!src2.AddressUsesRegister(dst)); |
| 1824 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump); | 1824 SmiAddHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump); |
| 1825 } | 1825 } |
| 1826 | 1826 |
| 1827 | 1827 |
| 1828 void MacroAssembler::SmiAdd(Register dst, | 1828 void MacroAssembler::SmiAdd(Register dst, |
| 1829 Register src1, | 1829 Register src1, |
| 1830 Register src2) { | 1830 Register src2) { |
| 1831 // No overflow checking. Use only when it's known that | 1831 // No overflow checking. Use only when it's known that |
| 1832 // overflowing is impossible. | 1832 // overflowing is impossible. |
| 1833 if (!dst.is(src1)) { | 1833 if (!dst.is(src1)) { |
| (...skipping 31 matching lines...) |
| 1865 masm->j(overflow, on_not_smi_result, near_jump); | 1865 masm->j(overflow, on_not_smi_result, near_jump); |
| 1866 } | 1866 } |
| 1867 } | 1867 } |
| 1868 | 1868 |
| 1869 | 1869 |
| 1870 void MacroAssembler::SmiSub(Register dst, | 1870 void MacroAssembler::SmiSub(Register dst, |
| 1871 Register src1, | 1871 Register src1, |
| 1872 Register src2, | 1872 Register src2, |
| 1873 Label* on_not_smi_result, | 1873 Label* on_not_smi_result, |
| 1874 Label::Distance near_jump) { | 1874 Label::Distance near_jump) { |
| 1875 ASSERT_NOT_NULL(on_not_smi_result); | 1875 DCHECK_NOT_NULL(on_not_smi_result); |
| 1876 ASSERT(!dst.is(src2)); | 1876 DCHECK(!dst.is(src2)); |
| 1877 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump); | 1877 SmiSubHelper<Register>(this, dst, src1, src2, on_not_smi_result, near_jump); |
| 1878 } | 1878 } |
| 1879 | 1879 |
| 1880 | 1880 |
| 1881 void MacroAssembler::SmiSub(Register dst, | 1881 void MacroAssembler::SmiSub(Register dst, |
| 1882 Register src1, | 1882 Register src1, |
| 1883 const Operand& src2, | 1883 const Operand& src2, |
| 1884 Label* on_not_smi_result, | 1884 Label* on_not_smi_result, |
| 1885 Label::Distance near_jump) { | 1885 Label::Distance near_jump) { |
| 1886 ASSERT_NOT_NULL(on_not_smi_result); | 1886 DCHECK_NOT_NULL(on_not_smi_result); |
| 1887 ASSERT(!src2.AddressUsesRegister(dst)); | 1887 DCHECK(!src2.AddressUsesRegister(dst)); |
| 1888 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump); | 1888 SmiSubHelper<Operand>(this, dst, src1, src2, on_not_smi_result, near_jump); |
| 1889 } | 1889 } |
| 1890 | 1890 |
| 1891 | 1891 |
| 1892 template<class T> | 1892 template<class T> |
| 1893 static void SmiSubNoOverflowHelper(MacroAssembler* masm, | 1893 static void SmiSubNoOverflowHelper(MacroAssembler* masm, |
| 1894 Register dst, | 1894 Register dst, |
| 1895 Register src1, | 1895 Register src1, |
| 1896 T src2) { | 1896 T src2) { |
| 1897 // No overflow checking. Use only when it's known that | 1897 // No overflow checking. Use only when it's known that |
| 1898 // overflowing is impossible (e.g., subtracting two positive smis). | 1898 // overflowing is impossible (e.g., subtracting two positive smis). |
| 1899 if (!dst.is(src1)) { | 1899 if (!dst.is(src1)) { |
| 1900 masm->movp(dst, src1); | 1900 masm->movp(dst, src1); |
| 1901 } | 1901 } |
| 1902 masm->subp(dst, src2); | 1902 masm->subp(dst, src2); |
| 1903 masm->Assert(no_overflow, kSmiSubtractionOverflow); | 1903 masm->Assert(no_overflow, kSmiSubtractionOverflow); |
| 1904 } | 1904 } |
| 1905 | 1905 |
| 1906 | 1906 |
| 1907 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { | 1907 void MacroAssembler::SmiSub(Register dst, Register src1, Register src2) { |
| 1908 ASSERT(!dst.is(src2)); | 1908 DCHECK(!dst.is(src2)); |
| 1909 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2); | 1909 SmiSubNoOverflowHelper<Register>(this, dst, src1, src2); |
| 1910 } | 1910 } |
| 1911 | 1911 |
| 1912 | 1912 |
| 1913 void MacroAssembler::SmiSub(Register dst, | 1913 void MacroAssembler::SmiSub(Register dst, |
| 1914 Register src1, | 1914 Register src1, |
| 1915 const Operand& src2) { | 1915 const Operand& src2) { |
| 1916 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2); | 1916 SmiSubNoOverflowHelper<Operand>(this, dst, src1, src2); |
| 1917 } | 1917 } |
| 1918 | 1918 |
| 1919 | 1919 |
| 1920 void MacroAssembler::SmiMul(Register dst, | 1920 void MacroAssembler::SmiMul(Register dst, |
| 1921 Register src1, | 1921 Register src1, |
| 1922 Register src2, | 1922 Register src2, |
| 1923 Label* on_not_smi_result, | 1923 Label* on_not_smi_result, |
| 1924 Label::Distance near_jump) { | 1924 Label::Distance near_jump) { |
| 1925 ASSERT(!dst.is(src2)); | 1925 DCHECK(!dst.is(src2)); |
| 1926 ASSERT(!dst.is(kScratchRegister)); | 1926 DCHECK(!dst.is(kScratchRegister)); |
| 1927 ASSERT(!src1.is(kScratchRegister)); | 1927 DCHECK(!src1.is(kScratchRegister)); |
| 1928 ASSERT(!src2.is(kScratchRegister)); | 1928 DCHECK(!src2.is(kScratchRegister)); |
| 1929 | 1929 |
| 1930 if (dst.is(src1)) { | 1930 if (dst.is(src1)) { |
| 1931 Label failure, zero_correct_result; | 1931 Label failure, zero_correct_result; |
| 1932 movp(kScratchRegister, src1); // Create backup for later testing. | 1932 movp(kScratchRegister, src1); // Create backup for later testing. |
| 1933 SmiToInteger64(dst, src1); | 1933 SmiToInteger64(dst, src1); |
| 1934 imulp(dst, src2); | 1934 imulp(dst, src2); |
| 1935 j(overflow, &failure, Label::kNear); | 1935 j(overflow, &failure, Label::kNear); |
| 1936 | 1936 |
| 1937 // Check for negative zero result. If product is zero, and one | 1937 // Check for negative zero result. If product is zero, and one |
| 1938 // argument is negative, go to slow case. | 1938 // argument is negative, go to slow case. |
| (...skipping 31 matching lines...) |
| 1970 bind(&correct_result); | 1970 bind(&correct_result); |
| 1971 } | 1971 } |
| 1972 } | 1972 } |
| 1973 | 1973 |
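The slow case mentioned in the comment above exists because smis cannot encode -0, yet JS multiplication must produce it when a zero product has a negative factor. A host-side sketch of the condition (the register-level sign test itself sits in the elided lines):

    #include <cassert>
    #include <cstdint>

    // True when the product is zero but JS requires -0: the signs of the
    // factors differ (a ^ b is negative) and one of them is zero.
    bool MulNeedsNegativeZero(int32_t a, int32_t b) {
      return static_cast<int64_t>(a) * b == 0 && (a ^ b) < 0;
    }

    int main() {
      assert(MulNeedsNegativeZero(0, -5));   // 0 * -5 -> -0, not a smi
      assert(!MulNeedsNegativeZero(0, 5));   // +0 is a valid smi
      assert(!MulNeedsNegativeZero(-4, 5));  // nonzero product, no problem
      return 0;
    }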
| 1974 | 1974 |
| 1975 void MacroAssembler::SmiDiv(Register dst, | 1975 void MacroAssembler::SmiDiv(Register dst, |
| 1976 Register src1, | 1976 Register src1, |
| 1977 Register src2, | 1977 Register src2, |
| 1978 Label* on_not_smi_result, | 1978 Label* on_not_smi_result, |
| 1979 Label::Distance near_jump) { | 1979 Label::Distance near_jump) { |
| 1980 ASSERT(!src1.is(kScratchRegister)); | 1980 DCHECK(!src1.is(kScratchRegister)); |
| 1981 ASSERT(!src2.is(kScratchRegister)); | 1981 DCHECK(!src2.is(kScratchRegister)); |
| 1982 ASSERT(!dst.is(kScratchRegister)); | 1982 DCHECK(!dst.is(kScratchRegister)); |
| 1983 ASSERT(!src2.is(rax)); | 1983 DCHECK(!src2.is(rax)); |
| 1984 ASSERT(!src2.is(rdx)); | 1984 DCHECK(!src2.is(rdx)); |
| 1985 ASSERT(!src1.is(rdx)); | 1985 DCHECK(!src1.is(rdx)); |
| 1986 | 1986 |
| 1987 // Check for 0 divisor (result is +/-Infinity). | 1987 // Check for 0 divisor (result is +/-Infinity). |
| 1988 testp(src2, src2); | 1988 testp(src2, src2); |
| 1989 j(zero, on_not_smi_result, near_jump); | 1989 j(zero, on_not_smi_result, near_jump); |
| 1990 | 1990 |
| 1991 if (src1.is(rax)) { | 1991 if (src1.is(rax)) { |
| 1992 movp(kScratchRegister, src1); | 1992 movp(kScratchRegister, src1); |
| 1993 } | 1993 } |
| 1994 SmiToInteger32(rax, src1); | 1994 SmiToInteger32(rax, src1); |
| 1995 // We need to rule out dividing Smi::kMinValue by -1, since that would | 1995 // We need to rule out dividing Smi::kMinValue by -1, since that would |
| (...skipping 37 matching lines...) |
| 2033 } | 2033 } |
| 2034 Integer32ToSmi(dst, rax); | 2034 Integer32ToSmi(dst, rax); |
| 2035 } | 2035 } |
| 2036 | 2036 |
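Alongside the zero-divisor test above, the comment calls out the one quotient that can never be a smi: kMinValue / -1 equals -kMinValue, which is out of payload range (and idiv would fault on INT32_MIN / -1 anyway). A sketch of that guard, assuming 32-bit payloads:

    #include <cassert>
    #include <climits>

    // False where the assembler bails out to on_not_smi_result.
    bool DivQuotientFitsSmi(int dividend, int divisor) {
      if (divisor == 0) return false;                          // +/-Infinity
      if (dividend == INT_MIN && divisor == -1) return false;  // -kMinValue
      return true;
    }

    int main() {
      assert(DivQuotientFitsSmi(10, -2));
      assert(!DivQuotientFitsSmi(10, 0));
      assert(!DivQuotientFitsSmi(INT_MIN, -1));
      return 0;
    }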
| 2037 | 2037 |
| 2038 void MacroAssembler::SmiMod(Register dst, | 2038 void MacroAssembler::SmiMod(Register dst, |
| 2039 Register src1, | 2039 Register src1, |
| 2040 Register src2, | 2040 Register src2, |
| 2041 Label* on_not_smi_result, | 2041 Label* on_not_smi_result, |
| 2042 Label::Distance near_jump) { | 2042 Label::Distance near_jump) { |
| 2043 ASSERT(!dst.is(kScratchRegister)); | 2043 DCHECK(!dst.is(kScratchRegister)); |
| 2044 ASSERT(!src1.is(kScratchRegister)); | 2044 DCHECK(!src1.is(kScratchRegister)); |
| 2045 ASSERT(!src2.is(kScratchRegister)); | 2045 DCHECK(!src2.is(kScratchRegister)); |
| 2046 ASSERT(!src2.is(rax)); | 2046 DCHECK(!src2.is(rax)); |
| 2047 ASSERT(!src2.is(rdx)); | 2047 DCHECK(!src2.is(rdx)); |
| 2048 ASSERT(!src1.is(rdx)); | 2048 DCHECK(!src1.is(rdx)); |
| 2049 ASSERT(!src1.is(src2)); | 2049 DCHECK(!src1.is(src2)); |
| 2050 | 2050 |
| 2051 testp(src2, src2); | 2051 testp(src2, src2); |
| 2052 j(zero, on_not_smi_result, near_jump); | 2052 j(zero, on_not_smi_result, near_jump); |
| 2053 | 2053 |
| 2054 if (src1.is(rax)) { | 2054 if (src1.is(rax)) { |
| 2055 movp(kScratchRegister, src1); | 2055 movp(kScratchRegister, src1); |
| 2056 } | 2056 } |
| 2057 SmiToInteger32(rax, src1); | 2057 SmiToInteger32(rax, src1); |
| 2058 SmiToInteger32(src2, src2); | 2058 SmiToInteger32(src2, src2); |
| 2059 | 2059 |
| (...skipping 25 matching lines...) |
| 2085 testl(rdx, rdx); | 2085 testl(rdx, rdx); |
| 2086 j(not_zero, &smi_result, Label::kNear); | 2086 j(not_zero, &smi_result, Label::kNear); |
| 2087 testp(src1, src1); | 2087 testp(src1, src1); |
| 2088 j(negative, on_not_smi_result, near_jump); | 2088 j(negative, on_not_smi_result, near_jump); |
| 2089 bind(&smi_result); | 2089 bind(&smi_result); |
| 2090 Integer32ToSmi(dst, rdx); | 2090 Integer32ToSmi(dst, rdx); |
| 2091 } | 2091 } |
| 2092 | 2092 |
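The tail of SmiMod above is the -0 check in data-flow form: a zero remainder in rdx combined with a negative dividend means the JS result is -0. A host-side sketch (divisor already checked non-zero earlier):

    #include <cassert>

    // True where the code falls through to Integer32ToSmi(dst, rdx).
    bool ModResultIsSmi(int dividend, int divisor) {
      int remainder = dividend % divisor;  // what idiv leaves in rdx
      if (remainder != 0) return true;     // the &smi_result shortcut
      return dividend >= 0;                // zero remainder: sign decides
    }

    int main() {
      assert(ModResultIsSmi(7, 2));    // remainder 1
      assert(ModResultIsSmi(-7, 2));   // remainder -1 is a valid smi
      assert(!ModResultIsSmi(-8, 2));  // JS -8 % 2 is -0
      return 0;
    }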
| 2093 | 2093 |
| 2094 void MacroAssembler::SmiNot(Register dst, Register src) { | 2094 void MacroAssembler::SmiNot(Register dst, Register src) { |
| 2095 ASSERT(!dst.is(kScratchRegister)); | 2095 DCHECK(!dst.is(kScratchRegister)); |
| 2096 ASSERT(!src.is(kScratchRegister)); | 2096 DCHECK(!src.is(kScratchRegister)); |
| 2097 if (SmiValuesAre32Bits()) { | 2097 if (SmiValuesAre32Bits()) { |
| 2098 // Set tag and padding bits before negating, so that they are zero | 2098 // Set tag and padding bits before negating, so that they are zero |
| 2099 // afterwards. | 2099 // afterwards. |
| 2100 movl(kScratchRegister, Immediate(~0)); | 2100 movl(kScratchRegister, Immediate(~0)); |
| 2101 } else { | 2101 } else { |
| 2102 ASSERT(SmiValuesAre31Bits()); | 2102 DCHECK(SmiValuesAre31Bits()); |
| 2103 movl(kScratchRegister, Immediate(1)); | 2103 movl(kScratchRegister, Immediate(1)); |
| 2104 } | 2104 } |
| 2105 if (dst.is(src)) { | 2105 if (dst.is(src)) { |
| 2106 xorp(dst, kScratchRegister); | 2106 xorp(dst, kScratchRegister); |
| 2107 } else { | 2107 } else { |
| 2108 leap(dst, Operand(src, kScratchRegister, times_1, 0)); | 2108 leap(dst, Operand(src, kScratchRegister, times_1, 0)); |
| 2109 } | 2109 } |
| 2110 notp(dst); | 2110 notp(dst); |
| 2111 } | 2111 } |
| 2112 | 2112 |
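A sketch of why the pre-set bits above make notp produce a ready-tagged result, under the 32-bit-payload assumption (movl zero-extends, so kScratchRegister holds 0x00000000FFFFFFFF; the aliased dst.is(src) path uses xorp, which is equivalent here because the low bits start out zero):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t tagged = UINT64_C(7) << 32;        // smi 7, low 32 bits zero
      uint64_t with_ones = tagged + 0xFFFFFFFFu;  // leap(dst, src + scratch)
      uint64_t result = ~with_ones;               // notp(dst)
      // The pre-set low bits flip back to zero and the payload becomes ~7:
      assert(result == static_cast<uint64_t>(INT64_C(-8)) << 32);
      return 0;
    }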
| 2113 | 2113 |
| 2114 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { | 2114 void MacroAssembler::SmiAnd(Register dst, Register src1, Register src2) { |
| 2115 ASSERT(!dst.is(src2)); | 2115 DCHECK(!dst.is(src2)); |
| 2116 if (!dst.is(src1)) { | 2116 if (!dst.is(src1)) { |
| 2117 movp(dst, src1); | 2117 movp(dst, src1); |
| 2118 } | 2118 } |
| 2119 andp(dst, src2); | 2119 andp(dst, src2); |
| 2120 } | 2120 } |
| 2121 | 2121 |
| 2122 | 2122 |
| 2123 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) { | 2123 void MacroAssembler::SmiAndConstant(Register dst, Register src, Smi* constant) { |
| 2124 if (constant->value() == 0) { | 2124 if (constant->value() == 0) { |
| 2125 Set(dst, 0); | 2125 Set(dst, 0); |
| 2126 } else if (dst.is(src)) { | 2126 } else if (dst.is(src)) { |
| 2127 ASSERT(!dst.is(kScratchRegister)); | 2127 DCHECK(!dst.is(kScratchRegister)); |
| 2128 Register constant_reg = GetSmiConstant(constant); | 2128 Register constant_reg = GetSmiConstant(constant); |
| 2129 andp(dst, constant_reg); | 2129 andp(dst, constant_reg); |
| 2130 } else { | 2130 } else { |
| 2131 LoadSmiConstant(dst, constant); | 2131 LoadSmiConstant(dst, constant); |
| 2132 andp(dst, src); | 2132 andp(dst, src); |
| 2133 } | 2133 } |
| 2134 } | 2134 } |
| 2135 | 2135 |
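Why the And/Or/Xor family above needs no untag-retag round trip: both operands carry zero tag bits, and bitwise operations commute with the tag shift, so the result is already a valid smi. A compile-time check under the 32-bit-payload assumption:

    #include <cstdint>

    static_assert(((INT64_C(6) << 32) & (INT64_C(3) << 32)) ==
                      ((INT64_C(6) & INT64_C(3)) << 32),
                  "AND commutes with the smi shift");
    static_assert(((INT64_C(6) << 32) ^ (INT64_C(3) << 32)) ==
                      ((INT64_C(6) ^ INT64_C(3)) << 32),
                  "so does XOR; the result is already tagged");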
| 2136 | 2136 |
| 2137 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) { | 2137 void MacroAssembler::SmiOr(Register dst, Register src1, Register src2) { |
| 2138 if (!dst.is(src1)) { | 2138 if (!dst.is(src1)) { |
| 2139 ASSERT(!src1.is(src2)); | 2139 DCHECK(!src1.is(src2)); |
| 2140 movp(dst, src1); | 2140 movp(dst, src1); |
| 2141 } | 2141 } |
| 2142 orp(dst, src2); | 2142 orp(dst, src2); |
| 2143 } | 2143 } |
| 2144 | 2144 |
| 2145 | 2145 |
| 2146 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) { | 2146 void MacroAssembler::SmiOrConstant(Register dst, Register src, Smi* constant) { |
| 2147 if (dst.is(src)) { | 2147 if (dst.is(src)) { |
| 2148 ASSERT(!dst.is(kScratchRegister)); | 2148 DCHECK(!dst.is(kScratchRegister)); |
| 2149 Register constant_reg = GetSmiConstant(constant); | 2149 Register constant_reg = GetSmiConstant(constant); |
| 2150 orp(dst, constant_reg); | 2150 orp(dst, constant_reg); |
| 2151 } else { | 2151 } else { |
| 2152 LoadSmiConstant(dst, constant); | 2152 LoadSmiConstant(dst, constant); |
| 2153 orp(dst, src); | 2153 orp(dst, src); |
| 2154 } | 2154 } |
| 2155 } | 2155 } |
| 2156 | 2156 |
| 2157 | 2157 |
| 2158 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) { | 2158 void MacroAssembler::SmiXor(Register dst, Register src1, Register src2) { |
| 2159 if (!dst.is(src1)) { | 2159 if (!dst.is(src1)) { |
| 2160 ASSERT(!src1.is(src2)); | 2160 DCHECK(!src1.is(src2)); |
| 2161 movp(dst, src1); | 2161 movp(dst, src1); |
| 2162 } | 2162 } |
| 2163 xorp(dst, src2); | 2163 xorp(dst, src2); |
| 2164 } | 2164 } |
| 2165 | 2165 |
| 2166 | 2166 |
| 2167 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) { | 2167 void MacroAssembler::SmiXorConstant(Register dst, Register src, Smi* constant) { |
| 2168 if (dst.is(src)) { | 2168 if (dst.is(src)) { |
| 2169 ASSERT(!dst.is(kScratchRegister)); | 2169 DCHECK(!dst.is(kScratchRegister)); |
| 2170 Register constant_reg = GetSmiConstant(constant); | 2170 Register constant_reg = GetSmiConstant(constant); |
| 2171 xorp(dst, constant_reg); | 2171 xorp(dst, constant_reg); |
| 2172 } else { | 2172 } else { |
| 2173 LoadSmiConstant(dst, constant); | 2173 LoadSmiConstant(dst, constant); |
| 2174 xorp(dst, src); | 2174 xorp(dst, src); |
| 2175 } | 2175 } |
| 2176 } | 2176 } |
| 2177 | 2177 |
| 2178 | 2178 |
| 2179 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, | 2179 void MacroAssembler::SmiShiftArithmeticRightConstant(Register dst, |
| 2180 Register src, | 2180 Register src, |
| 2181 int shift_value) { | 2181 int shift_value) { |
| 2182 ASSERT(is_uint5(shift_value)); | 2182 DCHECK(is_uint5(shift_value)); |
| 2183 if (shift_value > 0) { | 2183 if (shift_value > 0) { |
| 2184 if (dst.is(src)) { | 2184 if (dst.is(src)) { |
| 2185 sarp(dst, Immediate(shift_value + kSmiShift)); | 2185 sarp(dst, Immediate(shift_value + kSmiShift)); |
| 2186 shlp(dst, Immediate(kSmiShift)); | 2186 shlp(dst, Immediate(kSmiShift)); |
| 2187 } else { | 2187 } else { |
| 2188 UNIMPLEMENTED(); // Not used. | 2188 UNIMPLEMENTED(); // Not used. |
| 2189 } | 2189 } |
| 2190 } | 2190 } |
| 2191 } | 2191 } |
| 2192 | 2192 |
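The two-instruction sequence above never leaves tagged form: the sarp by (shift_value + kSmiShift) untags and shifts in one go, and the shlp re-tags. A sketch (kSmiShift == 32 assumed; right-shifting negatives is arithmetic on mainstream compilers):

    #include <cassert>
    #include <cstdint>

    const int64_t kTagFactor = INT64_C(1) << 32;  // 2^kSmiShift

    int64_t SmiSarConstant(int64_t tagged, int k) {
      int64_t shifted = tagged >> (k + 32);  // sarp(dst, shift_value + kSmiShift)
      return shifted * kTagFactor;           // shlp(dst, kSmiShift)
    }

    int main() {
      assert(SmiSarConstant(-40 * kTagFactor, 2) == -10 * kTagFactor);
      assert(SmiSarConstant(-41 * kTagFactor, 2) == -11 * kTagFactor);  // floors
      return 0;
    }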
| 2193 | 2193 |
| 2194 void MacroAssembler::SmiShiftLeftConstant(Register dst, | 2194 void MacroAssembler::SmiShiftLeftConstant(Register dst, |
| 2195 Register src, | 2195 Register src, |
| 2196 int shift_value, | 2196 int shift_value, |
| 2197 Label* on_not_smi_result, | 2197 Label* on_not_smi_result, |
| 2198 Label::Distance near_jump) { | 2198 Label::Distance near_jump) { |
| 2199 if (SmiValuesAre32Bits()) { | 2199 if (SmiValuesAre32Bits()) { |
| 2200 if (!dst.is(src)) { | 2200 if (!dst.is(src)) { |
| 2201 movp(dst, src); | 2201 movp(dst, src); |
| 2202 } | 2202 } |
| 2203 if (shift_value > 0) { | 2203 if (shift_value > 0) { |
| 2204 // The shift amount is specified by the lower 5 bits, not six as in the shl opcode. | 2204 // The shift amount is specified by the lower 5 bits, not six as in the shl opcode. |
| 2205 shlq(dst, Immediate(shift_value & 0x1f)); | 2205 shlq(dst, Immediate(shift_value & 0x1f)); |
| 2206 } | 2206 } |
| 2207 } else { | 2207 } else { |
| 2208 ASSERT(SmiValuesAre31Bits()); | 2208 DCHECK(SmiValuesAre31Bits()); |
| 2209 if (dst.is(src)) { | 2209 if (dst.is(src)) { |
| 2210 UNIMPLEMENTED(); // Not used. | 2210 UNIMPLEMENTED(); // Not used. |
| 2211 } else { | 2211 } else { |
| 2212 SmiToInteger32(dst, src); | 2212 SmiToInteger32(dst, src); |
| 2213 shll(dst, Immediate(shift_value)); | 2213 shll(dst, Immediate(shift_value)); |
| 2214 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump); | 2214 JumpIfNotValidSmiValue(dst, on_not_smi_result, near_jump); |
| 2215 Integer32ToSmi(dst, dst); | 2215 Integer32ToSmi(dst, dst); |
| 2216 } | 2216 } |
| 2217 } | 2217 } |
| 2218 } | 2218 } |
| 2219 | 2219 |
| 2220 | 2220 |
| 2221 void MacroAssembler::SmiShiftLogicalRightConstant( | 2221 void MacroAssembler::SmiShiftLogicalRightConstant( |
| 2222 Register dst, Register src, int shift_value, | 2222 Register dst, Register src, int shift_value, |
| 2223 Label* on_not_smi_result, Label::Distance near_jump) { | 2223 Label* on_not_smi_result, Label::Distance near_jump) { |
| 2224 // Logical right shift interprets its result as an *unsigned* number. | 2224 // Logical right shift interprets its result as an *unsigned* number. |
| 2225 if (dst.is(src)) { | 2225 if (dst.is(src)) { |
| 2226 UNIMPLEMENTED(); // Not used. | 2226 UNIMPLEMENTED(); // Not used. |
| 2227 } else { | 2227 } else { |
| 2228 if (shift_value == 0) { | 2228 if (shift_value == 0) { |
| 2229 testp(src, src); | 2229 testp(src, src); |
| 2230 j(negative, on_not_smi_result, near_jump); | 2230 j(negative, on_not_smi_result, near_jump); |
| 2231 } | 2231 } |
| 2232 if (SmiValuesAre32Bits()) { | 2232 if (SmiValuesAre32Bits()) { |
| 2233 movp(dst, src); | 2233 movp(dst, src); |
| 2234 shrp(dst, Immediate(shift_value + kSmiShift)); | 2234 shrp(dst, Immediate(shift_value + kSmiShift)); |
| 2235 shlp(dst, Immediate(kSmiShift)); | 2235 shlp(dst, Immediate(kSmiShift)); |
| 2236 } else { | 2236 } else { |
| 2237 ASSERT(SmiValuesAre31Bits()); | 2237 DCHECK(SmiValuesAre31Bits()); |
| 2238 SmiToInteger32(dst, src); | 2238 SmiToInteger32(dst, src); |
| 2239 shrp(dst, Immediate(shift_value)); | 2239 shrp(dst, Immediate(shift_value)); |
| 2240 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump); | 2240 JumpIfUIntNotValidSmiValue(dst, on_not_smi_result, near_jump); |
| 2241 Integer32ToSmi(dst, dst); | 2241 Integer32ToSmi(dst, dst); |
| 2242 } | 2242 } |
| 2243 } | 2243 } |
| 2244 } | 2244 } |
| 2245 | 2245 |
| 2246 | 2246 |
| 2247 void MacroAssembler::SmiShiftLeft(Register dst, | 2247 void MacroAssembler::SmiShiftLeft(Register dst, |
| 2248 Register src1, | 2248 Register src1, |
| 2249 Register src2, | 2249 Register src2, |
| 2250 Label* on_not_smi_result, | 2250 Label* on_not_smi_result, |
| 2251 Label::Distance near_jump) { | 2251 Label::Distance near_jump) { |
| 2252 if (SmiValuesAre32Bits()) { | 2252 if (SmiValuesAre32Bits()) { |
| 2253 ASSERT(!dst.is(rcx)); | 2253 DCHECK(!dst.is(rcx)); |
| 2254 if (!dst.is(src1)) { | 2254 if (!dst.is(src1)) { |
| 2255 movp(dst, src1); | 2255 movp(dst, src1); |
| 2256 } | 2256 } |
| 2257 // Untag shift amount. | 2257 // Untag shift amount. |
| 2258 SmiToInteger32(rcx, src2); | 2258 SmiToInteger32(rcx, src2); |
| 2259 // The shift amount is specified by the lower 5 bits, not six as in the shl opcode. | 2259 // The shift amount is specified by the lower 5 bits, not six as in the shl opcode. |
| 2260 andp(rcx, Immediate(0x1f)); | 2260 andp(rcx, Immediate(0x1f)); |
| 2261 shlq_cl(dst); | 2261 shlq_cl(dst); |
| 2262 } else { | 2262 } else { |
| 2263 ASSERT(SmiValuesAre31Bits()); | 2263 DCHECK(SmiValuesAre31Bits()); |
| 2264 ASSERT(!dst.is(kScratchRegister)); | 2264 DCHECK(!dst.is(kScratchRegister)); |
| 2265 ASSERT(!src1.is(kScratchRegister)); | 2265 DCHECK(!src1.is(kScratchRegister)); |
| 2266 ASSERT(!src2.is(kScratchRegister)); | 2266 DCHECK(!src2.is(kScratchRegister)); |
| 2267 ASSERT(!dst.is(src2)); | 2267 DCHECK(!dst.is(src2)); |
| 2268 ASSERT(!dst.is(rcx)); | 2268 DCHECK(!dst.is(rcx)); |
| 2269 | 2269 |
| 2270 if (src1.is(rcx) || src2.is(rcx)) { | 2270 if (src1.is(rcx) || src2.is(rcx)) { |
| 2271 movq(kScratchRegister, rcx); | 2271 movq(kScratchRegister, rcx); |
| 2272 } | 2272 } |
| 2273 if (dst.is(src1)) { | 2273 if (dst.is(src1)) { |
| 2274 UNIMPLEMENTED(); // Not used. | 2274 UNIMPLEMENTED(); // Not used. |
| 2275 } else { | 2275 } else { |
| 2276 Label valid_result; | 2276 Label valid_result; |
| 2277 SmiToInteger32(dst, src1); | 2277 SmiToInteger32(dst, src1); |
| 2278 SmiToInteger32(rcx, src2); | 2278 SmiToInteger32(rcx, src2); |
| (...skipping 14 matching lines...) |
| 2293 } | 2293 } |
| 2294 } | 2294 } |
| 2295 } | 2295 } |
| 2296 | 2296 |
| 2297 | 2297 |
| 2298 void MacroAssembler::SmiShiftLogicalRight(Register dst, | 2298 void MacroAssembler::SmiShiftLogicalRight(Register dst, |
| 2299 Register src1, | 2299 Register src1, |
| 2300 Register src2, | 2300 Register src2, |
| 2301 Label* on_not_smi_result, | 2301 Label* on_not_smi_result, |
| 2302 Label::Distance near_jump) { | 2302 Label::Distance near_jump) { |
| 2303 ASSERT(!dst.is(kScratchRegister)); | 2303 DCHECK(!dst.is(kScratchRegister)); |
| 2304 ASSERT(!src1.is(kScratchRegister)); | 2304 DCHECK(!src1.is(kScratchRegister)); |
| 2305 ASSERT(!src2.is(kScratchRegister)); | 2305 DCHECK(!src2.is(kScratchRegister)); |
| 2306 ASSERT(!dst.is(src2)); | 2306 DCHECK(!dst.is(src2)); |
| 2307 ASSERT(!dst.is(rcx)); | 2307 DCHECK(!dst.is(rcx)); |
| 2308 if (src1.is(rcx) || src2.is(rcx)) { | 2308 if (src1.is(rcx) || src2.is(rcx)) { |
| 2309 movq(kScratchRegister, rcx); | 2309 movq(kScratchRegister, rcx); |
| 2310 } | 2310 } |
| 2311 if (dst.is(src1)) { | 2311 if (dst.is(src1)) { |
| 2312 UNIMPLEMENTED(); // Not used. | 2312 UNIMPLEMENTED(); // Not used. |
| 2313 } else { | 2313 } else { |
| 2314 Label valid_result; | 2314 Label valid_result; |
| 2315 SmiToInteger32(dst, src1); | 2315 SmiToInteger32(dst, src1); |
| 2316 SmiToInteger32(rcx, src2); | 2316 SmiToInteger32(rcx, src2); |
| 2317 shrl_cl(dst); | 2317 shrl_cl(dst); |
| (...skipping 10 matching lines...) |
| 2328 jmp(on_not_smi_result, near_jump); | 2328 jmp(on_not_smi_result, near_jump); |
| 2329 bind(&valid_result); | 2329 bind(&valid_result); |
| 2330 Integer32ToSmi(dst, dst); | 2330 Integer32ToSmi(dst, dst); |
| 2331 } | 2331 } |
| 2332 } | 2332 } |
| 2333 | 2333 |
| 2334 | 2334 |
| 2335 void MacroAssembler::SmiShiftArithmeticRight(Register dst, | 2335 void MacroAssembler::SmiShiftArithmeticRight(Register dst, |
| 2336 Register src1, | 2336 Register src1, |
| 2337 Register src2) { | 2337 Register src2) { |
| 2338 ASSERT(!dst.is(kScratchRegister)); | 2338 DCHECK(!dst.is(kScratchRegister)); |
| 2339 ASSERT(!src1.is(kScratchRegister)); | 2339 DCHECK(!src1.is(kScratchRegister)); |
| 2340 ASSERT(!src2.is(kScratchRegister)); | 2340 DCHECK(!src2.is(kScratchRegister)); |
| 2341 ASSERT(!dst.is(rcx)); | 2341 DCHECK(!dst.is(rcx)); |
| 2342 | 2342 |
| 2343 SmiToInteger32(rcx, src2); | 2343 SmiToInteger32(rcx, src2); |
| 2344 if (!dst.is(src1)) { | 2344 if (!dst.is(src1)) { |
| 2345 movp(dst, src1); | 2345 movp(dst, src1); |
| 2346 } | 2346 } |
| 2347 SmiToInteger32(dst, dst); | 2347 SmiToInteger32(dst, dst); |
| 2348 sarl_cl(dst); | 2348 sarl_cl(dst); |
| 2349 Integer32ToSmi(dst, dst); | 2349 Integer32ToSmi(dst, dst); |
| 2350 } | 2350 } |
| 2351 | 2351 |
| 2352 | 2352 |
| 2353 void MacroAssembler::SelectNonSmi(Register dst, | 2353 void MacroAssembler::SelectNonSmi(Register dst, |
| 2354 Register src1, | 2354 Register src1, |
| 2355 Register src2, | 2355 Register src2, |
| 2356 Label* on_not_smis, | 2356 Label* on_not_smis, |
| 2357 Label::Distance near_jump) { | 2357 Label::Distance near_jump) { |
| 2358 ASSERT(!dst.is(kScratchRegister)); | 2358 DCHECK(!dst.is(kScratchRegister)); |
| 2359 ASSERT(!src1.is(kScratchRegister)); | 2359 DCHECK(!src1.is(kScratchRegister)); |
| 2360 ASSERT(!src2.is(kScratchRegister)); | 2360 DCHECK(!src2.is(kScratchRegister)); |
| 2361 ASSERT(!dst.is(src1)); | 2361 DCHECK(!dst.is(src1)); |
| 2362 ASSERT(!dst.is(src2)); | 2362 DCHECK(!dst.is(src2)); |
| 2363 // The operands must not both be smis. | 2363 // The operands must not both be smis. |
| 2364 #ifdef DEBUG | 2364 #ifdef DEBUG |
| 2365 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2)); | 2365 Condition not_both_smis = NegateCondition(CheckBothSmi(src1, src2)); |
| 2366 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi); | 2366 Check(not_both_smis, kBothRegistersWereSmisInSelectNonSmi); |
| 2367 #endif | 2367 #endif |
| 2368 STATIC_ASSERT(kSmiTag == 0); | 2368 STATIC_ASSERT(kSmiTag == 0); |
| 2369 ASSERT_EQ(0, Smi::FromInt(0)); | 2369 DCHECK_EQ(0, Smi::FromInt(0)); |
| 2370 movl(kScratchRegister, Immediate(kSmiTagMask)); | 2370 movl(kScratchRegister, Immediate(kSmiTagMask)); |
| 2371 andp(kScratchRegister, src1); | 2371 andp(kScratchRegister, src1); |
| 2372 testl(kScratchRegister, src2); | 2372 testl(kScratchRegister, src2); |
| 2373 // If non-zero then both are non-smis. | 2373 // If non-zero then both are non-smis. |
| 2374 j(not_zero, on_not_smis, near_jump); | 2374 j(not_zero, on_not_smis, near_jump); |
| 2375 | 2375 |
| 2376 // Exactly one operand is a smi. | 2376 // Exactly one operand is a smi. |
| 2377 ASSERT_EQ(1, static_cast<int>(kSmiTagMask)); | 2377 DCHECK_EQ(1, static_cast<int>(kSmiTagMask)); |
| 2378 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one. | 2378 // kScratchRegister still holds src1 & kSmiTag, which is either zero or one. |
| 2379 subp(kScratchRegister, Immediate(1)); | 2379 subp(kScratchRegister, Immediate(1)); |
| 2380 // If src1 is a smi, then the scratch register is all 1s, else it is all 0s. | 2380 // If src1 is a smi, then the scratch register is all 1s, else it is all 0s. |
| 2381 movp(dst, src1); | 2381 movp(dst, src1); |
| 2382 xorp(dst, src2); | 2382 xorp(dst, src2); |
| 2383 andp(dst, kScratchRegister); | 2383 andp(dst, kScratchRegister); |
| 2384 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. | 2384 // If src1 is a smi, dst holds src1 ^ src2, else it is zero. |
| 2385 xorp(dst, src1); | 2385 xorp(dst, src1); |
| 2386 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. | 2386 // If src1 is a smi, dst is src2, else it is src1, i.e., the non-smi. |
| 2387 } | 2387 } |
| 2388 | 2388 |
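The sequence above is a branchless select. With kSmiTag == 0 (STATIC_ASSERTed above), the smi operand contributes a 0 tag bit, so subtracting 1 from the scratch value yields an all-ones or all-zero mask. A sketch with low-bit tagging:

    #include <cassert>
    #include <cstdint>

    uint64_t SelectNonSmi(uint64_t src1, uint64_t src2) {
      uint64_t mask = (src1 & 1) - 1;        // smi (tag 0) -> ~0; non-smi -> 0
      return src1 ^ ((src1 ^ src2) & mask);  // mask ? src2 : src1
    }

    int main() {
      uint64_t smi = 0x54, heap_object = 0x1001;  // tag bits 0 and 1
      assert(SelectNonSmi(smi, heap_object) == heap_object);
      assert(SelectNonSmi(heap_object, smi) == heap_object);
      return 0;
    }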
| 2389 | 2389 |
| 2390 SmiIndex MacroAssembler::SmiToIndex(Register dst, | 2390 SmiIndex MacroAssembler::SmiToIndex(Register dst, |
| 2391 Register src, | 2391 Register src, |
| 2392 int shift) { | 2392 int shift) { |
| 2393 if (SmiValuesAre32Bits()) { | 2393 if (SmiValuesAre32Bits()) { |
| 2394 ASSERT(is_uint6(shift)); | 2394 DCHECK(is_uint6(shift)); |
| 2395 // There is a possible optimization if shift is in the range 60-63, but that | 2395 // There is a possible optimization if shift is in the range 60-63, but that |
| 2396 // will (and must) never happen. | 2396 // will (and must) never happen. |
| 2397 if (!dst.is(src)) { | 2397 if (!dst.is(src)) { |
| 2398 movp(dst, src); | 2398 movp(dst, src); |
| 2399 } | 2399 } |
| 2400 if (shift < kSmiShift) { | 2400 if (shift < kSmiShift) { |
| 2401 sarp(dst, Immediate(kSmiShift - shift)); | 2401 sarp(dst, Immediate(kSmiShift - shift)); |
| 2402 } else { | 2402 } else { |
| 2403 shlp(dst, Immediate(shift - kSmiShift)); | 2403 shlp(dst, Immediate(shift - kSmiShift)); |
| 2404 } | 2404 } |
| 2405 return SmiIndex(dst, times_1); | 2405 return SmiIndex(dst, times_1); |
| 2406 } else { | 2406 } else { |
| 2407 ASSERT(SmiValuesAre31Bits()); | 2407 DCHECK(SmiValuesAre31Bits()); |
| 2408 ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1)); | 2408 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1)); |
| 2409 if (!dst.is(src)) { | 2409 if (!dst.is(src)) { |
| 2410 movp(dst, src); | 2410 movp(dst, src); |
| 2411 } | 2411 } |
| 2412 // We have to sign-extend the index register to 64 bits, as the smi might | 2412 // We have to sign-extend the index register to 64 bits, as the smi might |
| 2413 // be negative. | 2413 // be negative. |
| 2414 movsxlq(dst, dst); | 2414 movsxlq(dst, dst); |
| 2415 if (shift == times_1) { | 2415 if (shift == times_1) { |
| 2416 sarq(dst, Immediate(kSmiShift)); | 2416 sarq(dst, Immediate(kSmiShift)); |
| 2417 return SmiIndex(dst, times_1); | 2417 return SmiIndex(dst, times_1); |
| 2418 } | 2418 } |
| 2419 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); | 2419 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); |
| 2420 } | 2420 } |
| 2421 } | 2421 } |
| 2422 | 2422 |
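For the 32-bit-payload branch above, untag-then-scale collapses into a single shift: an index scaled by 2^shift is the tagged value arithmetically shifted right by (kSmiShift - shift). Sketch:

    #include <cassert>
    #include <cstdint>

    const int64_t kTagFactor = INT64_C(1) << 32;

    // Models sarp(dst, Immediate(kSmiShift - shift)) for shift < kSmiShift.
    int64_t SmiToScaledIndex(int64_t tagged, int shift) {
      return tagged >> (32 - shift);  // == SmiUntag(tagged) << shift
    }

    int main() {
      assert(SmiToScaledIndex(5 * kTagFactor, 3) == 5 * 8);  // pointer-size scale
      assert(SmiToScaledIndex(-2 * kTagFactor, 3) == -16);   // sign preserved
      return 0;
    }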
| 2423 | 2423 |
| 2424 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, | 2424 SmiIndex MacroAssembler::SmiToNegativeIndex(Register dst, |
| 2425 Register src, | 2425 Register src, |
| 2426 int shift) { | 2426 int shift) { |
| 2427 if (SmiValuesAre32Bits()) { | 2427 if (SmiValuesAre32Bits()) { |
| 2428 // Register src holds a positive smi. | 2428 // Register src holds a positive smi. |
| 2429 ASSERT(is_uint6(shift)); | 2429 DCHECK(is_uint6(shift)); |
| 2430 if (!dst.is(src)) { | 2430 if (!dst.is(src)) { |
| 2431 movp(dst, src); | 2431 movp(dst, src); |
| 2432 } | 2432 } |
| 2433 negp(dst); | 2433 negp(dst); |
| 2434 if (shift < kSmiShift) { | 2434 if (shift < kSmiShift) { |
| 2435 sarp(dst, Immediate(kSmiShift - shift)); | 2435 sarp(dst, Immediate(kSmiShift - shift)); |
| 2436 } else { | 2436 } else { |
| 2437 shlp(dst, Immediate(shift - kSmiShift)); | 2437 shlp(dst, Immediate(shift - kSmiShift)); |
| 2438 } | 2438 } |
| 2439 return SmiIndex(dst, times_1); | 2439 return SmiIndex(dst, times_1); |
| 2440 } else { | 2440 } else { |
| 2441 ASSERT(SmiValuesAre31Bits()); | 2441 DCHECK(SmiValuesAre31Bits()); |
| 2442 ASSERT(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1)); | 2442 DCHECK(shift >= times_1 && shift <= (static_cast<int>(times_8) + 1)); |
| 2443 if (!dst.is(src)) { | 2443 if (!dst.is(src)) { |
| 2444 movp(dst, src); | 2444 movp(dst, src); |
| 2445 } | 2445 } |
| 2446 negq(dst); | 2446 negq(dst); |
| 2447 if (shift == times_1) { | 2447 if (shift == times_1) { |
| 2448 sarq(dst, Immediate(kSmiShift)); | 2448 sarq(dst, Immediate(kSmiShift)); |
| 2449 return SmiIndex(dst, times_1); | 2449 return SmiIndex(dst, times_1); |
| 2450 } | 2450 } |
| 2451 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); | 2451 return SmiIndex(dst, static_cast<ScaleFactor>(shift - 1)); |
| 2452 } | 2452 } |
| 2453 } | 2453 } |
| 2454 | 2454 |
| 2455 | 2455 |
| 2456 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { | 2456 void MacroAssembler::AddSmiField(Register dst, const Operand& src) { |
| 2457 if (SmiValuesAre32Bits()) { | 2457 if (SmiValuesAre32Bits()) { |
| 2458 ASSERT_EQ(0, kSmiShift % kBitsPerByte); | 2458 DCHECK_EQ(0, kSmiShift % kBitsPerByte); |
| 2459 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); | 2459 addl(dst, Operand(src, kSmiShift / kBitsPerByte)); |
| 2460 } else { | 2460 } else { |
| 2461 ASSERT(SmiValuesAre31Bits()); | 2461 DCHECK(SmiValuesAre31Bits()); |
| 2462 SmiToInteger32(kScratchRegister, src); | 2462 SmiToInteger32(kScratchRegister, src); |
| 2463 addl(dst, kScratchRegister); | 2463 addl(dst, kScratchRegister); |
| 2464 } | 2464 } |
| 2465 } | 2465 } |
| 2466 | 2466 |
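The byte offset above works because, on a little-endian target with 32-bit payloads, the untagged value is simply the upper dword of the tagged field, at byte offset kSmiShift / kBitsPerByte == 4. A host-side check (little-endianness assumed):

    #include <cassert>
    #include <cstdint>
    #include <cstring>

    int main() {
      int64_t tagged = INT64_C(123) * (INT64_C(1) << 32);
      int32_t payload;
      // addl(dst, Operand(src, kSmiShift / kBitsPerByte)) reads these 4 bytes:
      std::memcpy(&payload, reinterpret_cast<const char*>(&tagged) + 4, 4);
      assert(payload == 123);
      return 0;
    }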
| 2467 | 2467 |
| 2468 void MacroAssembler::Push(Smi* source) { | 2468 void MacroAssembler::Push(Smi* source) { |
| 2469 intptr_t smi = reinterpret_cast<intptr_t>(source); | 2469 intptr_t smi = reinterpret_cast<intptr_t>(source); |
| 2470 if (is_int32(smi)) { | 2470 if (is_int32(smi)) { |
| 2471 Push(Immediate(static_cast<int32_t>(smi))); | 2471 Push(Immediate(static_cast<int32_t>(smi))); |
| 2472 } else { | 2472 } else { |
| 2473 Register constant = GetSmiConstant(source); | 2473 Register constant = GetSmiConstant(source); |
| 2474 Push(constant); | 2474 Push(constant); |
| 2475 } | 2475 } |
| 2476 } | 2476 } |
| 2477 | 2477 |
| 2478 | 2478 |
| 2479 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) { | 2479 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) { |
| 2480 ASSERT(!src.is(scratch)); | 2480 DCHECK(!src.is(scratch)); |
| 2481 movp(scratch, src); | 2481 movp(scratch, src); |
| 2482 // High bits. | 2482 // High bits. |
| 2483 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift)); | 2483 shrp(src, Immediate(kPointerSize * kBitsPerByte - kSmiShift)); |
| 2484 shlp(src, Immediate(kSmiShift)); | 2484 shlp(src, Immediate(kSmiShift)); |
| 2485 Push(src); | 2485 Push(src); |
| 2486 // Low bits. | 2486 // Low bits. |
| 2487 shlp(scratch, Immediate(kSmiShift)); | 2487 shlp(scratch, Immediate(kSmiShift)); |
| 2488 Push(scratch); | 2488 Push(scratch); |
| 2489 } | 2489 } |
| 2490 | 2490 |
| 2491 | 2491 |
| 2492 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) { | 2492 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) { |
| 2493 ASSERT(!dst.is(scratch)); | 2493 DCHECK(!dst.is(scratch)); |
| 2494 Pop(scratch); | 2494 Pop(scratch); |
| 2495 // Low bits. | 2495 // Low bits. |
| 2496 shrp(scratch, Immediate(kSmiShift)); | 2496 shrp(scratch, Immediate(kSmiShift)); |
| 2497 Pop(dst); | 2497 Pop(dst); |
| 2498 shrp(dst, Immediate(kSmiShift)); | 2498 shrp(dst, Immediate(kSmiShift)); |
| 2499 // High bits. | 2499 // High bits. |
| 2500 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift)); | 2500 shlp(dst, Immediate(kPointerSize * kBitsPerByte - kSmiShift)); |
| 2501 orp(dst, scratch); | 2501 orp(dst, scratch); |
| 2502 } | 2502 } |
| 2503 | 2503 |
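Push/PopRegisterAsTwoSmis ferry a raw 64-bit word through the stack as two 32-bit-payload smis. A round-trip sketch of the shifts above (kSmiShift == 32 assumed):

    #include <cassert>
    #include <cstdint>

    int main() {
      uint64_t src = UINT64_C(0x0123456789ABCDEF);
      // Push side:
      uint64_t high_smi = (src >> 32) << 32;  // shrp then shlp on src
      uint64_t low_smi = src << 32;           // shlp on scratch
      // Pop side: untag each half and recombine.
      uint64_t dst = ((high_smi >> 32) << 32) | (low_smi >> 32);
      assert(dst == src);
      return 0;
    }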
| 2504 | 2504 |
| 2505 void MacroAssembler::Test(const Operand& src, Smi* source) { | 2505 void MacroAssembler::Test(const Operand& src, Smi* source) { |
| 2506 if (SmiValuesAre32Bits()) { | 2506 if (SmiValuesAre32Bits()) { |
| 2507 testl(Operand(src, kIntSize), Immediate(source->value())); | 2507 testl(Operand(src, kIntSize), Immediate(source->value())); |
| 2508 } else { | 2508 } else { |
| 2509 ASSERT(SmiValuesAre31Bits()); | 2509 DCHECK(SmiValuesAre31Bits()); |
| 2510 testl(src, Immediate(source)); | 2510 testl(src, Immediate(source)); |
| 2511 } | 2511 } |
| 2512 } | 2512 } |
| 2513 | 2513 |
| 2514 | 2514 |
| 2515 // ---------------------------------------------------------------------------- | 2515 // ---------------------------------------------------------------------------- |
| 2516 | 2516 |
| 2517 | 2517 |
| 2518 void MacroAssembler::LookupNumberStringCache(Register object, | 2518 void MacroAssembler::LookupNumberStringCache(Register object, |
| 2519 Register result, | 2519 Register result, |
| (...skipping 101 matching lines...) |
| 2621 Condition either_smi = CheckEitherSmi(first_object, second_object); | 2621 Condition either_smi = CheckEitherSmi(first_object, second_object); |
| 2622 j(either_smi, on_fail, near_jump); | 2622 j(either_smi, on_fail, near_jump); |
| 2623 | 2623 |
| 2624 // Load instance type for both strings. | 2624 // Load instance type for both strings. |
| 2625 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset)); | 2625 movp(scratch1, FieldOperand(first_object, HeapObject::kMapOffset)); |
| 2626 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset)); | 2626 movp(scratch2, FieldOperand(second_object, HeapObject::kMapOffset)); |
| 2627 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset)); | 2627 movzxbl(scratch1, FieldOperand(scratch1, Map::kInstanceTypeOffset)); |
| 2628 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); | 2628 movzxbl(scratch2, FieldOperand(scratch2, Map::kInstanceTypeOffset)); |
| 2629 | 2629 |
| 2630 // Check that both are flat ASCII strings. | 2630 // Check that both are flat ASCII strings. |
| 2631 ASSERT(kNotStringTag != 0); | 2631 DCHECK(kNotStringTag != 0); |
| 2632 const int kFlatAsciiStringMask = | 2632 const int kFlatAsciiStringMask = |
| 2633 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; | 2633 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; |
| 2634 const int kFlatAsciiStringTag = | 2634 const int kFlatAsciiStringTag = |
| 2635 kStringTag | kOneByteStringTag | kSeqStringTag; | 2635 kStringTag | kOneByteStringTag | kSeqStringTag; |
| 2636 | 2636 |
| 2637 andl(scratch1, Immediate(kFlatAsciiStringMask)); | 2637 andl(scratch1, Immediate(kFlatAsciiStringMask)); |
| 2638 andl(scratch2, Immediate(kFlatAsciiStringMask)); | 2638 andl(scratch2, Immediate(kFlatAsciiStringMask)); |
| 2639 // Interleave the bits to check both scratch1 and scratch2 in one test. | 2639 // Interleave the bits to check both scratch1 and scratch2 in one test. |
| 2640 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); | 2640 DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); |
| 2641 leap(scratch1, Operand(scratch1, scratch2, times_8, 0)); | 2641 leap(scratch1, Operand(scratch1, scratch2, times_8, 0)); |
| 2642 cmpl(scratch1, | 2642 cmpl(scratch1, |
| 2643 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); | 2643 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); |
| 2644 j(not_equal, on_fail, near_jump); | 2644 j(not_equal, on_fail, near_jump); |
| 2645 } | 2645 } |
| 2646 | 2646 |
| 2647 | 2647 |
| 2648 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( | 2648 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii( |
| 2649 Register instance_type, | 2649 Register instance_type, |
| 2650 Register scratch, | 2650 Register scratch, |
| (...skipping 17 matching lines...) |
| 2668 Register second_object_instance_type, | 2668 Register second_object_instance_type, |
| 2669 Register scratch1, | 2669 Register scratch1, |
| 2670 Register scratch2, | 2670 Register scratch2, |
| 2671 Label* on_fail, | 2671 Label* on_fail, |
| 2672 Label::Distance near_jump) { | 2672 Label::Distance near_jump) { |
| 2673 // Load instance type for both strings. | 2673 // Load instance type for both strings. |
| 2674 movp(scratch1, first_object_instance_type); | 2674 movp(scratch1, first_object_instance_type); |
| 2675 movp(scratch2, second_object_instance_type); | 2675 movp(scratch2, second_object_instance_type); |
| 2676 | 2676 |
| 2677 // Check that both are flat ASCII strings. | 2677 // Check that both are flat ASCII strings. |
| 2678 ASSERT(kNotStringTag != 0); | 2678 DCHECK(kNotStringTag != 0); |
| 2679 const int kFlatAsciiStringMask = | 2679 const int kFlatAsciiStringMask = |
| 2680 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; | 2680 kIsNotStringMask | kStringRepresentationMask | kStringEncodingMask; |
| 2681 const int kFlatAsciiStringTag = | 2681 const int kFlatAsciiStringTag = |
| 2682 kStringTag | kOneByteStringTag | kSeqStringTag; | 2682 kStringTag | kOneByteStringTag | kSeqStringTag; |
| 2683 | 2683 |
| 2684 andl(scratch1, Immediate(kFlatAsciiStringMask)); | 2684 andl(scratch1, Immediate(kFlatAsciiStringMask)); |
| 2685 andl(scratch2, Immediate(kFlatAsciiStringMask)); | 2685 andl(scratch2, Immediate(kFlatAsciiStringMask)); |
| 2686 // Interleave the bits to check both scratch1 and scratch2 in one test. | 2686 // Interleave the bits to check both scratch1 and scratch2 in one test. |
| 2687 ASSERT_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); | 2687 DCHECK_EQ(0, kFlatAsciiStringMask & (kFlatAsciiStringMask << 3)); |
| 2688 leap(scratch1, Operand(scratch1, scratch2, times_8, 0)); | 2688 leap(scratch1, Operand(scratch1, scratch2, times_8, 0)); |
| 2689 cmpl(scratch1, | 2689 cmpl(scratch1, |
| 2690 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); | 2690 Immediate(kFlatAsciiStringTag + (kFlatAsciiStringTag << 3))); |
| 2691 j(not_equal, on_fail, near_jump); | 2691 j(not_equal, on_fail, near_jump); |
| 2692 } | 2692 } |
| 2693 | 2693 |
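Both flat-ASCII helpers compress two byte tests into one compare: the leap with times_8 packs the two masked instance-type bytes into disjoint bit ranges, which is exactly what the DCHECK on kFlatAsciiStringMask & (kFlatAsciiStringMask << 3) guarantees. A sketch with hypothetical mask and tag values (not V8's real string-type constants):

    #include <cassert>
    #include <cstdint>

    const uint32_t kMask = 0x07, kTag = 0x05;  // hypothetical placeholders
    static_assert((kMask & (kMask << 3)) == 0, "fields must not overlap");

    bool BothMatch(uint32_t type1, uint32_t type2) {
      // leap(scratch1, Operand(scratch1, scratch2, times_8, 0)):
      uint32_t combined = (type1 & kMask) + ((type2 & kMask) << 3);
      return combined == kTag + (kTag << 3);  // one cmpl for both strings
    }

    int main() {
      assert(BothMatch(0xF5, 0x05));  // bits outside the mask are ignored
      assert(!BothMatch(0x05, 0x04));
      return 0;
    }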
| 2694 | 2694 |
| 2695 template<class T> | 2695 template<class T> |
| 2696 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm, | 2696 static void JumpIfNotUniqueNameHelper(MacroAssembler* masm, |
| 2697 T operand_or_register, | 2697 T operand_or_register, |
| (...skipping 82 matching lines...) |
| 2780 } else { | 2780 } else { |
| 2781 MoveHeapObject(kScratchRegister, source); | 2781 MoveHeapObject(kScratchRegister, source); |
| 2782 Push(kScratchRegister); | 2782 Push(kScratchRegister); |
| 2783 } | 2783 } |
| 2784 } | 2784 } |
| 2785 | 2785 |
| 2786 | 2786 |
| 2787 void MacroAssembler::MoveHeapObject(Register result, | 2787 void MacroAssembler::MoveHeapObject(Register result, |
| 2788 Handle<Object> object) { | 2788 Handle<Object> object) { |
| 2789 AllowDeferredHandleDereference using_raw_address; | 2789 AllowDeferredHandleDereference using_raw_address; |
| 2790 ASSERT(object->IsHeapObject()); | 2790 DCHECK(object->IsHeapObject()); |
| 2791 if (isolate()->heap()->InNewSpace(*object)) { | 2791 if (isolate()->heap()->InNewSpace(*object)) { |
| 2792 Handle<Cell> cell = isolate()->factory()->NewCell(object); | 2792 Handle<Cell> cell = isolate()->factory()->NewCell(object); |
| 2793 Move(result, cell, RelocInfo::CELL); | 2793 Move(result, cell, RelocInfo::CELL); |
| 2794 movp(result, Operand(result, 0)); | 2794 movp(result, Operand(result, 0)); |
| 2795 } else { | 2795 } else { |
| 2796 Move(result, object, RelocInfo::EMBEDDED_OBJECT); | 2796 Move(result, object, RelocInfo::EMBEDDED_OBJECT); |
| 2797 } | 2797 } |
| 2798 } | 2798 } |
| 2799 | 2799 |
| 2800 | 2800 |
| (...skipping 10 matching lines...) |
| 2811 | 2811 |
| 2812 void MacroAssembler::Drop(int stack_elements) { | 2812 void MacroAssembler::Drop(int stack_elements) { |
| 2813 if (stack_elements > 0) { | 2813 if (stack_elements > 0) { |
| 2814 addp(rsp, Immediate(stack_elements * kPointerSize)); | 2814 addp(rsp, Immediate(stack_elements * kPointerSize)); |
| 2815 } | 2815 } |
| 2816 } | 2816 } |
| 2817 | 2817 |
| 2818 | 2818 |
| 2819 void MacroAssembler::DropUnderReturnAddress(int stack_elements, | 2819 void MacroAssembler::DropUnderReturnAddress(int stack_elements, |
| 2820 Register scratch) { | 2820 Register scratch) { |
| 2821 ASSERT(stack_elements > 0); | 2821 DCHECK(stack_elements > 0); |
| 2822 if (kPointerSize == kInt64Size && stack_elements == 1) { | 2822 if (kPointerSize == kInt64Size && stack_elements == 1) { |
| 2823 popq(MemOperand(rsp, 0)); | 2823 popq(MemOperand(rsp, 0)); |
| 2824 return; | 2824 return; |
| 2825 } | 2825 } |
| 2826 | 2826 |
| 2827 PopReturnAddressTo(scratch); | 2827 PopReturnAddressTo(scratch); |
| 2828 Drop(stack_elements); | 2828 Drop(stack_elements); |
| 2829 PushReturnAddressFrom(scratch); | 2829 PushReturnAddressFrom(scratch); |
| 2830 } | 2830 } |
| 2831 | 2831 |
| 2832 | 2832 |
| 2833 void MacroAssembler::Push(Register src) { | 2833 void MacroAssembler::Push(Register src) { |
| 2834 if (kPointerSize == kInt64Size) { | 2834 if (kPointerSize == kInt64Size) { |
| 2835 pushq(src); | 2835 pushq(src); |
| 2836 } else { | 2836 } else { |
| 2837 // x32 uses 64-bit push for rbp in the prologue. | 2837 // x32 uses 64-bit push for rbp in the prologue. |
| 2838 ASSERT(src.code() != rbp.code()); | 2838 DCHECK(src.code() != rbp.code()); |
| 2839 leal(rsp, Operand(rsp, -4)); | 2839 leal(rsp, Operand(rsp, -4)); |
| 2840 movp(Operand(rsp, 0), src); | 2840 movp(Operand(rsp, 0), src); |
| 2841 } | 2841 } |
| 2842 } | 2842 } |
| 2843 | 2843 |
| 2844 | 2844 |
| 2845 void MacroAssembler::Push(const Operand& src) { | 2845 void MacroAssembler::Push(const Operand& src) { |
| 2846 if (kPointerSize == kInt64Size) { | 2846 if (kPointerSize == kInt64Size) { |
| 2847 pushq(src); | 2847 pushq(src); |
| 2848 } else { | 2848 } else { |
| (...skipping 32 matching lines...) |
| 2881 movp(Operand(rsp, 0), Immediate(imm32)); | 2881 movp(Operand(rsp, 0), Immediate(imm32)); |
| 2882 } | 2882 } |
| 2883 } | 2883 } |
| 2884 | 2884 |
| 2885 | 2885 |
| 2886 void MacroAssembler::Pop(Register dst) { | 2886 void MacroAssembler::Pop(Register dst) { |
| 2887 if (kPointerSize == kInt64Size) { | 2887 if (kPointerSize == kInt64Size) { |
| 2888 popq(dst); | 2888 popq(dst); |
| 2889 } else { | 2889 } else { |
| 2890 // x32 uses 64-bit pop for rbp in the epilogue. | 2890 // x32 uses 64-bit pop for rbp in the epilogue. |
| 2891 ASSERT(dst.code() != rbp.code()); | 2891 DCHECK(dst.code() != rbp.code()); |
| 2892 movp(dst, Operand(rsp, 0)); | 2892 movp(dst, Operand(rsp, 0)); |
| 2893 leal(rsp, Operand(rsp, 4)); | 2893 leal(rsp, Operand(rsp, 4)); |
| 2894 } | 2894 } |
| 2895 } | 2895 } |
| 2896 | 2896 |
| 2897 | 2897 |
| 2898 void MacroAssembler::Pop(const Operand& dst) { | 2898 void MacroAssembler::Pop(const Operand& dst) { |
| 2899 if (kPointerSize == kInt64Size) { | 2899 if (kPointerSize == kInt64Size) { |
| 2900 popq(dst); | 2900 popq(dst); |
| 2901 } else { | 2901 } else { |
| (...skipping 18 matching lines...) |
| 2920 } else { | 2920 } else { |
| 2921 popq(kScratchRegister); | 2921 popq(kScratchRegister); |
| 2922 movp(dst, kScratchRegister); | 2922 movp(dst, kScratchRegister); |
| 2923 } | 2923 } |
| 2924 } | 2924 } |
| 2925 | 2925 |
| 2926 | 2926 |
| 2927 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst, | 2927 void MacroAssembler::LoadSharedFunctionInfoSpecialField(Register dst, |
| 2928 Register base, | 2928 Register base, |
| 2929 int offset) { | 2929 int offset) { |
| 2930 ASSERT(offset > SharedFunctionInfo::kLengthOffset && | 2930 DCHECK(offset > SharedFunctionInfo::kLengthOffset && |
| 2931 offset <= SharedFunctionInfo::kSize && | 2931 offset <= SharedFunctionInfo::kSize && |
| 2932 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1)); | 2932 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1)); |
| 2933 if (kPointerSize == kInt64Size) { | 2933 if (kPointerSize == kInt64Size) { |
| 2934 movsxlq(dst, FieldOperand(base, offset)); | 2934 movsxlq(dst, FieldOperand(base, offset)); |
| 2935 } else { | 2935 } else { |
| 2936 movp(dst, FieldOperand(base, offset)); | 2936 movp(dst, FieldOperand(base, offset)); |
| 2937 SmiToInteger32(dst, dst); | 2937 SmiToInteger32(dst, dst); |
| 2938 } | 2938 } |
| 2939 } | 2939 } |
| 2940 | 2940 |
| 2941 | 2941 |
| 2942 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base, | 2942 void MacroAssembler::TestBitSharedFunctionInfoSpecialField(Register base, |
| 2943 int offset, | 2943 int offset, |
| 2944 int bits) { | 2944 int bits) { |
| 2945 ASSERT(offset > SharedFunctionInfo::kLengthOffset && | 2945 DCHECK(offset > SharedFunctionInfo::kLengthOffset && |
| 2946 offset <= SharedFunctionInfo::kSize && | 2946 offset <= SharedFunctionInfo::kSize && |
| 2947 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1)); | 2947 (((offset - SharedFunctionInfo::kLengthOffset) / kIntSize) % 2 == 1)); |
| 2948 if (kPointerSize == kInt32Size) { | 2948 if (kPointerSize == kInt32Size) { |
| 2949 // On x32, this field is represented as a Smi. | 2949 // On x32, this field is represented as a Smi. |
| 2950 bits += kSmiShift; | 2950 bits += kSmiShift; |
| 2951 } | 2951 } |
| 2952 int byte_offset = bits / kBitsPerByte; | 2952 int byte_offset = bits / kBitsPerByte; |
| 2953 int bit_in_byte = bits & (kBitsPerByte - 1); | 2953 int bit_in_byte = bits & (kBitsPerByte - 1); |
| 2954 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte)); | 2954 testb(FieldOperand(base, offset + byte_offset), Immediate(1 << bit_in_byte)); |
| 2955 } | 2955 } |
| (...skipping 67 matching lines...) |
| 3023 #endif | 3023 #endif |
| 3024 } | 3024 } |
| 3025 | 3025 |
| 3026 | 3026 |
| 3027 void MacroAssembler::Call(Handle<Code> code_object, | 3027 void MacroAssembler::Call(Handle<Code> code_object, |
| 3028 RelocInfo::Mode rmode, | 3028 RelocInfo::Mode rmode, |
| 3029 TypeFeedbackId ast_id) { | 3029 TypeFeedbackId ast_id) { |
| 3030 #ifdef DEBUG | 3030 #ifdef DEBUG |
| 3031 int end_position = pc_offset() + CallSize(code_object); | 3031 int end_position = pc_offset() + CallSize(code_object); |
| 3032 #endif | 3032 #endif |
| 3033 ASSERT(RelocInfo::IsCodeTarget(rmode) || | 3033 DCHECK(RelocInfo::IsCodeTarget(rmode) || |
| 3034 rmode == RelocInfo::CODE_AGE_SEQUENCE); | 3034 rmode == RelocInfo::CODE_AGE_SEQUENCE); |
| 3035 call(code_object, rmode, ast_id); | 3035 call(code_object, rmode, ast_id); |
| 3036 #ifdef DEBUG | 3036 #ifdef DEBUG |
| 3037 CHECK_EQ(end_position, pc_offset()); | 3037 CHECK_EQ(end_position, pc_offset()); |
| 3038 #endif | 3038 #endif |
| 3039 } | 3039 } |
| 3040 | 3040 |
| 3041 | 3041 |
| 3042 void MacroAssembler::Pushad() { | 3042 void MacroAssembler::Pushad() { |
| 3043 Push(rax); | 3043 Push(rax); |
| (...skipping 508 matching lines...) |
| 3552 } | 3552 } |
| 3553 | 3553 |
| 3554 | 3554 |
| 3555 void MacroAssembler::TaggedToI(Register result_reg, | 3555 void MacroAssembler::TaggedToI(Register result_reg, |
| 3556 Register input_reg, | 3556 Register input_reg, |
| 3557 XMMRegister temp, | 3557 XMMRegister temp, |
| 3558 MinusZeroMode minus_zero_mode, | 3558 MinusZeroMode minus_zero_mode, |
| 3559 Label* lost_precision, | 3559 Label* lost_precision, |
| 3560 Label::Distance dst) { | 3560 Label::Distance dst) { |
| 3561 Label done; | 3561 Label done; |
| 3562 ASSERT(!temp.is(xmm0)); | 3562 DCHECK(!temp.is(xmm0)); |
| 3563 | 3563 |
| 3564 // Heap number map check. | 3564 // Heap number map check. |
| 3565 CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), | 3565 CompareRoot(FieldOperand(input_reg, HeapObject::kMapOffset), |
| 3566 Heap::kHeapNumberMapRootIndex); | 3566 Heap::kHeapNumberMapRootIndex); |
| 3567 j(not_equal, lost_precision, dst); | 3567 j(not_equal, lost_precision, dst); |
| 3568 | 3568 |
| 3569 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); | 3569 movsd(xmm0, FieldOperand(input_reg, HeapNumber::kValueOffset)); |
| 3570 cvttsd2si(result_reg, xmm0); | 3570 cvttsd2si(result_reg, xmm0); |
| 3571 Cvtlsi2sd(temp, result_reg); | 3571 Cvtlsi2sd(temp, result_reg); |
| 3572 ucomisd(xmm0, temp); | 3572 ucomisd(xmm0, temp); |
| (...skipping 80 matching lines...) |
| 3653 void MacroAssembler::AssertSmi(const Operand& object) { | 3653 void MacroAssembler::AssertSmi(const Operand& object) { |
| 3654 if (emit_debug_code()) { | 3654 if (emit_debug_code()) { |
| 3655 Condition is_smi = CheckSmi(object); | 3655 Condition is_smi = CheckSmi(object); |
| 3656 Check(is_smi, kOperandIsNotASmi); | 3656 Check(is_smi, kOperandIsNotASmi); |
| 3657 } | 3657 } |
| 3658 } | 3658 } |
| 3659 | 3659 |
| 3660 | 3660 |
| 3661 void MacroAssembler::AssertZeroExtended(Register int32_register) { | 3661 void MacroAssembler::AssertZeroExtended(Register int32_register) { |
| 3662 if (emit_debug_code()) { | 3662 if (emit_debug_code()) { |
| 3663 ASSERT(!int32_register.is(kScratchRegister)); | 3663 DCHECK(!int32_register.is(kScratchRegister)); |
| 3664 movq(kScratchRegister, V8_INT64_C(0x0000000100000000)); | 3664 movq(kScratchRegister, V8_INT64_C(0x0000000100000000)); |
| 3665 cmpq(kScratchRegister, int32_register); | 3665 cmpq(kScratchRegister, int32_register); |
| 3666 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended); | 3666 Check(above_equal, k32BitValueInRegisterIsNotZeroExtended); |
| 3667 } | 3667 } |
| 3668 } | 3668 } |
| 3669 | 3669 |
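The compare above is an unsigned range test: a properly zero-extended register is strictly below 2^32, so kScratchRegister (holding 2^32) stays above it. Sketch of the property being asserted:

    #include <cassert>
    #include <cstdint>

    bool UpperHalfIsZero(uint64_t reg) {
      return reg < (UINT64_C(1) << 32);  // the cmpq/above_equal pair, in effect
    }

    int main() {
      assert(UpperHalfIsZero(0xFFFFFFFFu));
      assert(!UpperHalfIsZero(UINT64_C(1) << 32));
      return 0;
    }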
| 3670 | 3670 |
| 3671 void MacroAssembler::AssertString(Register object) { | 3671 void MacroAssembler::AssertString(Register object) { |
| 3672 if (emit_debug_code()) { | 3672 if (emit_debug_code()) { |
| 3673 testb(object, Immediate(kSmiTagMask)); | 3673 testb(object, Immediate(kSmiTagMask)); |
| (...skipping 30 matching lines...) |
| 3704 Assert(equal, kExpectedUndefinedOrCell); | 3704 Assert(equal, kExpectedUndefinedOrCell); |
| 3705 bind(&done_checking); | 3705 bind(&done_checking); |
| 3706 } | 3706 } |
| 3707 } | 3707 } |
| 3708 | 3708 |
| 3709 | 3709 |
| 3710 void MacroAssembler::AssertRootValue(Register src, | 3710 void MacroAssembler::AssertRootValue(Register src, |
| 3711 Heap::RootListIndex root_value_index, | 3711 Heap::RootListIndex root_value_index, |
| 3712 BailoutReason reason) { | 3712 BailoutReason reason) { |
| 3713 if (emit_debug_code()) { | 3713 if (emit_debug_code()) { |
| 3714 ASSERT(!src.is(kScratchRegister)); | 3714 DCHECK(!src.is(kScratchRegister)); |
| 3715 LoadRoot(kScratchRegister, root_value_index); | 3715 LoadRoot(kScratchRegister, root_value_index); |
| 3716 cmpp(src, kScratchRegister); | 3716 cmpp(src, kScratchRegister); |
| 3717 Check(equal, reason); | 3717 Check(equal, reason); |
| 3718 } | 3718 } |
| 3719 } | 3719 } |
| 3720 | 3720 |
| 3721 | 3721 |
| 3722 | 3722 |
| 3723 Condition MacroAssembler::IsObjectStringType(Register heap_object, | 3723 Condition MacroAssembler::IsObjectStringType(Register heap_object, |
| 3724 Register map, | 3724 Register map, |
| (...skipping 79 matching lines...) |
| 3804 | 3804 |
| 3805 void MacroAssembler::SetCounter(StatsCounter* counter, int value) { | 3805 void MacroAssembler::SetCounter(StatsCounter* counter, int value) { |
| 3806 if (FLAG_native_code_counters && counter->Enabled()) { | 3806 if (FLAG_native_code_counters && counter->Enabled()) { |
| 3807 Operand counter_operand = ExternalOperand(ExternalReference(counter)); | 3807 Operand counter_operand = ExternalOperand(ExternalReference(counter)); |
| 3808 movl(counter_operand, Immediate(value)); | 3808 movl(counter_operand, Immediate(value)); |
| 3809 } | 3809 } |
| 3810 } | 3810 } |
| 3811 | 3811 |
| 3812 | 3812 |
| 3813 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { | 3813 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value) { |
| 3814 ASSERT(value > 0); | 3814 DCHECK(value > 0); |
| 3815 if (FLAG_native_code_counters && counter->Enabled()) { | 3815 if (FLAG_native_code_counters && counter->Enabled()) { |
| 3816 Operand counter_operand = ExternalOperand(ExternalReference(counter)); | 3816 Operand counter_operand = ExternalOperand(ExternalReference(counter)); |
| 3817 if (value == 1) { | 3817 if (value == 1) { |
| 3818 incl(counter_operand); | 3818 incl(counter_operand); |
| 3819 } else { | 3819 } else { |
| 3820 addl(counter_operand, Immediate(value)); | 3820 addl(counter_operand, Immediate(value)); |
| 3821 } | 3821 } |
| 3822 } | 3822 } |
| 3823 } | 3823 } |
| 3824 | 3824 |
| 3825 | 3825 |
| 3826 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { | 3826 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value) { |
| 3827 ASSERT(value > 0); | 3827 DCHECK(value > 0); |
| 3828 if (FLAG_native_code_counters && counter->Enabled()) { | 3828 if (FLAG_native_code_counters && counter->Enabled()) { |
| 3829 Operand counter_operand = ExternalOperand(ExternalReference(counter)); | 3829 Operand counter_operand = ExternalOperand(ExternalReference(counter)); |
| 3830 if (value == 1) { | 3830 if (value == 1) { |
| 3831 decl(counter_operand); | 3831 decl(counter_operand); |
| 3832 } else { | 3832 } else { |
| 3833 subl(counter_operand, Immediate(value)); | 3833 subl(counter_operand, Immediate(value)); |
| 3834 } | 3834 } |
| 3835 } | 3835 } |
| 3836 } | 3836 } |
| 3837 | 3837 |
| 3838 | 3838 |
| 3839 void MacroAssembler::DebugBreak() { | 3839 void MacroAssembler::DebugBreak() { |
| 3840 Set(rax, 0); // No arguments. | 3840 Set(rax, 0); // No arguments. |
| 3841 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate())); | 3841 LoadAddress(rbx, ExternalReference(Runtime::kDebugBreak, isolate())); |
| 3842 CEntryStub ces(isolate(), 1); | 3842 CEntryStub ces(isolate(), 1); |
| 3843 ASSERT(AllowThisStubCall(&ces)); | 3843 DCHECK(AllowThisStubCall(&ces)); |
| 3844 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); | 3844 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); |
| 3845 } | 3845 } |
| 3846 | 3846 |
| 3847 | 3847 |
| 3848 void MacroAssembler::InvokeCode(Register code, | 3848 void MacroAssembler::InvokeCode(Register code, |
| 3849 const ParameterCount& expected, | 3849 const ParameterCount& expected, |
| 3850 const ParameterCount& actual, | 3850 const ParameterCount& actual, |
| 3851 InvokeFlag flag, | 3851 InvokeFlag flag, |
| 3852 const CallWrapper& call_wrapper) { | 3852 const CallWrapper& call_wrapper) { |
| 3853 // You can't call a function without a valid frame. | 3853 // You can't call a function without a valid frame. |
| 3854 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 3854 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
| 3855 | 3855 |
| 3856 Label done; | 3856 Label done; |
| 3857 bool definitely_mismatches = false; | 3857 bool definitely_mismatches = false; |
| 3858 InvokePrologue(expected, | 3858 InvokePrologue(expected, |
| 3859 actual, | 3859 actual, |
| 3860 Handle<Code>::null(), | 3860 Handle<Code>::null(), |
| 3861 code, | 3861 code, |
| 3862 &done, | 3862 &done, |
| 3863 &definitely_mismatches, | 3863 &definitely_mismatches, |
| 3864 flag, | 3864 flag, |
| 3865 Label::kNear, | 3865 Label::kNear, |
| 3866 call_wrapper); | 3866 call_wrapper); |
| 3867 if (!definitely_mismatches) { | 3867 if (!definitely_mismatches) { |
| 3868 if (flag == CALL_FUNCTION) { | 3868 if (flag == CALL_FUNCTION) { |
| 3869 call_wrapper.BeforeCall(CallSize(code)); | 3869 call_wrapper.BeforeCall(CallSize(code)); |
| 3870 call(code); | 3870 call(code); |
| 3871 call_wrapper.AfterCall(); | 3871 call_wrapper.AfterCall(); |
| 3872 } else { | 3872 } else { |
| 3873 ASSERT(flag == JUMP_FUNCTION); | 3873 DCHECK(flag == JUMP_FUNCTION); |
| 3874 jmp(code); | 3874 jmp(code); |
| 3875 } | 3875 } |
| 3876 bind(&done); | 3876 bind(&done); |
| 3877 } | 3877 } |
| 3878 } | 3878 } |
| 3879 | 3879 |
| 3880 | 3880 |
| 3881 void MacroAssembler::InvokeFunction(Register function, | 3881 void MacroAssembler::InvokeFunction(Register function, |
| 3882 const ParameterCount& actual, | 3882 const ParameterCount& actual, |
| 3883 InvokeFlag flag, | 3883 InvokeFlag flag, |
| 3884 const CallWrapper& call_wrapper) { | 3884 const CallWrapper& call_wrapper) { |
| 3885 // You can't call a function without a valid frame. | 3885 // You can't call a function without a valid frame. |
| 3886 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 3886 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
| 3887 | 3887 |
| 3888 ASSERT(function.is(rdi)); | 3888 DCHECK(function.is(rdi)); |
| 3889 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); | 3889 movp(rdx, FieldOperand(function, JSFunction::kSharedFunctionInfoOffset)); |
| 3890 movp(rsi, FieldOperand(function, JSFunction::kContextOffset)); | 3890 movp(rsi, FieldOperand(function, JSFunction::kContextOffset)); |
| 3891 LoadSharedFunctionInfoSpecialField(rbx, rdx, | 3891 LoadSharedFunctionInfoSpecialField(rbx, rdx, |
| 3892 SharedFunctionInfo::kFormalParameterCountOffset); | 3892 SharedFunctionInfo::kFormalParameterCountOffset); |
| 3893 // Load the code entry point (just past the Code object header, i.e. the | 3893 // Load the code entry point (just past the Code object header, i.e. the |
| 3894 // start of the executable code) into rdx. | 3894 // start of the executable code) into rdx. |
| 3895 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); | 3895 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); |
| 3896 | 3896 |
| 3897 ParameterCount expected(rbx); | 3897 ParameterCount expected(rbx); |
| 3898 InvokeCode(rdx, expected, actual, flag, call_wrapper); | 3898 InvokeCode(rdx, expected, actual, flag, call_wrapper); |
| 3899 } | 3899 } |
| 3900 | 3900 |
| 3901 | 3901 |
| 3902 void MacroAssembler::InvokeFunction(Register function, | 3902 void MacroAssembler::InvokeFunction(Register function, |
| 3903 const ParameterCount& expected, | 3903 const ParameterCount& expected, |
| 3904 const ParameterCount& actual, | 3904 const ParameterCount& actual, |
| 3905 InvokeFlag flag, | 3905 InvokeFlag flag, |
| 3906 const CallWrapper& call_wrapper) { | 3906 const CallWrapper& call_wrapper) { |
| 3907 // You can't call a function without a valid frame. | 3907 // You can't call a function without a valid frame. |
| 3908 ASSERT(flag == JUMP_FUNCTION || has_frame()); | 3908 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
| 3909 | 3909 |
| 3910 ASSERT(function.is(rdi)); | 3910 DCHECK(function.is(rdi)); |
| 3911 movp(rsi, FieldOperand(function, JSFunction::kContextOffset)); | 3911 movp(rsi, FieldOperand(function, JSFunction::kContextOffset)); |
| 3912 // Load the code entry point (just past the Code object header, i.e. the | 3912 // Load the code entry point (just past the Code object header, i.e. the |
| 3913 // start of the executable code) into rdx. | 3913 // start of the executable code) into rdx. |
| 3914 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); | 3914 movp(rdx, FieldOperand(rdi, JSFunction::kCodeEntryOffset)); |
| 3915 | 3915 |
| 3916 InvokeCode(rdx, expected, actual, flag, call_wrapper); | 3916 InvokeCode(rdx, expected, actual, flag, call_wrapper); |
| 3917 } | 3917 } |
| 3918 | 3918 |
| 3919 | 3919 |
| 3920 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 3920 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
| (...skipping 12 matching lines...) |
| 3933 Register code_register, | 3933 Register code_register, |
| 3934 Label* done, | 3934 Label* done, |
| 3935 bool* definitely_mismatches, | 3935 bool* definitely_mismatches, |
| 3936 InvokeFlag flag, | 3936 InvokeFlag flag, |
| 3937 Label::Distance near_jump, | 3937 Label::Distance near_jump, |
| 3938 const CallWrapper& call_wrapper) { | 3938 const CallWrapper& call_wrapper) { |
| 3939 bool definitely_matches = false; | 3939 bool definitely_matches = false; |
| 3940 *definitely_mismatches = false; | 3940 *definitely_mismatches = false; |
| 3941 Label invoke; | 3941 Label invoke; |
| 3942 if (expected.is_immediate()) { | 3942 if (expected.is_immediate()) { |
| 3943 ASSERT(actual.is_immediate()); | 3943 DCHECK(actual.is_immediate()); |
| 3944 if (expected.immediate() == actual.immediate()) { | 3944 if (expected.immediate() == actual.immediate()) { |
| 3945 definitely_matches = true; | 3945 definitely_matches = true; |
| 3946 } else { | 3946 } else { |
| 3947 Set(rax, actual.immediate()); | 3947 Set(rax, actual.immediate()); |
| 3948 if (expected.immediate() == | 3948 if (expected.immediate() == |
| 3949 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { | 3949 SharedFunctionInfo::kDontAdaptArgumentsSentinel) { |
| 3950 // Don't worry about adapting arguments for built-ins that | 3950 // Don't worry about adapting arguments for built-ins that |
| 3951 // don't want that done. Skip adaptation code by making it look | 3951 // don't want that done. Skip adaptation code by making it look |
| 3952 // like we have a match between expected and actual number of | 3952 // like we have a match between expected and actual number of |
| 3953 // arguments. | 3953 // arguments. |
| 3954 definitely_matches = true; | 3954 definitely_matches = true; |
| 3955 } else { | 3955 } else { |
| 3956 *definitely_mismatches = true; | 3956 *definitely_mismatches = true; |
| 3957 Set(rbx, expected.immediate()); | 3957 Set(rbx, expected.immediate()); |
| 3958 } | 3958 } |
| 3959 } | 3959 } |
| 3960 } else { | 3960 } else { |
| 3961 if (actual.is_immediate()) { | 3961 if (actual.is_immediate()) { |
| 3962 // Expected is in register, actual is immediate. This is the | 3962 // Expected is in register, actual is immediate. This is the |
| 3963 // case when we invoke function values without going through the | 3963 // case when we invoke function values without going through the |
| 3964 // IC mechanism. | 3964 // IC mechanism. |
| 3965 cmpp(expected.reg(), Immediate(actual.immediate())); | 3965 cmpp(expected.reg(), Immediate(actual.immediate())); |
| 3966 j(equal, &invoke, Label::kNear); | 3966 j(equal, &invoke, Label::kNear); |
| 3967 ASSERT(expected.reg().is(rbx)); | 3967 DCHECK(expected.reg().is(rbx)); |
| 3968 Set(rax, actual.immediate()); | 3968 Set(rax, actual.immediate()); |
| 3969 } else if (!expected.reg().is(actual.reg())) { | 3969 } else if (!expected.reg().is(actual.reg())) { |
| 3970 // Both expected and actual are in (different) registers. This | 3970 // Both expected and actual are in (different) registers. This |
| 3971 // is the case when we invoke functions using call and apply. | 3971 // is the case when we invoke functions using call and apply. |
| 3972 cmpp(expected.reg(), actual.reg()); | 3972 cmpp(expected.reg(), actual.reg()); |
| 3973 j(equal, &invoke, Label::kNear); | 3973 j(equal, &invoke, Label::kNear); |
| 3974 ASSERT(actual.reg().is(rax)); | 3974 DCHECK(actual.reg().is(rax)); |
| 3975 ASSERT(expected.reg().is(rbx)); | 3975 DCHECK(expected.reg().is(rbx)); |
| 3976 } | 3976 } |
| 3977 } | 3977 } |
| 3978 | 3978 |
| 3979 if (!definitely_matches) { | 3979 if (!definitely_matches) { |
| 3980 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); | 3980 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline(); |
| 3981 if (!code_constant.is_null()) { | 3981 if (!code_constant.is_null()) { |
| 3982 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT); | 3982 Move(rdx, code_constant, RelocInfo::EMBEDDED_OBJECT); |
| 3983 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag)); | 3983 addp(rdx, Immediate(Code::kHeaderSize - kHeapObjectTag)); |
| 3984 } else if (!code_register.is(rdx)) { | 3984 } else if (!code_register.is(rdx)) { |
| 3985 movp(rdx, code_register); | 3985 movp(rdx, code_register); |
| (...skipping 63 matching lines...) |
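The immediate/immediate branch of InvokePrologue above reduces to a small decision table: a compile-time match, or the don't-adapt sentinel, skips the ArgumentsAdaptorTrampoline; a compile-time mismatch makes the adaptor the only path, so it is jumped to rather than called. A hypothetical scalar restatement (names are illustrative, not the V8 interface):

```cpp
// Returns true when the call can go straight to the target; on a known
// mismatch, *definitely_mismatches is set and the adaptor must be used.
bool DefinitelyMatches(int expected, int actual, int dont_adapt_sentinel,
                       bool* definitely_mismatches) {
  *definitely_mismatches = false;
  if (expected == actual) return true;
  if (expected == dont_adapt_sentinel) return true;  // built-in: no adaptation
  *definitely_mismatches = true;  // adaptor is entered via jmp, not call
  return false;
}
```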
| 4049 Check(equal, kStackFrameTypesMustMatch); | 4049 Check(equal, kStackFrameTypesMustMatch); |
| 4050 } | 4050 } |
| 4051 movp(rsp, rbp); | 4051 movp(rsp, rbp); |
| 4052 popq(rbp); | 4052 popq(rbp); |
| 4053 } | 4053 } |
| 4054 | 4054 |
| 4055 | 4055 |
| 4056 void MacroAssembler::EnterExitFramePrologue(bool save_rax) { | 4056 void MacroAssembler::EnterExitFramePrologue(bool save_rax) { |
| 4057 // Set up the frame structure on the stack. | 4057 // Set up the frame structure on the stack. |
| 4058 // All constants are relative to the frame pointer of the exit frame. | 4058 // All constants are relative to the frame pointer of the exit frame. |
| 4059 ASSERT(ExitFrameConstants::kCallerSPDisplacement == | 4059 DCHECK(ExitFrameConstants::kCallerSPDisplacement == |
| 4060 kFPOnStackSize + kPCOnStackSize); | 4060 kFPOnStackSize + kPCOnStackSize); |
| 4061 ASSERT(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize); | 4061 DCHECK(ExitFrameConstants::kCallerPCOffset == kFPOnStackSize); |
| 4062 ASSERT(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); | 4062 DCHECK(ExitFrameConstants::kCallerFPOffset == 0 * kPointerSize); |
| 4063 pushq(rbp); | 4063 pushq(rbp); |
| 4064 movp(rbp, rsp); | 4064 movp(rbp, rsp); |
| 4065 | 4065 |
| 4066 // Reserve room for entry stack pointer and push the code object. | 4066 // Reserve room for entry stack pointer and push the code object. |
| 4067 ASSERT(ExitFrameConstants::kSPOffset == -1 * kPointerSize); | 4067 DCHECK(ExitFrameConstants::kSPOffset == -1 * kPointerSize); |
| 4068 Push(Immediate(0)); // Saved entry sp, patched before call. | 4068 Push(Immediate(0)); // Saved entry sp, patched before call. |
| 4069 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); | 4069 Move(kScratchRegister, CodeObject(), RelocInfo::EMBEDDED_OBJECT); |
| 4070 Push(kScratchRegister); // Accessed from ExitFrame::code_slot. | 4070 Push(kScratchRegister); // Accessed from ExitFrame::code_slot. |
| 4071 | 4071 |
| 4072 // Save the frame pointer and the context in top. | 4072 // Save the frame pointer and the context in top. |
| 4073 if (save_rax) { | 4073 if (save_rax) { |
| 4074 movp(r14, rax); // Backup rax in callee-save register. | 4074 movp(r14, rax); // Backup rax in callee-save register. |
| 4075 } | 4075 } |
| 4076 | 4076 |
| 4077 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp); | 4077 Store(ExternalReference(Isolate::kCEntryFPAddress, isolate()), rbp); |
| (...skipping 17 matching lines...) |
| 4095 XMMRegister reg = XMMRegister::FromAllocationIndex(i); | 4095 XMMRegister reg = XMMRegister::FromAllocationIndex(i); |
| 4096 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); | 4096 movsd(Operand(rbp, offset - ((i + 1) * kDoubleSize)), reg); |
| 4097 } | 4097 } |
| 4098 } else if (arg_stack_space > 0) { | 4098 } else if (arg_stack_space > 0) { |
| 4099 subp(rsp, Immediate(arg_stack_space * kRegisterSize)); | 4099 subp(rsp, Immediate(arg_stack_space * kRegisterSize)); |
| 4100 } | 4100 } |
| 4101 | 4101 |
| 4102 // Get the required frame alignment for the OS. | 4102 // Get the required frame alignment for the OS. |
| 4103 const int kFrameAlignment = base::OS::ActivationFrameAlignment(); | 4103 const int kFrameAlignment = base::OS::ActivationFrameAlignment(); |
| 4104 if (kFrameAlignment > 0) { | 4104 if (kFrameAlignment > 0) { |
| 4105 ASSERT(IsPowerOf2(kFrameAlignment)); | 4105 DCHECK(IsPowerOf2(kFrameAlignment)); |
| 4106 ASSERT(is_int8(kFrameAlignment)); | 4106 DCHECK(is_int8(kFrameAlignment)); |
| 4107 andp(rsp, Immediate(-kFrameAlignment)); | 4107 andp(rsp, Immediate(-kFrameAlignment)); |
| 4108 } | 4108 } |
| 4109 | 4109 |
| 4110 // Patch the saved entry sp. | 4110 // Patch the saved entry sp. |
| 4111 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp); | 4111 movp(Operand(rbp, ExitFrameConstants::kSPOffset), rsp); |
| 4112 } | 4112 } |
| 4113 | 4113 |
| 4114 | 4114 |
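The andp(rsp, Immediate(-kFrameAlignment)) above works because, for a power-of-two alignment A, -A equals ~(A - 1) in two's complement, so the AND clears the low bits and rounds the stack pointer down. A sketch under that power-of-two assumption (which the DCHECKs enforce):

```cpp
#include <cstdint>

// Round rsp down to a power-of-two boundary; -alignment == ~(alignment - 1).
uintptr_t AlignStackDown(uintptr_t rsp, uintptr_t alignment) {
  return rsp & static_cast<uintptr_t>(-static_cast<intptr_t>(alignment));
}
```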
| 4115 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) { | 4115 void MacroAssembler::EnterExitFrame(int arg_stack_space, bool save_doubles) { |
| 4116 EnterExitFramePrologue(true); | 4116 EnterExitFramePrologue(true); |
| (...skipping 62 matching lines...) |
| 4179 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address); | 4179 Operand c_entry_fp_operand = ExternalOperand(c_entry_fp_address); |
| 4180 movp(c_entry_fp_operand, Immediate(0)); | 4180 movp(c_entry_fp_operand, Immediate(0)); |
| 4181 } | 4181 } |
| 4182 | 4182 |
| 4183 | 4183 |
| 4184 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, | 4184 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, |
| 4185 Register scratch, | 4185 Register scratch, |
| 4186 Label* miss) { | 4186 Label* miss) { |
| 4187 Label same_contexts; | 4187 Label same_contexts; |
| 4188 | 4188 |
| 4189 ASSERT(!holder_reg.is(scratch)); | 4189 DCHECK(!holder_reg.is(scratch)); |
| 4190 ASSERT(!scratch.is(kScratchRegister)); | 4190 DCHECK(!scratch.is(kScratchRegister)); |
| 4191 // Load current lexical context from the stack frame. | 4191 // Load current lexical context from the stack frame. |
| 4192 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset)); | 4192 movp(scratch, Operand(rbp, StandardFrameConstants::kContextOffset)); |
| 4193 | 4193 |
| 4194 // When generating debug code, make sure the lexical context is set. | 4194 // When generating debug code, make sure the lexical context is set. |
| 4195 if (emit_debug_code()) { | 4195 if (emit_debug_code()) { |
| 4196 cmpp(scratch, Immediate(0)); | 4196 cmpp(scratch, Immediate(0)); |
| 4197 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext); | 4197 Check(not_equal, kWeShouldNotHaveAnEmptyLexicalContext); |
| 4198 } | 4198 } |
| 4199 // Load the native context of the current context. | 4199 // Load the native context of the current context. |
| 4200 int offset = | 4200 int offset = |
| (...skipping 125 matching lines...) |
| 4326 for (int i = 0; i < kNumberDictionaryProbes; i++) { | 4326 for (int i = 0; i < kNumberDictionaryProbes; i++) { |
| 4327 // Use r2 for index calculations and keep the hash intact in r0. | 4327 // Use r2 for index calculations and keep the hash intact in r0. |
| 4328 movp(r2, r0); | 4328 movp(r2, r0); |
| 4329 // Compute the masked index: (hash + i + i * i) & mask. | 4329 // Compute the masked index: (hash + i + i * i) & mask. |
| 4330 if (i > 0) { | 4330 if (i > 0) { |
| 4331 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i))); | 4331 addl(r2, Immediate(SeededNumberDictionary::GetProbeOffset(i))); |
| 4332 } | 4332 } |
| 4333 andp(r2, r1); | 4333 andp(r2, r1); |
| 4334 | 4334 |
| 4335 // Scale the index by multiplying by the entry size. | 4335 // Scale the index by multiplying by the entry size. |
| 4336 ASSERT(SeededNumberDictionary::kEntrySize == 3); | 4336 DCHECK(SeededNumberDictionary::kEntrySize == 3); |
| 4337 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 | 4337 leap(r2, Operand(r2, r2, times_2, 0)); // r2 = r2 * 3 |
| 4338 | 4338 |
| 4339 // Check if the key matches. | 4339 // Check if the key matches. |
| 4340 cmpp(key, FieldOperand(elements, | 4340 cmpp(key, FieldOperand(elements, |
| 4341 r2, | 4341 r2, |
| 4342 times_pointer_size, | 4342 times_pointer_size, |
| 4343 SeededNumberDictionary::kElementsStartOffset)); | 4343 SeededNumberDictionary::kElementsStartOffset)); |
| 4344 if (i != (kNumberDictionaryProbes - 1)) { | 4344 if (i != (kNumberDictionaryProbes - 1)) { |
| 4345 j(equal, &done); | 4345 j(equal, &done); |
| 4346 } else { | 4346 } else { |
| 4347 j(not_equal, miss); | 4347 j(not_equal, miss); |
| 4348 } | 4348 } |
| 4349 } | 4349 } |
| 4350 | 4350 |
| 4351 bind(&done); | 4351 bind(&done); |
| 4352 // Check that the value is a normal property. | 4352 // Check that the value is a normal property. |
| 4353 const int kDetailsOffset = | 4353 const int kDetailsOffset = |
| 4354 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; | 4354 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; |
| 4355 ASSERT_EQ(NORMAL, 0); | 4355 DCHECK_EQ(NORMAL, 0); |
| 4356 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), | 4356 Test(FieldOperand(elements, r2, times_pointer_size, kDetailsOffset), |
| 4357 Smi::FromInt(PropertyDetails::TypeField::kMask)); | 4357 Smi::FromInt(PropertyDetails::TypeField::kMask)); |
| 4358 j(not_zero, miss); | 4358 j(not_zero, miss); |
| 4359 | 4359 |
| 4360 // Get the value at the masked, scaled index. | 4360 // Get the value at the masked, scaled index. |
| 4361 const int kValueOffset = | 4361 const int kValueOffset = |
| 4362 SeededNumberDictionary::kElementsStartOffset + kPointerSize; | 4362 SeededNumberDictionary::kElementsStartOffset + kPointerSize; |
| 4363 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); | 4363 movp(result, FieldOperand(elements, r2, times_pointer_size, kValueOffset)); |
| 4364 } | 4364 } |
| 4365 | 4365 |
| 4366 | 4366 |
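The dictionary probe loop above tries a bounded number of quadratic offsets, scaling each index by the three-slot entry size, before falling through to the miss label. A minimal model of that lookup (hypothetical flat container, not the real SeededNumberDictionary interface):

```cpp
#include <cstdint>
#include <vector>

// Three pointer-sized slots per entry (key, value, details) is why the
// generated code multiplies the masked index by 3.
struct Entry { uint32_t key; uint32_t value; uint32_t details; };

int FindEntry(const std::vector<Entry>& entries, uint32_t capacity_mask,
              uint32_t hash, uint32_t key, int max_probes) {
  for (int i = 0; i < max_probes; i++) {
    uint32_t index = (hash + i + i * i) & capacity_mask;  // masked probe
    if (entries[index].key == key) return static_cast<int>(index);
  }
  return -1;  // caller takes the miss path
}
```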
| 4367 void MacroAssembler::LoadAllocationTopHelper(Register result, | 4367 void MacroAssembler::LoadAllocationTopHelper(Register result, |
| 4368 Register scratch, | 4368 Register scratch, |
| 4369 AllocationFlags flags) { | 4369 AllocationFlags flags) { |
| 4370 ExternalReference allocation_top = | 4370 ExternalReference allocation_top = |
| 4371 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 4371 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
| 4372 | 4372 |
| 4373 // Just return if allocation top is already known. | 4373 // Just return if allocation top is already known. |
| 4374 if ((flags & RESULT_CONTAINS_TOP) != 0) { | 4374 if ((flags & RESULT_CONTAINS_TOP) != 0) { |
| 4375 // No use of scratch if allocation top is provided. | 4375 // No use of scratch if allocation top is provided. |
| 4376 ASSERT(!scratch.is_valid()); | 4376 DCHECK(!scratch.is_valid()); |
| 4377 #ifdef DEBUG | 4377 #ifdef DEBUG |
| 4378 // Assert that result actually contains top on entry. | 4378 // Assert that result actually contains top on entry. |
| 4379 Operand top_operand = ExternalOperand(allocation_top); | 4379 Operand top_operand = ExternalOperand(allocation_top); |
| 4380 cmpp(result, top_operand); | 4380 cmpp(result, top_operand); |
| 4381 Check(equal, kUnexpectedAllocationTop); | 4381 Check(equal, kUnexpectedAllocationTop); |
| 4382 #endif | 4382 #endif |
| 4383 return; | 4383 return; |
| 4384 } | 4384 } |
| 4385 | 4385 |
| 4386 // Move address of new object to result. Use scratch register if available, | 4386 // Move address of new object to result. Use scratch register if available, |
| (...skipping 12 matching lines...) |
| 4399 Label* gc_required, | 4399 Label* gc_required, |
| 4400 AllocationFlags flags) { | 4400 AllocationFlags flags) { |
| 4401 if (kPointerSize == kDoubleSize) { | 4401 if (kPointerSize == kDoubleSize) { |
| 4402 if (FLAG_debug_code) { | 4402 if (FLAG_debug_code) { |
| 4403 testl(result, Immediate(kDoubleAlignmentMask)); | 4403 testl(result, Immediate(kDoubleAlignmentMask)); |
| 4404 Check(zero, kAllocationIsNotDoubleAligned); | 4404 Check(zero, kAllocationIsNotDoubleAligned); |
| 4405 } | 4405 } |
| 4406 } else { | 4406 } else { |
| 4407 // Align the next allocation. Storing the filler map without checking top | 4407 // Align the next allocation. Storing the filler map without checking top |
| 4408 // is safe in new-space because the limit of the heap is aligned there. | 4408 // is safe in new-space because the limit of the heap is aligned there. |
| 4409 ASSERT(kPointerSize * 2 == kDoubleSize); | 4409 DCHECK(kPointerSize * 2 == kDoubleSize); |
| 4410 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); | 4410 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0); |
| 4411 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); | 4411 DCHECK(kPointerAlignment * 2 == kDoubleAlignment); |
| 4412 // Make sure scratch is not clobbered by this function as it might be | 4412 // Make sure scratch is not clobbered by this function as it might be |
| 4413 // used in UpdateAllocationTopHelper later. | 4413 // used in UpdateAllocationTopHelper later. |
| 4414 ASSERT(!scratch.is(kScratchRegister)); | 4414 DCHECK(!scratch.is(kScratchRegister)); |
| 4415 Label aligned; | 4415 Label aligned; |
| 4416 testl(result, Immediate(kDoubleAlignmentMask)); | 4416 testl(result, Immediate(kDoubleAlignmentMask)); |
| 4417 j(zero, &aligned, Label::kNear); | 4417 j(zero, &aligned, Label::kNear); |
| 4418 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { | 4418 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) { |
| 4419 ExternalReference allocation_limit = | 4419 ExternalReference allocation_limit = |
| 4420 AllocationUtils::GetAllocationLimitReference(isolate(), flags); | 4420 AllocationUtils::GetAllocationLimitReference(isolate(), flags); |
| 4421 cmpp(result, ExternalOperand(allocation_limit)); | 4421 cmpp(result, ExternalOperand(allocation_limit)); |
| 4422 j(above_equal, gc_required); | 4422 j(above_equal, gc_required); |
| 4423 } | 4423 } |
| 4424 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex); | 4424 LoadRoot(kScratchRegister, Heap::kOnePointerFillerMapRootIndex); |
| (...skipping 24 matching lines...) |
| 4449 } | 4449 } |
| 4450 } | 4450 } |
| 4451 | 4451 |
| 4452 | 4452 |
| 4453 void MacroAssembler::Allocate(int object_size, | 4453 void MacroAssembler::Allocate(int object_size, |
| 4454 Register result, | 4454 Register result, |
| 4455 Register result_end, | 4455 Register result_end, |
| 4456 Register scratch, | 4456 Register scratch, |
| 4457 Label* gc_required, | 4457 Label* gc_required, |
| 4458 AllocationFlags flags) { | 4458 AllocationFlags flags) { |
| 4459 ASSERT((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); | 4459 DCHECK((flags & (RESULT_CONTAINS_TOP | SIZE_IN_WORDS)) == 0); |
| 4460 ASSERT(object_size <= Page::kMaxRegularHeapObjectSize); | 4460 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize); |
| 4461 if (!FLAG_inline_new) { | 4461 if (!FLAG_inline_new) { |
| 4462 if (emit_debug_code()) { | 4462 if (emit_debug_code()) { |
| 4463 // Trash the registers to simulate an allocation failure. | 4463 // Trash the registers to simulate an allocation failure. |
| 4464 movl(result, Immediate(0x7091)); | 4464 movl(result, Immediate(0x7091)); |
| 4465 if (result_end.is_valid()) { | 4465 if (result_end.is_valid()) { |
| 4466 movl(result_end, Immediate(0x7191)); | 4466 movl(result_end, Immediate(0x7191)); |
| 4467 } | 4467 } |
| 4468 if (scratch.is_valid()) { | 4468 if (scratch.is_valid()) { |
| 4469 movl(scratch, Immediate(0x7291)); | 4469 movl(scratch, Immediate(0x7291)); |
| 4470 } | 4470 } |
| 4471 } | 4471 } |
| 4472 jmp(gc_required); | 4472 jmp(gc_required); |
| 4473 return; | 4473 return; |
| 4474 } | 4474 } |
| 4475 ASSERT(!result.is(result_end)); | 4475 DCHECK(!result.is(result_end)); |
| 4476 | 4476 |
| 4477 // Load address of new object into result. | 4477 // Load address of new object into result. |
| 4478 LoadAllocationTopHelper(result, scratch, flags); | 4478 LoadAllocationTopHelper(result, scratch, flags); |
| 4479 | 4479 |
| 4480 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 4480 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
| 4481 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags); | 4481 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags); |
| 4482 } | 4482 } |
| 4483 | 4483 |
| 4484 // Calculate new top and bail out if new space is exhausted. | 4484 // Calculate new top and bail out if new space is exhausted. |
| 4485 ExternalReference allocation_limit = | 4485 ExternalReference allocation_limit = |
| (...skipping 15 matching lines...) |
| 4501 | 4501 |
| 4502 bool tag_result = (flags & TAG_OBJECT) != 0; | 4502 bool tag_result = (flags & TAG_OBJECT) != 0; |
| 4503 if (top_reg.is(result)) { | 4503 if (top_reg.is(result)) { |
| 4504 if (tag_result) { | 4504 if (tag_result) { |
| 4505 subp(result, Immediate(object_size - kHeapObjectTag)); | 4505 subp(result, Immediate(object_size - kHeapObjectTag)); |
| 4506 } else { | 4506 } else { |
| 4507 subp(result, Immediate(object_size)); | 4507 subp(result, Immediate(object_size)); |
| 4508 } | 4508 } |
| 4509 } else if (tag_result) { | 4509 } else if (tag_result) { |
| 4510 // Tag the result if requested. | 4510 // Tag the result if requested. |
| 4511 ASSERT(kHeapObjectTag == 1); | 4511 DCHECK(kHeapObjectTag == 1); |
| 4512 incp(result); | 4512 incp(result); |
| 4513 } | 4513 } |
| 4514 } | 4514 } |
| 4515 | 4515 |
| 4516 | 4516 |
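Inline allocation above is a bump of the space's top pointer checked against its limit, with the heap-object tag applied by a single increment (kHeapObjectTag == 1, per the DCHECK). A rough model under those assumptions, not the real Heap interface:

```cpp
#include <cstdint>

struct Space { uintptr_t top; uintptr_t limit; };  // illustrative only

// Returns a tagged heap pointer, or 0 when the caller should take the
// gc_required path.
uintptr_t TryBumpAllocate(Space* space, uintptr_t object_size) {
  uintptr_t result = space->top;
  if (space->limit - result < object_size) return 0;  // space exhausted
  space->top = result + object_size;  // store the new allocation top
  return result + 1;                  // incp(result): apply kHeapObjectTag
}
```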
| 4517 void MacroAssembler::Allocate(int header_size, | 4517 void MacroAssembler::Allocate(int header_size, |
| 4518 ScaleFactor element_size, | 4518 ScaleFactor element_size, |
| 4519 Register element_count, | 4519 Register element_count, |
| 4520 Register result, | 4520 Register result, |
| 4521 Register result_end, | 4521 Register result_end, |
| 4522 Register scratch, | 4522 Register scratch, |
| 4523 Label* gc_required, | 4523 Label* gc_required, |
| 4524 AllocationFlags flags) { | 4524 AllocationFlags flags) { |
| 4525 ASSERT((flags & SIZE_IN_WORDS) == 0); | 4525 DCHECK((flags & SIZE_IN_WORDS) == 0); |
| 4526 leap(result_end, Operand(element_count, element_size, header_size)); | 4526 leap(result_end, Operand(element_count, element_size, header_size)); |
| 4527 Allocate(result_end, result, result_end, scratch, gc_required, flags); | 4527 Allocate(result_end, result, result_end, scratch, gc_required, flags); |
| 4528 } | 4528 } |
| 4529 | 4529 |
| 4530 | 4530 |
| 4531 void MacroAssembler::Allocate(Register object_size, | 4531 void MacroAssembler::Allocate(Register object_size, |
| 4532 Register result, | 4532 Register result, |
| 4533 Register result_end, | 4533 Register result_end, |
| 4534 Register scratch, | 4534 Register scratch, |
| 4535 Label* gc_required, | 4535 Label* gc_required, |
| 4536 AllocationFlags flags) { | 4536 AllocationFlags flags) { |
| 4537 ASSERT((flags & SIZE_IN_WORDS) == 0); | 4537 DCHECK((flags & SIZE_IN_WORDS) == 0); |
| 4538 if (!FLAG_inline_new) { | 4538 if (!FLAG_inline_new) { |
| 4539 if (emit_debug_code()) { | 4539 if (emit_debug_code()) { |
| 4540 // Trash the registers to simulate an allocation failure. | 4540 // Trash the registers to simulate an allocation failure. |
| 4541 movl(result, Immediate(0x7091)); | 4541 movl(result, Immediate(0x7091)); |
| 4542 movl(result_end, Immediate(0x7191)); | 4542 movl(result_end, Immediate(0x7191)); |
| 4543 if (scratch.is_valid()) { | 4543 if (scratch.is_valid()) { |
| 4544 movl(scratch, Immediate(0x7291)); | 4544 movl(scratch, Immediate(0x7291)); |
| 4545 } | 4545 } |
| 4546 // object_size is left unchanged by this function. | 4546 // object_size is left unchanged by this function. |
| 4547 } | 4547 } |
| 4548 jmp(gc_required); | 4548 jmp(gc_required); |
| 4549 return; | 4549 return; |
| 4550 } | 4550 } |
| 4551 ASSERT(!result.is(result_end)); | 4551 DCHECK(!result.is(result_end)); |
| 4552 | 4552 |
| 4553 // Load address of new object into result. | 4553 // Load address of new object into result. |
| 4554 LoadAllocationTopHelper(result, scratch, flags); | 4554 LoadAllocationTopHelper(result, scratch, flags); |
| 4555 | 4555 |
| 4556 if ((flags & DOUBLE_ALIGNMENT) != 0) { | 4556 if ((flags & DOUBLE_ALIGNMENT) != 0) { |
| 4557 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags); | 4557 MakeSureDoubleAlignedHelper(result, scratch, gc_required, flags); |
| 4558 } | 4558 } |
| 4559 | 4559 |
| 4560 // Calculate new top and bail out if new space is exhausted. | 4560 // Calculate new top and bail out if new space is exhausted. |
| 4561 ExternalReference allocation_limit = | 4561 ExternalReference allocation_limit = |
| (...skipping 52 matching lines...) |
| 4614 void MacroAssembler::AllocateTwoByteString(Register result, | 4614 void MacroAssembler::AllocateTwoByteString(Register result, |
| 4615 Register length, | 4615 Register length, |
| 4616 Register scratch1, | 4616 Register scratch1, |
| 4617 Register scratch2, | 4617 Register scratch2, |
| 4618 Register scratch3, | 4618 Register scratch3, |
| 4619 Label* gc_required) { | 4619 Label* gc_required) { |
| 4620 // Calculate the number of bytes needed for the characters in the string while | 4620 // Calculate the number of bytes needed for the characters in the string while |
| 4621 // observing object alignment. | 4621 // observing object alignment. |
| 4622 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize & | 4622 const int kHeaderAlignment = SeqTwoByteString::kHeaderSize & |
| 4623 kObjectAlignmentMask; | 4623 kObjectAlignmentMask; |
| 4624 ASSERT(kShortSize == 2); | 4624 DCHECK(kShortSize == 2); |
| 4625 // scratch1 = length * 2 + kObjectAlignmentMask. | 4625 // scratch1 = length * 2 + kObjectAlignmentMask. |
| 4626 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + | 4626 leap(scratch1, Operand(length, length, times_1, kObjectAlignmentMask + |
| 4627 kHeaderAlignment)); | 4627 kHeaderAlignment)); |
| 4628 andp(scratch1, Immediate(~kObjectAlignmentMask)); | 4628 andp(scratch1, Immediate(~kObjectAlignmentMask)); |
| 4629 if (kHeaderAlignment > 0) { | 4629 if (kHeaderAlignment > 0) { |
| 4630 subp(scratch1, Immediate(kHeaderAlignment)); | 4630 subp(scratch1, Immediate(kHeaderAlignment)); |
| 4631 } | 4631 } |
| 4632 | 4632 |
| 4633 // Allocate two byte string in new space. | 4633 // Allocate two byte string in new space. |
| 4634 Allocate(SeqTwoByteString::kHeaderSize, | 4634 Allocate(SeqTwoByteString::kHeaderSize, |
| (...skipping 19 matching lines...) |
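The leap/andp/subp sequence above splits the request into the string header plus a character payload rounded up to object alignment; its net effect is just rounding the total size. An equivalent closed form (a sketch; the parameters stand in for the V8 constants):

```cpp
#include <cstddef>

// Total allocation size of a sequential string: header plus length *
// char_size bytes, rounded up to the object alignment boundary.
size_t SeqStringAllocationSize(size_t length, size_t char_size,
                               size_t header_size, size_t alignment_mask) {
  return (header_size + length * char_size + alignment_mask) & ~alignment_mask;
}
```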
| 4654 Register length, | 4654 Register length, |
| 4655 Register scratch1, | 4655 Register scratch1, |
| 4656 Register scratch2, | 4656 Register scratch2, |
| 4657 Register scratch3, | 4657 Register scratch3, |
| 4658 Label* gc_required) { | 4658 Label* gc_required) { |
| 4659 // Calculate the number of bytes needed for the characters in the string while | 4659 // Calculate the number of bytes needed for the characters in the string while |
| 4660 // observing object alignment. | 4660 // observing object alignment. |
| 4661 const int kHeaderAlignment = SeqOneByteString::kHeaderSize & | 4661 const int kHeaderAlignment = SeqOneByteString::kHeaderSize & |
| 4662 kObjectAlignmentMask; | 4662 kObjectAlignmentMask; |
| 4663 movl(scratch1, length); | 4663 movl(scratch1, length); |
| 4664 ASSERT(kCharSize == 1); | 4664 DCHECK(kCharSize == 1); |
| 4665 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); | 4665 addp(scratch1, Immediate(kObjectAlignmentMask + kHeaderAlignment)); |
| 4666 andp(scratch1, Immediate(~kObjectAlignmentMask)); | 4666 andp(scratch1, Immediate(~kObjectAlignmentMask)); |
| 4667 if (kHeaderAlignment > 0) { | 4667 if (kHeaderAlignment > 0) { |
| 4668 subp(scratch1, Immediate(kHeaderAlignment)); | 4668 subp(scratch1, Immediate(kHeaderAlignment)); |
| 4669 } | 4669 } |
| 4670 | 4670 |
| 4671 // Allocate ASCII string in new space. | 4671 // Allocate ASCII string in new space. |
| 4672 Allocate(SeqOneByteString::kHeaderSize, | 4672 Allocate(SeqOneByteString::kHeaderSize, |
| 4673 times_1, | 4673 times_1, |
| 4674 scratch1, | 4674 scratch1, |
| (...skipping 77 matching lines...) |
| 4752 // Destination is incremented by length; source, length, and scratch are | 4752 // Destination is incremented by length; source, length, and scratch are |
| 4753 // clobbered. | 4753 // clobbered. |
| 4754 // A simpler loop is faster on small copies, but slower on large ones. | 4754 // A simpler loop is faster on small copies, but slower on large ones. |
| 4755 // The cld() instruction must have been emitted, to set the direction flag, | 4755 // The cld() instruction must have been emitted, to set the direction flag, |
| 4756 // before calling this function. | 4756 // before calling this function. |
| 4757 void MacroAssembler::CopyBytes(Register destination, | 4757 void MacroAssembler::CopyBytes(Register destination, |
| 4758 Register source, | 4758 Register source, |
| 4759 Register length, | 4759 Register length, |
| 4760 int min_length, | 4760 int min_length, |
| 4761 Register scratch) { | 4761 Register scratch) { |
| 4762 ASSERT(min_length >= 0); | 4762 DCHECK(min_length >= 0); |
| 4763 if (emit_debug_code()) { | 4763 if (emit_debug_code()) { |
| 4764 cmpl(length, Immediate(min_length)); | 4764 cmpl(length, Immediate(min_length)); |
| 4765 Assert(greater_equal, kInvalidMinLength); | 4765 Assert(greater_equal, kInvalidMinLength); |
| 4766 } | 4766 } |
| 4767 Label short_loop, len8, len16, len24, done, short_string; | 4767 Label short_loop, len8, len16, len24, done, short_string; |
| 4768 | 4768 |
| 4769 const int kLongStringLimit = 4 * kPointerSize; | 4769 const int kLongStringLimit = 4 * kPointerSize; |
| 4770 if (min_length <= kLongStringLimit) { | 4770 if (min_length <= kLongStringLimit) { |
| 4771 cmpl(length, Immediate(kPointerSize)); | 4771 cmpl(length, Immediate(kPointerSize)); |
| 4772 j(below, &short_string, Label::kNear); | 4772 j(below, &short_string, Label::kNear); |
| 4773 } | 4773 } |
| 4774 | 4774 |
| 4775 ASSERT(source.is(rsi)); | 4775 DCHECK(source.is(rsi)); |
| 4776 ASSERT(destination.is(rdi)); | 4776 DCHECK(destination.is(rdi)); |
| 4777 ASSERT(length.is(rcx)); | 4777 DCHECK(length.is(rcx)); |
| 4778 | 4778 |
| 4779 if (min_length <= kLongStringLimit) { | 4779 if (min_length <= kLongStringLimit) { |
| 4780 cmpl(length, Immediate(2 * kPointerSize)); | 4780 cmpl(length, Immediate(2 * kPointerSize)); |
| 4781 j(below_equal, &len8, Label::kNear); | 4781 j(below_equal, &len8, Label::kNear); |
| 4782 cmpl(length, Immediate(3 * kPointerSize)); | 4782 cmpl(length, Immediate(3 * kPointerSize)); |
| 4783 j(below_equal, &len16, Label::kNear); | 4783 j(below_equal, &len16, Label::kNear); |
| 4784 cmpl(length, Immediate(4 * kPointerSize)); | 4784 cmpl(length, Immediate(4 * kPointerSize)); |
| 4785 j(below_equal, &len24, Label::kNear); | 4785 j(below_equal, &len24, Label::kNear); |
| 4786 } | 4786 } |
| 4787 | 4787 |
| (...skipping 144 matching lines...) |
| 4932 } | 4932 } |
| 4933 | 4933 |
| 4934 | 4934 |
| 4935 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { | 4935 int MacroAssembler::ArgumentStackSlotsForCFunctionCall(int num_arguments) { |
| 4936 // On Windows 64 stack slots are reserved by the caller for all arguments | 4936 // On Windows 64 stack slots are reserved by the caller for all arguments |
| 4937 // including the ones passed in registers, and space is always allocated for | 4937 // including the ones passed in registers, and space is always allocated for |
| 4938 // the four register arguments even if the function takes fewer than four | 4938 // the four register arguments even if the function takes fewer than four |
| 4939 // arguments. | 4939 // arguments. |
| 4940 // On the AMD64 ABI (Linux/Mac) the first six arguments are passed in registers | 4940 // On the AMD64 ABI (Linux/Mac) the first six arguments are passed in registers |
| 4941 // and the caller does not reserve stack slots for them. | 4941 // and the caller does not reserve stack slots for them. |
| 4942 ASSERT(num_arguments >= 0); | 4942 DCHECK(num_arguments >= 0); |
| 4943 #ifdef _WIN64 | 4943 #ifdef _WIN64 |
| 4944 const int kMinimumStackSlots = kRegisterPassedArguments; | 4944 const int kMinimumStackSlots = kRegisterPassedArguments; |
| 4945 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots; | 4945 if (num_arguments < kMinimumStackSlots) return kMinimumStackSlots; |
| 4946 return num_arguments; | 4946 return num_arguments; |
| 4947 #else | 4947 #else |
| 4948 if (num_arguments < kRegisterPassedArguments) return 0; | 4948 if (num_arguments < kRegisterPassedArguments) return 0; |
| 4949 return num_arguments - kRegisterPassedArguments; | 4949 return num_arguments - kRegisterPassedArguments; |
| 4950 #endif | 4950 #endif |
| 4951 } | 4951 } |
| 4952 | 4952 |
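Restating the two policies above side by side (kRegisterPassedArguments is 4 on Windows x64 and 6 on the AMD64 SysV ABI; this helper is an illustration, not the V8 function):

```cpp
// Stack slots the caller must reserve for a C call with num_args arguments.
int ArgumentStackSlots(int num_args, bool win64) {
  if (win64) {
    // Shadow space: at least four slots, one per register argument.
    return num_args < 4 ? 4 : num_args;
  }
  // SysV: the first six integer arguments ride in registers, no slots.
  return num_args < 6 ? 0 : num_args - 6;
}
```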
| (...skipping 26 matching lines...) |
| 4979 SmiCompare(index, Smi::FromInt(0)); | 4979 SmiCompare(index, Smi::FromInt(0)); |
| 4980 Check(greater_equal, kIndexIsNegative); | 4980 Check(greater_equal, kIndexIsNegative); |
| 4981 | 4981 |
| 4982 // Restore the index | 4982 // Restore the index |
| 4983 SmiToInteger32(index, index); | 4983 SmiToInteger32(index, index); |
| 4984 } | 4984 } |
| 4985 | 4985 |
| 4986 | 4986 |
| 4987 void MacroAssembler::PrepareCallCFunction(int num_arguments) { | 4987 void MacroAssembler::PrepareCallCFunction(int num_arguments) { |
| 4988 int frame_alignment = base::OS::ActivationFrameAlignment(); | 4988 int frame_alignment = base::OS::ActivationFrameAlignment(); |
| 4989 ASSERT(frame_alignment != 0); | 4989 DCHECK(frame_alignment != 0); |
| 4990 ASSERT(num_arguments >= 0); | 4990 DCHECK(num_arguments >= 0); |
| 4991 | 4991 |
| 4992 // Make stack end at alignment and allocate space for arguments and old rsp. | 4992 // Make stack end at alignment and allocate space for arguments and old rsp. |
| 4993 movp(kScratchRegister, rsp); | 4993 movp(kScratchRegister, rsp); |
| 4994 ASSERT(IsPowerOf2(frame_alignment)); | 4994 DCHECK(IsPowerOf2(frame_alignment)); |
| 4995 int argument_slots_on_stack = | 4995 int argument_slots_on_stack = |
| 4996 ArgumentStackSlotsForCFunctionCall(num_arguments); | 4996 ArgumentStackSlotsForCFunctionCall(num_arguments); |
| 4997 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize)); | 4997 subp(rsp, Immediate((argument_slots_on_stack + 1) * kRegisterSize)); |
| 4998 andp(rsp, Immediate(-frame_alignment)); | 4998 andp(rsp, Immediate(-frame_alignment)); |
| 4999 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister); | 4999 movp(Operand(rsp, argument_slots_on_stack * kRegisterSize), kScratchRegister); |
| 5000 } | 5000 } |
| 5001 | 5001 |
| 5002 | 5002 |
| 5003 void MacroAssembler::CallCFunction(ExternalReference function, | 5003 void MacroAssembler::CallCFunction(ExternalReference function, |
| 5004 int num_arguments) { | 5004 int num_arguments) { |
| 5005 LoadAddress(rax, function); | 5005 LoadAddress(rax, function); |
| 5006 CallCFunction(rax, num_arguments); | 5006 CallCFunction(rax, num_arguments); |
| 5007 } | 5007 } |
| 5008 | 5008 |
| 5009 | 5009 |
| 5010 void MacroAssembler::CallCFunction(Register function, int num_arguments) { | 5010 void MacroAssembler::CallCFunction(Register function, int num_arguments) { |
| 5011 ASSERT(has_frame()); | 5011 DCHECK(has_frame()); |
| 5012 // Check stack alignment. | 5012 // Check stack alignment. |
| 5013 if (emit_debug_code()) { | 5013 if (emit_debug_code()) { |
| 5014 CheckStackAlignment(); | 5014 CheckStackAlignment(); |
| 5015 } | 5015 } |
| 5016 | 5016 |
| 5017 call(function); | 5017 call(function); |
| 5018 ASSERT(base::OS::ActivationFrameAlignment() != 0); | 5018 DCHECK(base::OS::ActivationFrameAlignment() != 0); |
| 5019 ASSERT(num_arguments >= 0); | 5019 DCHECK(num_arguments >= 0); |
| 5020 int argument_slots_on_stack = | 5020 int argument_slots_on_stack = |
| 5021 ArgumentStackSlotsForCFunctionCall(num_arguments); | 5021 ArgumentStackSlotsForCFunctionCall(num_arguments); |
| 5022 movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize)); | 5022 movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize)); |
| 5023 } | 5023 } |
| 5024 | 5024 |
| 5025 | 5025 |
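PrepareCallCFunction parks the incoming rsp in the slot just above the argument area, and CallCFunction reloads it from there (movp(rsp, Operand(rsp, argument_slots_on_stack * kRegisterSize))) once the call returns. A compact model of the pointer arithmetic, assuming 8-byte slots and a power-of-two alignment:

```cpp
#include <cstdint>

// rsp after PrepareCallCFunction: room for arg_slots arguments plus one
// slot for the saved rsp, rounded down to the frame alignment.
uintptr_t AlignedSpForCCall(uintptr_t rsp, int arg_slots,
                            uintptr_t alignment) {
  rsp -= static_cast<uintptr_t>(arg_slots + 1) * 8;  // args + saved-rsp slot
  return rsp & ~(alignment - 1);                     // andp(rsp, -alignment)
}
```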
| 5026 #ifdef DEBUG | 5026 #ifdef DEBUG |
| 5027 bool AreAliased(Register reg1, | 5027 bool AreAliased(Register reg1, |
| 5028 Register reg2, | 5028 Register reg2, |
| 5029 Register reg3, | 5029 Register reg3, |
| (...skipping 22 matching lines...) |
| 5052 #endif | 5052 #endif |
| 5053 | 5053 |
| 5054 | 5054 |
| 5055 CodePatcher::CodePatcher(byte* address, int size) | 5055 CodePatcher::CodePatcher(byte* address, int size) |
| 5056 : address_(address), | 5056 : address_(address), |
| 5057 size_(size), | 5057 size_(size), |
| 5058 masm_(NULL, address, size + Assembler::kGap) { | 5058 masm_(NULL, address, size + Assembler::kGap) { |
| 5059 // Create a new macro assembler pointing to the address of the code to patch. | 5059 // Create a new macro assembler pointing to the address of the code to patch. |
| 5060 // The size is adjusted with kGap in order for the assembler to generate size | 5060 // The size is adjusted with kGap in order for the assembler to generate size |
| 5061 // bytes of instructions without failing with buffer size constraints. | 5061 // bytes of instructions without failing with buffer size constraints. |
| 5062 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 5062 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 5063 } | 5063 } |
| 5064 | 5064 |
| 5065 | 5065 |
| 5066 CodePatcher::~CodePatcher() { | 5066 CodePatcher::~CodePatcher() { |
| 5067 // Indicate that code has changed. | 5067 // Indicate that code has changed. |
| 5068 CpuFeatures::FlushICache(address_, size_); | 5068 CpuFeatures::FlushICache(address_, size_); |
| 5069 | 5069 |
| 5070 // Check that the code was patched as expected. | 5070 // Check that the code was patched as expected. |
| 5071 ASSERT(masm_.pc_ == address_ + size_); | 5071 DCHECK(masm_.pc_ == address_ + size_); |
| 5072 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); | 5072 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); |
| 5073 } | 5073 } |
| 5074 | 5074 |
| 5075 | 5075 |
| 5076 void MacroAssembler::CheckPageFlag( | 5076 void MacroAssembler::CheckPageFlag( |
| 5077 Register object, | 5077 Register object, |
| 5078 Register scratch, | 5078 Register scratch, |
| 5079 int mask, | 5079 int mask, |
| 5080 Condition cc, | 5080 Condition cc, |
| 5081 Label* condition_met, | 5081 Label* condition_met, |
| 5082 Label::Distance condition_met_distance) { | 5082 Label::Distance condition_met_distance) { |
| 5083 ASSERT(cc == zero || cc == not_zero); | 5083 DCHECK(cc == zero || cc == not_zero); |
| 5084 if (scratch.is(object)) { | 5084 if (scratch.is(object)) { |
| 5085 andp(scratch, Immediate(~Page::kPageAlignmentMask)); | 5085 andp(scratch, Immediate(~Page::kPageAlignmentMask)); |
| 5086 } else { | 5086 } else { |
| 5087 movp(scratch, Immediate(~Page::kPageAlignmentMask)); | 5087 movp(scratch, Immediate(~Page::kPageAlignmentMask)); |
| 5088 andp(scratch, object); | 5088 andp(scratch, object); |
| 5089 } | 5089 } |
| 5090 if (mask < (1 << kBitsPerByte)) { | 5090 if (mask < (1 << kBitsPerByte)) { |
| 5091 testb(Operand(scratch, MemoryChunk::kFlagsOffset), | 5091 testb(Operand(scratch, MemoryChunk::kFlagsOffset), |
| 5092 Immediate(static_cast<uint8_t>(mask))); | 5092 Immediate(static_cast<uint8_t>(mask))); |
| 5093 } else { | 5093 } else { |
| (...skipping 13 matching lines...) |
| 5107 j(not_zero, if_deprecated); | 5107 j(not_zero, if_deprecated); |
| 5108 } | 5108 } |
| 5109 } | 5109 } |
| 5110 | 5110 |
| 5111 | 5111 |
| 5112 void MacroAssembler::JumpIfBlack(Register object, | 5112 void MacroAssembler::JumpIfBlack(Register object, |
| 5113 Register bitmap_scratch, | 5113 Register bitmap_scratch, |
| 5114 Register mask_scratch, | 5114 Register mask_scratch, |
| 5115 Label* on_black, | 5115 Label* on_black, |
| 5116 Label::Distance on_black_distance) { | 5116 Label::Distance on_black_distance) { |
| 5117 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, rcx)); | 5117 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, rcx)); |
| 5118 GetMarkBits(object, bitmap_scratch, mask_scratch); | 5118 GetMarkBits(object, bitmap_scratch, mask_scratch); |
| 5119 | 5119 |
| 5120 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 5120 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 5121 // The mask_scratch register contains a 1 at the position of the first bit | 5121 // The mask_scratch register contains a 1 at the position of the first bit |
| 5122 // and a 0 at all other positions, including the position of the second bit. | 5122 // and a 0 at all other positions, including the position of the second bit. |
| 5123 movp(rcx, mask_scratch); | 5123 movp(rcx, mask_scratch); |
| 5124 // Make rcx into a mask that covers both marking bits using the operation | 5124 // Make rcx into a mask that covers both marking bits using the operation |
| 5125 // rcx = mask | (mask << 1). | 5125 // rcx = mask | (mask << 1). |
| 5126 leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0)); | 5126 leap(rcx, Operand(mask_scratch, mask_scratch, times_2, 0)); |
| 5127 // Note that we are using a 4-byte aligned 8-byte load. | 5127 // Note that we are using a 4-byte aligned 8-byte load. |
| 5128 andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 5128 andp(rcx, Operand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
| 5129 cmpp(mask_scratch, rcx); | 5129 cmpp(mask_scratch, rcx); |
| 5130 j(equal, on_black, on_black_distance); | 5130 j(equal, on_black, on_black_distance); |
| 5131 } | 5131 } |
| 5132 | 5132 |
| 5133 | 5133 |
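Per the DCHECKed patterns above, every object has two mark bits: white is 00, black is 10 (first bit set, second clear), grey is 11, and 01 cannot occur. JumpIfBlack widens the one-bit mask to cover both bits and then requires exactly the first. A scalar restatement (sketch only):

```cpp
#include <cstdint>

// first_bit_mask has a single set bit, the object's first mark bit.
bool IsBlack(uint64_t bitmap_word, uint64_t first_bit_mask) {
  uint64_t both_bits = first_bit_mask | (first_bit_mask << 1);  // the leap
  return (bitmap_word & both_bits) == first_bit_mask;  // "10" pattern only
}
```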
| 5134 // Detect some, but not all, common pointer-free objects. This is used by the | 5134 // Detect some, but not all, common pointer-free objects. This is used by the |
| 5135 // incremental write barrier which doesn't care about oddballs (they are always | 5135 // incremental write barrier which doesn't care about oddballs (they are always |
| 5136 // marked black immediately so this code is not hit). | 5136 // marked black immediately so this code is not hit). |
| 5137 void MacroAssembler::JumpIfDataObject( | 5137 void MacroAssembler::JumpIfDataObject( |
| 5138 Register value, | 5138 Register value, |
| 5139 Register scratch, | 5139 Register scratch, |
| 5140 Label* not_data_object, | 5140 Label* not_data_object, |
| 5141 Label::Distance not_data_object_distance) { | 5141 Label::Distance not_data_object_distance) { |
| 5142 Label is_data_object; | 5142 Label is_data_object; |
| 5143 movp(scratch, FieldOperand(value, HeapObject::kMapOffset)); | 5143 movp(scratch, FieldOperand(value, HeapObject::kMapOffset)); |
| 5144 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); | 5144 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex); |
| 5145 j(equal, &is_data_object, Label::kNear); | 5145 j(equal, &is_data_object, Label::kNear); |
| 5146 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 5146 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
| 5147 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 5147 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
| 5148 // If it's a string and it's not a cons string then it's an object containing | 5148 // If it's a string and it's not a cons string then it's an object containing |
| 5149 // no GC pointers. | 5149 // no GC pointers. |
| 5150 testb(FieldOperand(scratch, Map::kInstanceTypeOffset), | 5150 testb(FieldOperand(scratch, Map::kInstanceTypeOffset), |
| 5151 Immediate(kIsIndirectStringMask | kIsNotStringMask)); | 5151 Immediate(kIsIndirectStringMask | kIsNotStringMask)); |
| 5152 j(not_zero, not_data_object, not_data_object_distance); | 5152 j(not_zero, not_data_object, not_data_object_distance); |
| 5153 bind(&is_data_object); | 5153 bind(&is_data_object); |
| 5154 } | 5154 } |
| 5155 | 5155 |
| 5156 | 5156 |
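The instance-type test in JumpIfDataObject above classifies a non-heap-number value as pointer-free only if it is a direct (non-cons, non-sliced) string; with the asserted tag values that is one AND against a combined mask. Sketch, with the constants restated as assumptions:

```cpp
#include <cstdint>

// kIsNotStringMask == 0x80 and kIsIndirectStringMask == 0x01, per the
// DCHECKs above; zero means "a string, and not a cons/sliced one".
bool IsDirectString(uint8_t instance_type) {
  const uint8_t kIsNotStringMask = 0x80;
  const uint8_t kIsIndirectStringMask = 0x01;
  return (instance_type & (kIsNotStringMask | kIsIndirectStringMask)) == 0;
}
```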
| 5157 void MacroAssembler::GetMarkBits(Register addr_reg, | 5157 void MacroAssembler::GetMarkBits(Register addr_reg, |
| 5158 Register bitmap_reg, | 5158 Register bitmap_reg, |
| 5159 Register mask_reg) { | 5159 Register mask_reg) { |
| 5160 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx)); | 5160 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, rcx)); |
| 5161 movp(bitmap_reg, addr_reg); | 5161 movp(bitmap_reg, addr_reg); |
| 5162 // Sign extended 32 bit immediate. | 5162 // Sign extended 32 bit immediate. |
| 5163 andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); | 5163 andp(bitmap_reg, Immediate(~Page::kPageAlignmentMask)); |
| 5164 movp(rcx, addr_reg); | 5164 movp(rcx, addr_reg); |
| 5165 int shift = | 5165 int shift = |
| 5166 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; | 5166 Bitmap::kBitsPerCellLog2 + kPointerSizeLog2 - Bitmap::kBytesPerCellLog2; |
| 5167 shrl(rcx, Immediate(shift)); | 5167 shrl(rcx, Immediate(shift)); |
| 5168 andp(rcx, | 5168 andp(rcx, |
| 5169 Immediate((Page::kPageAlignmentMask >> shift) & | 5169 Immediate((Page::kPageAlignmentMask >> shift) & |
| 5170 ~(Bitmap::kBytesPerCell - 1))); | 5170 ~(Bitmap::kBytesPerCell - 1))); |
| 5171 | 5171 |
| 5172 addp(bitmap_reg, rcx); | 5172 addp(bitmap_reg, rcx); |
| 5173 movp(rcx, addr_reg); | 5173 movp(rcx, addr_reg); |
| 5174 shrl(rcx, Immediate(kPointerSizeLog2)); | 5174 shrl(rcx, Immediate(kPointerSizeLog2)); |
| 5175 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); | 5175 andp(rcx, Immediate((1 << Bitmap::kBitsPerCellLog2) - 1)); |
| 5176 movl(mask_reg, Immediate(1)); | 5176 movl(mask_reg, Immediate(1)); |
| 5177 shlp_cl(mask_reg); | 5177 shlp_cl(mask_reg); |
| 5178 } | 5178 } |
| 5179 | 5179 |
| 5180 | 5180 |
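GetMarkBits above turns an address into a bitmap cell location plus an in-cell bit mask; the shift-and-mask chain is easier to follow in scalar form. A standalone model (the constants are this sketch's assumptions: 8-byte pointers, 32-bit bitmap cells):

```cpp
#include <cstdint>

void MarkBitLocation(uintptr_t addr, uintptr_t page_alignment_mask,
                     uintptr_t* cell_address, uint32_t* mask) {
  const int kPointerSizeLog2 = 3;   // one mark bit per pointer-sized word
  const int kBitsPerCellLog2 = 5;   // 32 mark bits per bitmap cell
  const int kBytesPerCellLog2 = 2;  // each cell is 4 bytes wide
  uintptr_t page = addr & ~page_alignment_mask;  // containing page
  int shift = kBitsPerCellLog2 + kPointerSizeLog2 - kBytesPerCellLog2;
  uintptr_t cell_offset = (addr >> shift) & (page_alignment_mask >> shift) &
                          ~static_cast<uintptr_t>(3);  // 4-byte cell aligned
  *cell_address = page + cell_offset;  // plus the bitmap header offset in V8
  uint32_t bit = (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  *mask = 1u << bit;  // the final movl/shlp_cl pair
}
```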
| 5181 void MacroAssembler::EnsureNotWhite( | 5181 void MacroAssembler::EnsureNotWhite( |
| 5182 Register value, | 5182 Register value, |
| 5183 Register bitmap_scratch, | 5183 Register bitmap_scratch, |
| 5184 Register mask_scratch, | 5184 Register mask_scratch, |
| 5185 Label* value_is_white_and_not_data, | 5185 Label* value_is_white_and_not_data, |
| 5186 Label::Distance distance) { | 5186 Label::Distance distance) { |
| 5187 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, rcx)); | 5187 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, rcx)); |
| 5188 GetMarkBits(value, bitmap_scratch, mask_scratch); | 5188 GetMarkBits(value, bitmap_scratch, mask_scratch); |
| 5189 | 5189 |
| 5190 // If the value is black or grey we don't need to do anything. | 5190 // If the value is black or grey we don't need to do anything. |
| 5191 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); | 5191 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0); |
| 5192 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); | 5192 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); |
| 5193 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); | 5193 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0); |
| 5194 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | 5194 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
| 5195 | 5195 |
| 5196 Label done; | 5196 Label done; |
| 5197 | 5197 |
| 5198 // Since both black and grey have a 1 in the first position and white does | 5198 // Since both black and grey have a 1 in the first position and white does |
| 5199 // not have a 1 there we only need to check one bit. | 5199 // not have a 1 there we only need to check one bit. |
| 5200 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); | 5200 testp(Operand(bitmap_scratch, MemoryChunk::kHeaderSize), mask_scratch); |
| 5201 j(not_zero, &done, Label::kNear); | 5201 j(not_zero, &done, Label::kNear); |
| 5202 | 5202 |
| 5203 if (emit_debug_code()) { | 5203 if (emit_debug_code()) { |
| 5204 // Check for impossible bit pattern. | 5204 // Check for impossible bit pattern. |
| (...skipping 17 matching lines...) |
| 5222 | 5222 |
| 5223 // Check for heap-number | 5223 // Check for heap-number |
| 5224 movp(map, FieldOperand(value, HeapObject::kMapOffset)); | 5224 movp(map, FieldOperand(value, HeapObject::kMapOffset)); |
| 5225 CompareRoot(map, Heap::kHeapNumberMapRootIndex); | 5225 CompareRoot(map, Heap::kHeapNumberMapRootIndex); |
| 5226 j(not_equal, &not_heap_number, Label::kNear); | 5226 j(not_equal, &not_heap_number, Label::kNear); |
| 5227 movp(length, Immediate(HeapNumber::kSize)); | 5227 movp(length, Immediate(HeapNumber::kSize)); |
| 5228 jmp(&is_data_object, Label::kNear); | 5228 jmp(&is_data_object, Label::kNear); |
| 5229 | 5229 |
| 5230 bind(&not_heap_number); | 5230 bind(&not_heap_number); |
| 5231 // Check for strings. | 5231 // Check for strings. |
| 5232 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); | 5232 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); |
| 5233 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); | 5233 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); |
| 5234 // If it's a string and it's not a cons string then it's an object containing | 5234 // If it's a string and it's not a cons string then it's an object containing |
| 5235 // no GC pointers. | 5235 // no GC pointers. |
| 5236 Register instance_type = rcx; | 5236 Register instance_type = rcx; |
| 5237 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); | 5237 movzxbl(instance_type, FieldOperand(map, Map::kInstanceTypeOffset)); |
| 5238 testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask)); | 5238 testb(instance_type, Immediate(kIsIndirectStringMask | kIsNotStringMask)); |
| 5239 j(not_zero, value_is_white_and_not_data); | 5239 j(not_zero, value_is_white_and_not_data); |
| 5240 // It's a non-indirect (non-cons and non-slice) string. | 5240 // It's a non-indirect (non-cons and non-slice) string. |
| 5241 // If it's external, the length is just ExternalString::kSize. | 5241 // If it's external, the length is just ExternalString::kSize. |
| 5242 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). | 5242 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2). |
| 5243 Label not_external; | 5243 Label not_external; |
| 5244 // External strings are the only ones with the kExternalStringTag bit | 5244 // External strings are the only ones with the kExternalStringTag bit |
| 5245 // set. | 5245 // set. |
| 5246 ASSERT_EQ(0, kSeqStringTag & kExternalStringTag); | 5246 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag); |
| 5247 ASSERT_EQ(0, kConsStringTag & kExternalStringTag); | 5247 DCHECK_EQ(0, kConsStringTag & kExternalStringTag); |
| 5248 testb(instance_type, Immediate(kExternalStringTag)); | 5248 testb(instance_type, Immediate(kExternalStringTag)); |
| 5249 j(zero, &not_external, Label::kNear); | 5249 j(zero, &not_external, Label::kNear); |
| 5250 movp(length, Immediate(ExternalString::kSize)); | 5250 movp(length, Immediate(ExternalString::kSize)); |
| 5251 jmp(&is_data_object, Label::kNear); | 5251 jmp(&is_data_object, Label::kNear); |
| 5252 | 5252 |
| 5253 bind(&not_external); | 5253 bind(&not_external); |
| 5254 // Sequential string, either ASCII or UC16. | 5254 // Sequential string, either ASCII or UC16. |
| 5255 ASSERT(kOneByteStringTag == 0x04); | 5255 DCHECK(kOneByteStringTag == 0x04); |
| 5256 andp(length, Immediate(kStringEncodingMask)); | 5256 andp(length, Immediate(kStringEncodingMask)); |
| 5257 xorp(length, Immediate(kStringEncodingMask)); | 5257 xorp(length, Immediate(kStringEncodingMask)); |
| 5258 addp(length, Immediate(0x04)); | 5258 addp(length, Immediate(0x04)); |
| 5259 // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2. | 5259 // Value now either 4 (if ASCII) or 8 (if UC16), i.e. char-size shifted by 2. |
| 5260 imulp(length, FieldOperand(value, String::kLengthOffset)); | 5260 imulp(length, FieldOperand(value, String::kLengthOffset)); |
| 5261 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); | 5261 shrp(length, Immediate(2 + kSmiTagSize + kSmiShiftSize)); |
| 5262 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); | 5262 addp(length, Immediate(SeqString::kHeaderSize + kObjectAlignmentMask)); |
| 5263 andp(length, Immediate(~kObjectAlignmentMask)); | 5263 andp(length, Immediate(~kObjectAlignmentMask)); |
| 5264 | 5264 |
| 5265 bind(&is_data_object); | 5265 bind(&is_data_object); |
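[Editor's note] The sequential-string size computation above packs several tricks into a few instructions: masking and flipping the encoding bit turns the instance type into char-size << 2 (4 for ASCII, 8 for UC16), the multiply by the smi-tagged length field carries an extra factor of 2^(kSmiTagSize + kSmiShiftSize), and a single shift by 2 + 1 + 31 removes both. A worked sketch under assumed x64 constants (one-byte tag 0x04, smi value shifted left 32; header size and alignment mask here are illustrative inputs, not V8's actual values):

    #include <cstdint>

    uint64_t SeqStringSize(uint64_t instance_type, uint64_t smi_length,
                           uint64_t header_size, uint64_t align_mask) {
      uint64_t t = instance_type & 0x04;  // andp: 4 if one-byte, 0 if two-byte
      t ^= 0x04;                          // xorp: 0 if one-byte, 4 if two-byte
      t += 0x04;                          // addp: char size << 2, i.e. 4 or 8
      t *= smi_length;                    // imulp: smi_length is (chars << 32)
      t >>= 2 + 1 + 31;                   // shrp: drop the << 2 and smi tag/shift
      t += header_size + align_mask;      // addp: prepare the round-up
      return t & ~align_mask;             // andp: align to object granularity
    }

    // E.g. a one-byte string of 11 chars with a 16-byte header and 8-byte
    // alignment: 4 * (11 << 32) >> 34 = 11; 11 + 16 + 7 = 34; 34 & ~7 = 32.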
| (...skipping 72 matching lines...) |
| 5338 CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize), | 5338 CompareRoot(MemOperand(scratch_reg, -AllocationMemento::kSize), |
| 5339 Heap::kAllocationMementoMapRootIndex); | 5339 Heap::kAllocationMementoMapRootIndex); |
| 5340 } | 5340 } |
| 5341 | 5341 |
| 5342 | 5342 |
| 5343 void MacroAssembler::JumpIfDictionaryInPrototypeChain( | 5343 void MacroAssembler::JumpIfDictionaryInPrototypeChain( |
| 5344 Register object, | 5344 Register object, |
| 5345 Register scratch0, | 5345 Register scratch0, |
| 5346 Register scratch1, | 5346 Register scratch1, |
| 5347 Label* found) { | 5347 Label* found) { |
| 5348 ASSERT(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister))); | 5348 DCHECK(!(scratch0.is(kScratchRegister) && scratch1.is(kScratchRegister))); |
| 5349 ASSERT(!scratch1.is(scratch0)); | 5349 DCHECK(!scratch1.is(scratch0)); |
| 5350 Register current = scratch0; | 5350 Register current = scratch0; |
| 5351 Label loop_again; | 5351 Label loop_again; |
| 5352 | 5352 |
| 5353 movp(current, object); | 5353 movp(current, object); |
| 5354 | 5354 |
| 5355 // Loop based on the map going up the prototype chain. | 5355 // Loop based on the map going up the prototype chain. |
| 5356 bind(&loop_again); | 5356 bind(&loop_again); |
| 5357 movp(current, FieldOperand(current, HeapObject::kMapOffset)); | 5357 movp(current, FieldOperand(current, HeapObject::kMapOffset)); |
| 5358 movp(scratch1, FieldOperand(current, Map::kBitField2Offset)); | 5358 movp(scratch1, FieldOperand(current, Map::kBitField2Offset)); |
| 5359 DecodeField<Map::ElementsKindBits>(scratch1); | 5359 DecodeField<Map::ElementsKindBits>(scratch1); |
| 5360 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS)); | 5360 cmpp(scratch1, Immediate(DICTIONARY_ELEMENTS)); |
| 5361 j(equal, found); | 5361 j(equal, found); |
| 5362 movp(current, FieldOperand(current, Map::kPrototypeOffset)); | 5362 movp(current, FieldOperand(current, Map::kPrototypeOffset)); |
| 5363 CompareRoot(current, Heap::kNullValueRootIndex); | 5363 CompareRoot(current, Heap::kNullValueRootIndex); |
| 5364 j(not_equal, &loop_again); | 5364 j(not_equal, &loop_again); |
| 5365 } | 5365 } |
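[Editor's note] For reference, the loop reads in C++ roughly as follows: load the current object's map, test its decoded elements kind, and follow map->prototype until the null sentinel. The layout structs are hypothetical stand-ins for the field accesses above, and the DecodeField<Map::ElementsKindBits> extraction is simplified to a plain field read.

    #include <cstdint>

    struct Map;
    struct HeapObject { Map* map; };
    struct Map { uint8_t elements_kind; HeapObject* prototype; };

    bool DictionaryInPrototypeChain(HeapObject* object, HeapObject* null_value,
                                    uint8_t kDictionaryElements) {
      HeapObject* current = object;
      do {
        Map* map = current->map;                        // movp from kMapOffset
        if (map->elements_kind == kDictionaryElements)  // cmpp + j(equal, found)
          return true;
        current = map->prototype;                       // movp from kPrototypeOffset
      } while (current != null_value);                  // CompareRoot + j(not_equal)
      return false;
    }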
| 5366 | 5366 |
| 5367 | 5367 |
| 5368 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { | 5368 void MacroAssembler::TruncatingDiv(Register dividend, int32_t divisor) { |
| 5369 ASSERT(!dividend.is(rax)); | 5369 DCHECK(!dividend.is(rax)); |
| 5370 ASSERT(!dividend.is(rdx)); | 5370 DCHECK(!dividend.is(rdx)); |
| 5371 MultiplierAndShift ms(divisor); | 5371 MultiplierAndShift ms(divisor); |
| 5372 movl(rax, Immediate(ms.multiplier())); | 5372 movl(rax, Immediate(ms.multiplier())); |
| 5373 imull(dividend); | 5373 imull(dividend); |
| 5374 if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend); | 5374 if (divisor > 0 && ms.multiplier() < 0) addl(rdx, dividend); |
| 5375 if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend); | 5375 if (divisor < 0 && ms.multiplier() > 0) subl(rdx, dividend); |
| 5376 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift())); | 5376 if (ms.shift() > 0) sarl(rdx, Immediate(ms.shift())); |
| 5377 movl(rax, dividend); | 5377 movl(rax, dividend); |
| 5378 shrl(rax, Immediate(31)); | 5378 shrl(rax, Immediate(31)); |
| 5379 addl(rdx, rax); | 5379 addl(rdx, rax); |
| 5380 } | 5380 } |
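[Editor's note] TruncatingDiv divides by a known constant without a div instruction: multiply by a precomputed magic multiplier, keep the high 32 bits of the product (which imull leaves in rdx), apply the sign fix-ups, shift, and add the dividend's sign bit so the result rounds toward zero. A C++ sketch of the same sequence; the multiplier/shift pair is whatever MultiplierAndShift derives for the divisor (the derivation itself is assumed, not shown):

    #include <cstdint>

    int32_t TruncatingDivByConstant(int32_t dividend, int32_t divisor,
                                    int32_t multiplier, int32_t shift) {
      // imull: the high half of the 64-bit signed product is the raw quotient.
      int32_t hi = (int32_t)(((int64_t)multiplier * dividend) >> 32);
      if (divisor > 0 && multiplier < 0) hi += dividend;  // addl(rdx, dividend)
      if (divisor < 0 && multiplier > 0) hi -= dividend;  // subl(rdx, dividend)
      if (shift > 0) hi >>= shift;                        // sarl(rdx, shift)
      hi += (int32_t)((uint32_t)dividend >> 31);          // +1 if dividend < 0
      return hi;                                          // truncated quotient
    }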
| 5381 | 5381 |
| 5382 | 5382 |
| 5383 } } // namespace v8::internal | 5383 } } // namespace v8::internal |
| 5384 | 5384 |
| 5385 #endif // V8_TARGET_ARCH_X64 | 5385 #endif // V8_TARGET_ARCH_X64 |