Chromium Code Reviews

Side by Side Diff: src/arm/macro-assembler-arm.cc

Issue 6529032: Merge 6168:6800 from bleeding_edge to experimental/gc branch. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/gc/
Patch Set: Created 9 years, 10 months ago
OLD | NEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 299 matching lines...)
311 usat(dst, satpos, src, cond); 311 usat(dst, satpos, src, cond);
312 } 312 }
313 } 313 }
314 314
315 315
316 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) { 316 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
317 // Empty the const pool. 317 // Empty the const pool.
318 CheckConstPool(true, true); 318 CheckConstPool(true, true);
319 add(pc, pc, Operand(index, 319 add(pc, pc, Operand(index,
320 LSL, 320 LSL,
321 assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize)); 321 Instruction::kInstrSizeLog2 - kSmiTagSize));
322 BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize); 322 BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
323 nop(); // Jump table alignment. 323 nop(); // Jump table alignment.
324 for (int i = 0; i < targets.length(); i++) { 324 for (int i = 0; i < targets.length(); i++) {
325 b(targets[i]); 325 b(targets[i]);
326 } 326 }
327 } 327 }
328 328
329 329
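A note on the jump-table arithmetic above: reading pc on ARM yields the address of the current instruction plus 8, which is exactly why one nop of padding sits between the add and the first branch. A host-side sketch of the address math (not V8 code; ARM constants as used above):

    #include <cstdint>

    // Where does `add(pc, pc, index << (kInstrSizeLog2 - kSmiTagSize))` land?
    // A smi-tagged index is value << 1, so shifting by one more turns it into
    // value * 4, i.e. one 4-byte branch instruction per table entry.
    uint32_t SmiJumpTableTarget(uint32_t add_instr_address, uint32_t smi_index) {
      const int kInstrSize = 4, kInstrSizeLog2 = 2, kSmiTagSize = 1;
      uint32_t pc_read = add_instr_address + 2 * kInstrSize;  // ARM pc read-ahead.
      return pc_read + (smi_index << (kInstrSizeLog2 - kSmiTagSize));
    }
    // smi_index == 0 lands on the first b(targets[0]) right after the nop.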
330 void MacroAssembler::LoadRoot(Register destination, 330 void MacroAssembler::LoadRoot(Register destination,
331 Heap::RootListIndex index, 331 Heap::RootListIndex index,
(...skipping 31 matching lines...)
363 // Mark region dirty. 363 // Mark region dirty.
364 ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset)); 364 ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
365 mov(ip, Operand(1)); 365 mov(ip, Operand(1));
366 orr(scratch, scratch, Operand(ip, LSL, address)); 366 orr(scratch, scratch, Operand(ip, LSL, address));
367 str(scratch, MemOperand(object, Page::kDirtyFlagOffset)); 367 str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
368 } 368 }
369 369
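The three instructions above are a read-modify-write of the page's dirty-flag word; at this point the `address` register holds the region's bit index (the elided lines before this chunk compute it). A minimal host-side model under that assumption:

    #include <cstdint>

    // ldr/orr/str on Page::kDirtyFlagOffset: set bit `region` of the flag word.
    void MarkRegionDirty(uint32_t* dirty_flags, uint32_t region) {
      *dirty_flags |= (1u << region);  // orr(scratch, scratch, ip LSL address)
    }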
370 370
371 void MacroAssembler::InNewSpace(Register object, 371 void MacroAssembler::InNewSpace(Register object,
372 Register scratch, 372 Register scratch,
373 Condition cc, 373 Condition cond,
374 Label* branch) { 374 Label* branch) {
375 ASSERT(cc == eq || cc == ne); 375 ASSERT(cond == eq || cond == ne);
376 and_(scratch, object, Operand(ExternalReference::new_space_mask())); 376 and_(scratch, object, Operand(ExternalReference::new_space_mask()));
377 cmp(scratch, Operand(ExternalReference::new_space_start())); 377 cmp(scratch, Operand(ExternalReference::new_space_start()));
378 b(cc, branch); 378 b(cond, branch);
379 } 379 }
380 380
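The InNewSpace test works because new space is a single contiguous, power-of-two-aligned region, so masking an address and comparing against the space's start decides membership. A sketch under that assumption:

    #include <cstdint>

    bool InNewSpace(uintptr_t object, uintptr_t new_space_start, uintptr_t mask) {
      // and_(scratch, object, mask); cmp(scratch, new_space_start)
      return (object & mask) == new_space_start;
    }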
381 381
382 // Will clobber 4 registers: object, offset, scratch, ip. The 382 // Will clobber 4 registers: object, offset, scratch, ip. The
383 // register 'object' contains a heap object pointer. The heap object 383 // register 'object' contains a heap object pointer. The heap object
384 // tag is shifted away. 384 // tag is shifted away.
385 void MacroAssembler::RecordWrite(Register object, 385 void MacroAssembler::RecordWrite(Register object,
386 Operand offset, 386 Operand offset,
387 Register scratch0, 387 Register scratch0,
388 Register scratch1) { 388 Register scratch1) {
(...skipping 71 matching lines...)
460 } 460 }
461 461
462 462
463 void MacroAssembler::PopSafepointRegisters() { 463 void MacroAssembler::PopSafepointRegisters() {
464 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 464 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
465 ldm(ia_w, sp, kSafepointSavedRegisters); 465 ldm(ia_w, sp, kSafepointSavedRegisters);
466 add(sp, sp, Operand(num_unsaved * kPointerSize)); 466 add(sp, sp, Operand(num_unsaved * kPointerSize));
467 } 467 }
468 468
469 469
470 void MacroAssembler::PushSafepointRegistersAndDoubles() {
471 PushSafepointRegisters();
472 sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
473 kDoubleSize));
474 for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
475 vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
476 }
477 }
478
479
480 void MacroAssembler::PopSafepointRegistersAndDoubles() {
481 for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
482 vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
483 }
484 add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
485 kDoubleSize));
486 PopSafepointRegisters();
487 }
488
489 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register reg) {
490 str(reg, SafepointRegistersAndDoublesSlot(reg));
491 }
492
493
494 void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
495 str(reg, SafepointRegisterSlot(reg));
496 }
497
498
499 void MacroAssembler::LoadFromSafepointRegisterSlot(Register reg) {
500 ldr(reg, SafepointRegisterSlot(reg));
501 }
502
503
470 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { 504 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
471 // The registers are pushed starting with the highest encoding, 505 // The registers are pushed starting with the highest encoding,
472 // which means that lowest encodings are closest to the stack pointer. 506 // which means that lowest encodings are closest to the stack pointer.
473 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); 507 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
474 return reg_code; 508 return reg_code;
475 } 509 }
476 510
477 511
512 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
513 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
514 }
515
516
517 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
518 // General purpose registers are pushed last on the stack.
519 int doubles_size = DwVfpRegister::kNumAllocatableRegisters * kDoubleSize;
520 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
521 return MemOperand(sp, doubles_size + register_offset);
522 }
523
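The new slot helpers encode the frame layout built by PushSafepointRegistersAndDoubles: the doubles sit below the general-purpose registers, so a GP register's slot is doubles_size bytes above sp. A sketch of the offset math (the register count is an assumed value, for illustration only):

    // Byte offset from sp of register reg_code's slot in the
    // registers-and-doubles safepoint frame.
    int SafepointRegistersAndDoublesOffset(int reg_code) {
      const int kNumAllocatableDoubles = 15;  // assumed value of
          // DwVfpRegister::kNumAllocatableRegisters
      const int kDoubleSize = 8, kPointerSize = 4;
      int doubles_size = kNumAllocatableDoubles * kDoubleSize;
      return doubles_size + reg_code * kPointerSize;
    }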
524
478 void MacroAssembler::Ldrd(Register dst1, Register dst2, 525 void MacroAssembler::Ldrd(Register dst1, Register dst2,
479 const MemOperand& src, Condition cond) { 526 const MemOperand& src, Condition cond) {
480 ASSERT(src.rm().is(no_reg)); 527 ASSERT(src.rm().is(no_reg));
481 ASSERT(!dst1.is(lr)); // r14. 528 ASSERT(!dst1.is(lr)); // r14.
482 ASSERT_EQ(0, dst1.code() % 2); 529 ASSERT_EQ(0, dst1.code() % 2);
483 ASSERT_EQ(dst1.code() + 1, dst2.code()); 530 ASSERT_EQ(dst1.code() + 1, dst2.code());
484 531
485 // Generate two ldr instructions if ldrd is not available. 532 // Generate two ldr instructions if ldrd is not available.
486 if (CpuFeatures::IsSupported(ARMv7)) { 533 if (CpuFeatures::IsSupported(ARMv7)) {
487 CpuFeatures::Scope scope(ARMv7); 534 CpuFeatures::Scope scope(ARMv7);
(...skipping 25 matching lines...)
513 strd(src1, src2, dst, cond); 560 strd(src1, src2, dst, cond);
514 } else { 561 } else {
515 MemOperand dst2(dst); 562 MemOperand dst2(dst);
516 dst2.set_offset(dst2.offset() + 4); 563 dst2.set_offset(dst2.offset() + 4);
517 str(src1, dst, cond); 564 str(src1, dst, cond);
518 str(src2, dst2, cond); 565 str(src2, dst2, cond);
519 } 566 }
520 } 567 }
521 568
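On pre-ARMv7 cores the 64-bit transfer in Ldrd/Strd is simply split into two 32-bit accesses at offset and offset + 4. A host-side model of the store fallback:

    #include <cstdint>
    #include <cstring>

    void StrdFallback(uint8_t* base, int offset, uint32_t src1, uint32_t src2) {
      std::memcpy(base + offset, &src1, 4);      // str(src1, dst, cond)
      std::memcpy(base + offset + 4, &src2, 4);  // str(src2, dst2, cond)
    }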
522 569
570 void MacroAssembler::ClearFPSCRBits(const uint32_t bits_to_clear,
571 const Register scratch,
572 const Condition cond) {
573 vmrs(scratch, cond);
574 bic(scratch, scratch, Operand(bits_to_clear), LeaveCC, cond);
575 vmsr(scratch, cond);
576 }
577
578
579 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
580 const DwVfpRegister src2,
581 const Condition cond) {
582 // Compare and move FPSCR flags to the normal condition flags.
583 VFPCompareAndLoadFlags(src1, src2, pc, cond);
584 }
585
586 void MacroAssembler::VFPCompareAndSetFlags(const DwVfpRegister src1,
587 const double src2,
588 const Condition cond) {
589 // Compare and move FPSCR flags to the normal condition flags.
590 VFPCompareAndLoadFlags(src1, src2, pc, cond);
591 }
592
593
594 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
595 const DwVfpRegister src2,
596 const Register fpscr_flags,
597 const Condition cond) {
598 // Compare and load FPSCR.
599 vcmp(src1, src2, cond);
600 vmrs(fpscr_flags, cond);
601 }
602
603 void MacroAssembler::VFPCompareAndLoadFlags(const DwVfpRegister src1,
604 const double src2,
605 const Register fpscr_flags,
606 const Condition cond) {
607 // Compare and load FPSCR.
608 vcmp(src1, src2, cond);
609 vmrs(fpscr_flags, cond);
610 }
611
612
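VFPCompareAndSetFlags works because vmrs with pc as its destination (APSR_nzcv) copies the four FPSCR condition bits into the CPSR, after which ordinary conditional branches apply. A model of the flag encodings vcmp produces, per the ARM architecture manual:

    struct Flags { bool n, z, c, v; };

    Flags VfpCompare(double a, double b) {
      if (a != a || b != b) return {false, false, true, true};   // unordered (NaN)
      if (a == b)           return {false, true, true, false};   // equal
      if (a < b)            return {true, false, false, false};  // less than
      return {false, false, true, false};                        // greater than
    }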
523 void MacroAssembler::EnterFrame(StackFrame::Type type) { 613 void MacroAssembler::EnterFrame(StackFrame::Type type) {
524 // r0-r3: preserved 614 // r0-r3: preserved
525 stm(db_w, sp, cp.bit() | fp.bit() | lr.bit()); 615 stm(db_w, sp, cp.bit() | fp.bit() | lr.bit());
526 mov(ip, Operand(Smi::FromInt(type))); 616 mov(ip, Operand(Smi::FromInt(type)));
527 push(ip); 617 push(ip);
528 mov(ip, Operand(CodeObject())); 618 mov(ip, Operand(CodeObject()));
529 push(ip); 619 push(ip);
530 add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP. 620 add(fp, sp, Operand(3 * kPointerSize)); // Adjust FP to point to saved FP.
531 } 621 }
532 622
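For reference, a sketch of the frame EnterFrame builds, derived from the push order above (stm stores the lowest-numbered register at the lowest address; one word = 4 bytes):

    #include <cstdint>

    // Members in ascending address order; fp ends up pointing at saved_fp.
    struct TypeFrame {
      uint32_t code_object;  // fp - 12   <- sp after EnterFrame
      uint32_t type_marker;  // fp - 8    (Smi::FromInt(type))
      uint32_t saved_cp;     // fp - 4
      uint32_t saved_fp;     // fp + 0    <- fp = sp + 3 * kPointerSize
      uint32_t saved_lr;     // fp + 4    (return address)
    };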
533 623
534 void MacroAssembler::LeaveFrame(StackFrame::Type type) { 624 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
535 // r0: preserved 625 // r0: preserved
536 // r1: preserved 626 // r1: preserved
537 // r2: preserved 627 // r2: preserved
538 628
539 // Drop the execution stack down to the frame pointer and restore 629 // Drop the execution stack down to the frame pointer and restore
540 // the caller frame pointer and return address. 630 // the caller frame pointer and return address.
541 mov(sp, fp); 631 mov(sp, fp);
542 ldm(ia_w, sp, fp.bit() | lr.bit()); 632 ldm(ia_w, sp, fp.bit() | lr.bit());
543 } 633 }
544 634
545 635
546 void MacroAssembler::EnterExitFrame(bool save_doubles) { 636 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
547 // r0 is argc. 637 // Setup the frame structure on the stack.
548 // Compute callee's stack pointer before making changes and save it as 638 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
549 // ip register so that it is restored as sp register on exit, thereby 639 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
550 // popping the args. 640 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
551 641 Push(lr, fp);
552 // ip = sp + kPointerSize * #args; 642 mov(fp, Operand(sp)); // Setup new frame pointer.
553 add(ip, sp, Operand(r0, LSL, kPointerSizeLog2)); 643 // Reserve room for saved entry sp and code object.
554 644 sub(sp, sp, Operand(2 * kPointerSize));
555 // Compute the argv pointer and keep it in a callee-saved register. 645 if (FLAG_debug_code) {
556 sub(r6, ip, Operand(kPointerSize)); 646 mov(ip, Operand(0));
557 647 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
558 // Prepare the stack to be aligned when calling into C. After this point there
559 // are 5 pushes before the call into C, so the stack needs to be aligned after
560 // 5 pushes.
561 int frame_alignment = ActivationFrameAlignment();
562 int frame_alignment_mask = frame_alignment - 1;
563 if (frame_alignment != kPointerSize) {
564 // The following code needs to be more general if this assert does not hold.
565 ASSERT(frame_alignment == 2 * kPointerSize);
566 // With 5 pushes left the frame must be unaligned at this point.
567 mov(r7, Operand(Smi::FromInt(0)));
568 tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
569 push(r7, eq); // Push if aligned to make it unaligned.
570 } 648 }
571
572 // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
573 stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
574 mov(fp, Operand(sp)); // Setup new frame pointer.
575
576 mov(ip, Operand(CodeObject())); 649 mov(ip, Operand(CodeObject()));
577 push(ip); // Accessed from ExitFrame::code_slot. 650 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
578 651
579 // Save the frame pointer and the context in top. 652 // Save the frame pointer and the context in top.
580 mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); 653 mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
581 str(fp, MemOperand(ip)); 654 str(fp, MemOperand(ip));
582 mov(ip, Operand(ExternalReference(Top::k_context_address))); 655 mov(ip, Operand(ExternalReference(Top::k_context_address)));
583 str(cp, MemOperand(ip)); 656 str(cp, MemOperand(ip));
584 657
585 // Setup argc and the builtin function in callee-saved registers.
586 mov(r4, Operand(r0));
587 mov(r5, Operand(r1));
588
589 // Optionally save all double registers. 658 // Optionally save all double registers.
590 if (save_doubles) { 659 if (save_doubles) {
591 // TODO(regis): Use vstrm instruction. 660 sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
592 // The stack alignment code above made sp unaligned, so add space for one 661 const int offset = -2 * kPointerSize;
593 // more double register and use aligned addresses.
594 ASSERT(kDoubleSize == frame_alignment);
595 // Mark the frame as containing doubles by pushing a non-valid return
596 // address, i.e. 0.
597 ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
598 mov(ip, Operand(0)); // Marker and alignment word.
599 push(ip);
600 int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
601 sub(sp, sp, Operand(space));
602 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { 662 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
603 DwVfpRegister reg = DwVfpRegister::from_code(i); 663 DwVfpRegister reg = DwVfpRegister::from_code(i);
604 vstr(reg, sp, i * kDoubleSize + kPointerSize); 664 vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
605 } 665 }
606 // Note that d0 will be accessible at fp - 2*kPointerSize - 666 // Note that d0 will be accessible at
607 // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the 667 // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
608 // alignment word were pushed after the fp. 668 // since the sp slot and code slot were pushed after the fp.
609 } 669 }
670
671 // Reserve room for the return address and the requested stack space, and
672 // align the frame in preparation for calling the runtime function.
673 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
674 sub(sp, sp, Operand((stack_space + 1) * kPointerSize));
675 if (frame_alignment > 0) {
676 ASSERT(IsPowerOf2(frame_alignment));
677 and_(sp, sp, Operand(-frame_alignment));
678 }
679
680 // Set the exit frame sp value to point just before the return address
681 // location.
682 add(ip, sp, Operand(kPointerSize));
683 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
610 } 684 }
611 685
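The tail of the new EnterExitFrame reserves stack_space argument slots plus one word for the return address, then rounds sp down to the ABI alignment. A host-side sketch of that math:

    #include <cstdint>

    uintptr_t ExitFrameSp(uintptr_t sp, int stack_space, int frame_alignment) {
      const int kPointerSize = 4;
      sp -= (stack_space + 1) * kPointerSize;  // slots + return-address word
      if (frame_alignment > 0) {
        // ASSERT(IsPowerOf2(frame_alignment)); and_(sp, sp, -frame_alignment)
        sp &= ~static_cast<uintptr_t>(frame_alignment - 1);
      }
      return sp;  // kSPOffset then records sp + kPointerSize
    }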
612 686
613 void MacroAssembler::InitializeNewString(Register string, 687 void MacroAssembler::InitializeNewString(Register string,
614 Register length, 688 Register length,
615 Heap::RootListIndex map_index, 689 Heap::RootListIndex map_index,
616 Register scratch1, 690 Register scratch1,
617 Register scratch2) { 691 Register scratch2) {
618 mov(scratch1, Operand(length, LSL, kSmiTagSize)); 692 mov(scratch1, Operand(length, LSL, kSmiTagSize));
619 LoadRoot(scratch2, map_index); 693 LoadRoot(scratch2, map_index);
(...skipping 14 matching lines...)
634 #else // defined(V8_HOST_ARCH_ARM) 708 #else // defined(V8_HOST_ARCH_ARM)
635 // If we are using the simulator then we should always align to the expected 709 // If we are using the simulator then we should always align to the expected
636 // alignment. As the simulator is used to generate snapshots we do not know 710 // alignment. As the simulator is used to generate snapshots we do not know
637 // if the target platform will need alignment, so this is controlled from a 711 // if the target platform will need alignment, so this is controlled from a
638 // flag. 712 // flag.
639 return FLAG_sim_stack_alignment; 713 return FLAG_sim_stack_alignment;
640 #endif // defined(V8_HOST_ARCH_ARM) 714 #endif // defined(V8_HOST_ARCH_ARM)
641 } 715 }
642 716
643 717
644 void MacroAssembler::LeaveExitFrame(bool save_doubles) { 718 void MacroAssembler::LeaveExitFrame(bool save_doubles,
719 Register argument_count) {
645 // Optionally restore all double registers. 720 // Optionally restore all double registers.
646 if (save_doubles) { 721 if (save_doubles) {
647 // TODO(regis): Use vldrm instruction.
648 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { 722 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
649 DwVfpRegister reg = DwVfpRegister::from_code(i); 723 DwVfpRegister reg = DwVfpRegister::from_code(i);
650 // Register d15 is just below the marker. 724 const int offset = -2 * kPointerSize;
651 const int offset = ExitFrameConstants::kMarkerOffset; 725 vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
652 vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
653 } 726 }
654 } 727 }
655 728
656 // Clear top frame. 729 // Clear top frame.
657 mov(r3, Operand(0, RelocInfo::NONE)); 730 mov(r3, Operand(0, RelocInfo::NONE));
658 mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address))); 731 mov(ip, Operand(ExternalReference(Top::k_c_entry_fp_address)));
659 str(r3, MemOperand(ip)); 732 str(r3, MemOperand(ip));
660 733
661 // Restore current context from top and clear it in debug mode. 734 // Restore current context from top and clear it in debug mode.
662 mov(ip, Operand(ExternalReference(Top::k_context_address))); 735 mov(ip, Operand(ExternalReference(Top::k_context_address)));
663 ldr(cp, MemOperand(ip)); 736 ldr(cp, MemOperand(ip));
664 #ifdef DEBUG 737 #ifdef DEBUG
665 str(r3, MemOperand(ip)); 738 str(r3, MemOperand(ip));
666 #endif 739 #endif
667 740
668 // Pop the arguments, restore registers, and return. 741 // Tear down the exit frame, pop the arguments, and return.
669 mov(sp, Operand(fp)); // respect ABI stack constraint 742 mov(sp, Operand(fp));
670 ldm(ia, sp, fp.bit() | sp.bit() | pc.bit()); 743 ldm(ia_w, sp, fp.bit() | lr.bit());
744 if (argument_count.is_valid()) {
745 add(sp, sp, Operand(argument_count, LSL, kPointerSizeLog2));
746 }
671 } 747 }
672 748
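The new argument_count parameter lets LeaveExitFrame drop a dynamic number of arguments after the frame is torn down; passing an invalid register (no_reg) skips the adjustment. In effect (modeling the register with an int, and no_reg as -1):

    #include <cstdint>

    uintptr_t PopArguments(uintptr_t sp, int argument_count /* -1 if no_reg */) {
      const int kPointerSizeLog2 = 2;
      if (argument_count >= 0) {  // argument_count.is_valid()
        sp += static_cast<uintptr_t>(argument_count) << kPointerSizeLog2;
      }
      return sp;
    }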
673 749
674 void MacroAssembler::InvokePrologue(const ParameterCount& expected, 750 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
675 const ParameterCount& actual, 751 const ParameterCount& actual,
676 Handle<Code> code_constant, 752 Handle<Code> code_constant,
677 Register code_reg, 753 Register code_reg,
678 Label* done, 754 Label* done,
679 InvokeFlag flag) { 755 InvokeFlag flag,
756 PostCallGenerator* post_call_generator) {
680 bool definitely_matches = false; 757 bool definitely_matches = false;
681 Label regular_invoke; 758 Label regular_invoke;
682 759
683 // Check whether the expected and actual arguments count match. If not, 760 // Check whether the expected and actual arguments count match. If not,
684 // setup registers according to contract with ArgumentsAdaptorTrampoline: 761 // setup registers according to contract with ArgumentsAdaptorTrampoline:
685 // r0: actual arguments count 762 // r0: actual arguments count
686 // r1: function (passed through to callee) 763 // r1: function (passed through to callee)
687 // r2: expected arguments count 764 // r2: expected arguments count
688 // r3: callee code entry 765 // r3: callee code entry
689 766
(...skipping 35 matching lines...)
725 if (!definitely_matches) { 802 if (!definitely_matches) {
726 if (!code_constant.is_null()) { 803 if (!code_constant.is_null()) {
727 mov(r3, Operand(code_constant)); 804 mov(r3, Operand(code_constant));
728 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag)); 805 add(r3, r3, Operand(Code::kHeaderSize - kHeapObjectTag));
729 } 806 }
730 807
731 Handle<Code> adaptor = 808 Handle<Code> adaptor =
732 Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline)); 809 Handle<Code>(Builtins::builtin(Builtins::ArgumentsAdaptorTrampoline));
733 if (flag == CALL_FUNCTION) { 810 if (flag == CALL_FUNCTION) {
734 Call(adaptor, RelocInfo::CODE_TARGET); 811 Call(adaptor, RelocInfo::CODE_TARGET);
812 if (post_call_generator != NULL) post_call_generator->Generate();
735 b(done); 813 b(done);
736 } else { 814 } else {
737 Jump(adaptor, RelocInfo::CODE_TARGET); 815 Jump(adaptor, RelocInfo::CODE_TARGET);
738 } 816 }
739 bind(&regular_invoke); 817 bind(&regular_invoke);
740 } 818 }
741 } 819 }
742 820
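When both parameter counts are compile-time immediates, InvokePrologue can prove a match statically and skip the adaptor entirely; otherwise the generated cmp/branch decides at run time and falls into ArgumentsAdaptorTrampoline. A sketch of the static decision (the sentinel value is an assumption here):

    bool DefinitelyMatches(int expected, int actual) {
      const int kDontAdaptArgumentsSentinel = -1;  // assumed value of
          // SharedFunctionInfo::kDontAdaptArgumentsSentinel
      return expected == actual || expected == kDontAdaptArgumentsSentinel;
    }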
743 821
744 void MacroAssembler::InvokeCode(Register code, 822 void MacroAssembler::InvokeCode(Register code,
745 const ParameterCount& expected, 823 const ParameterCount& expected,
746 const ParameterCount& actual, 824 const ParameterCount& actual,
747 InvokeFlag flag) { 825 InvokeFlag flag,
826 PostCallGenerator* post_call_generator) {
748 Label done; 827 Label done;
749 828
750 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag); 829 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done, flag,
830 post_call_generator);
751 if (flag == CALL_FUNCTION) { 831 if (flag == CALL_FUNCTION) {
752 Call(code); 832 Call(code);
833 if (post_call_generator != NULL) post_call_generator->Generate();
753 } else { 834 } else {
754 ASSERT(flag == JUMP_FUNCTION); 835 ASSERT(flag == JUMP_FUNCTION);
755 Jump(code); 836 Jump(code);
756 } 837 }
757 838
758 // Continue here if InvokePrologue does handle the invocation due to 839 // Continue here if InvokePrologue does handle the invocation due to
759 // mismatched parameter counts. 840 // mismatched parameter counts.
760 bind(&done); 841 bind(&done);
761 } 842 }
762 843
(...skipping 13 matching lines...)
776 } 857 }
777 858
778 // Continue here if InvokePrologue does handle the invocation due to 859 // Continue here if InvokePrologue does handle the invocation due to
779 // mismatched parameter counts. 860 // mismatched parameter counts.
780 bind(&done); 861 bind(&done);
781 } 862 }
782 863
783 864
784 void MacroAssembler::InvokeFunction(Register fun, 865 void MacroAssembler::InvokeFunction(Register fun,
785 const ParameterCount& actual, 866 const ParameterCount& actual,
786 InvokeFlag flag) { 867 InvokeFlag flag,
868 PostCallGenerator* post_call_generator) {
787 // Contract with called JS functions requires that function is passed in r1. 869 // Contract with called JS functions requires that function is passed in r1.
788 ASSERT(fun.is(r1)); 870 ASSERT(fun.is(r1));
789 871
790 Register expected_reg = r2; 872 Register expected_reg = r2;
791 Register code_reg = r3; 873 Register code_reg = r3;
792 874
793 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); 875 ldr(code_reg, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset));
794 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); 876 ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset));
795 ldr(expected_reg, 877 ldr(expected_reg,
796 FieldMemOperand(code_reg, 878 FieldMemOperand(code_reg,
797 SharedFunctionInfo::kFormalParameterCountOffset)); 879 SharedFunctionInfo::kFormalParameterCountOffset));
798 mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize)); 880 mov(expected_reg, Operand(expected_reg, ASR, kSmiTagSize));
799 ldr(code_reg, 881 ldr(code_reg,
800 FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); 882 FieldMemOperand(r1, JSFunction::kCodeEntryOffset));
801 883
802 ParameterCount expected(expected_reg); 884 ParameterCount expected(expected_reg);
803 InvokeCode(code_reg, expected, actual, flag); 885 InvokeCode(code_reg, expected, actual, flag, post_call_generator);
804 } 886 }
805 887
806 888
807 void MacroAssembler::InvokeFunction(JSFunction* function, 889 void MacroAssembler::InvokeFunction(JSFunction* function,
808 const ParameterCount& actual, 890 const ParameterCount& actual,
809 InvokeFlag flag) { 891 InvokeFlag flag) {
810 ASSERT(function->is_compiled()); 892 ASSERT(function->is_compiled());
811 893
812 // Get the function and setup the context. 894 // Get the function and setup the context.
813 mov(r1, Operand(Handle<JSFunction>(function))); 895 mov(r1, Operand(Handle<JSFunction>(function)));
(...skipping 28 matching lines...)
842 Label* fail) { 924 Label* fail) {
843 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); 925 ldrb(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
844 cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE)); 926 cmp(scratch, Operand(FIRST_JS_OBJECT_TYPE));
845 b(lt, fail); 927 b(lt, fail);
846 cmp(scratch, Operand(LAST_JS_OBJECT_TYPE)); 928 cmp(scratch, Operand(LAST_JS_OBJECT_TYPE));
847 b(gt, fail); 929 b(gt, fail);
848 } 930 }
849 931
850 932
851 void MacroAssembler::IsObjectJSStringType(Register object, 933 void MacroAssembler::IsObjectJSStringType(Register object,
852 Register scratch, 934 Register scratch,
853 Label* fail) { 935 Label* fail) {
854 ASSERT(kNotStringTag != 0); 936 ASSERT(kNotStringTag != 0);
855 937
856 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 938 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
857 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 939 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
858 tst(scratch, Operand(kIsNotStringMask)); 940 tst(scratch, Operand(kIsNotStringMask));
859 b(nz, fail); 941 b(ne, fail);
860 } 942 }
861 943
862 944
863 #ifdef ENABLE_DEBUGGER_SUPPORT 945 #ifdef ENABLE_DEBUGGER_SUPPORT
864 void MacroAssembler::DebugBreak() { 946 void MacroAssembler::DebugBreak() {
865 ASSERT(allow_stub_calls()); 947 ASSERT(allow_stub_calls());
866 mov(r0, Operand(0, RelocInfo::NONE)); 948 mov(r0, Operand(0, RelocInfo::NONE));
867 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak))); 949 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
868 CEntryStub ces(1); 950 CEntryStub ces(1);
869 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 951 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
(...skipping 48 matching lines...)
918 1000
919 void MacroAssembler::PopTryHandler() { 1001 void MacroAssembler::PopTryHandler() {
920 ASSERT_EQ(0, StackHandlerConstants::kNextOffset); 1002 ASSERT_EQ(0, StackHandlerConstants::kNextOffset);
921 pop(r1); 1003 pop(r1);
922 mov(ip, Operand(ExternalReference(Top::k_handler_address))); 1004 mov(ip, Operand(ExternalReference(Top::k_handler_address)));
923 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); 1005 add(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
924 str(r1, MemOperand(ip)); 1006 str(r1, MemOperand(ip));
925 } 1007 }
926 1008
927 1009
1010 void MacroAssembler::Throw(Register value) {
1011 // r0 is expected to hold the exception.
1012 if (!value.is(r0)) {
1013 mov(r0, value);
1014 }
1015
1016 // Adjust this code if not the case.
1017 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1018
1019 // Drop the sp to the top of the handler.
1020 mov(r3, Operand(ExternalReference(Top::k_handler_address)));
1021 ldr(sp, MemOperand(r3));
1022
1023 // Restore the next handler and frame pointer, discard handler state.
1024 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1025 pop(r2);
1026 str(r2, MemOperand(r3));
1027 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
1028 ldm(ia_w, sp, r3.bit() | fp.bit()); // r3: discarded state.
1029
1030 // Before returning we restore the context from the frame pointer if
1031 // not NULL. The frame pointer is NULL in the exception handler of a
1032 // JS entry frame.
1033 cmp(fp, Operand(0, RelocInfo::NONE));
1034 // Set cp to NULL if fp is NULL.
1035 mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
1036 // Restore cp otherwise.
1037 ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1038 #ifdef DEBUG
1039 if (FLAG_debug_code) {
1040 mov(lr, Operand(pc));
1041 }
1042 #endif
1043 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
1044 pop(pc);
1045 }
1046
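The STATIC_ASSERTs pin down the handler record that Throw unwinds; spelled out as a struct (the state offset is implied by the other three):

    #include <cstdint>

    struct StackHandler {   // StackHandlerConstants::kSize == 4 * kPointerSize
      StackHandler* next;   // kNextOffset == 0
      uintptr_t state;      // 1 * kPointerSize (implied)
      uintptr_t fp;         // kFPOffset == 2 * kPointerSize
      uintptr_t pc;         // kPCOffset == 3 * kPointerSize
    };

    // Throw pops one record: top = top->next, then restores fp and jumps to pc.
    StackHandler* PopHandler(StackHandler** top) {
      StackHandler* h = *top;
      *top = h->next;  // pop(r2); str(r2, MemOperand(r3))
      return h;
    }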
1047
1048 void MacroAssembler::ThrowUncatchable(UncatchableExceptionType type,
1049 Register value) {
1050 // Adjust this code if not the case.
1051 STATIC_ASSERT(StackHandlerConstants::kSize == 4 * kPointerSize);
1052
1053 // r0 is expected to hold the exception.
1054 if (!value.is(r0)) {
1055 mov(r0, value);
1056 }
1057
1058 // Drop sp to the top stack handler.
1059 mov(r3, Operand(ExternalReference(Top::k_handler_address)));
1060 ldr(sp, MemOperand(r3));
1061
1062 // Unwind the handlers until the ENTRY handler is found.
1063 Label loop, done;
1064 bind(&loop);
1065 // Load the type of the current stack handler.
1066 const int kStateOffset = StackHandlerConstants::kStateOffset;
1067 ldr(r2, MemOperand(sp, kStateOffset));
1068 cmp(r2, Operand(StackHandler::ENTRY));
1069 b(eq, &done);
1070 // Fetch the next handler in the list.
1071 const int kNextOffset = StackHandlerConstants::kNextOffset;
1072 ldr(sp, MemOperand(sp, kNextOffset));
1073 jmp(&loop);
1074 bind(&done);
1075
1077 // Set the top handler address to the next handler past the current ENTRY handler.
1077 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1078 pop(r2);
1079 str(r2, MemOperand(r3));
1080
1081 if (type == OUT_OF_MEMORY) {
1082 // Set external caught exception to false.
1083 ExternalReference external_caught(Top::k_external_caught_exception_address);
1084 mov(r0, Operand(false, RelocInfo::NONE));
1085 mov(r2, Operand(external_caught));
1086 str(r0, MemOperand(r2));
1087
1088 // Set pending exception and r0 to out of memory exception.
1089 Failure* out_of_memory = Failure::OutOfMemoryException();
1090 mov(r0, Operand(reinterpret_cast<int32_t>(out_of_memory)));
1091 mov(r2, Operand(ExternalReference(Top::k_pending_exception_address)));
1092 str(r0, MemOperand(r2));
1093 }
1094
1095 // Stack layout at this point. See also StackHandlerConstants.
1096 // sp -> state (ENTRY)
1097 // fp
1098 // lr
1099
1100 // Discard handler state (r2 is not used) and restore frame pointer.
1101 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 2 * kPointerSize);
1102 ldm(ia_w, sp, r2.bit() | fp.bit()); // r2: discarded state.
1103 // Before returning we restore the context from the frame pointer if
1104 // not NULL. The frame pointer is NULL in the exception handler of a
1105 // JS entry frame.
1106 cmp(fp, Operand(0, RelocInfo::NONE));
1107 // Set cp to NULL if fp is NULL.
1108 mov(cp, Operand(0, RelocInfo::NONE), LeaveCC, eq);
1109 // Restore cp otherwise.
1110 ldr(cp, MemOperand(fp, StandardFrameConstants::kContextOffset), ne);
1111 #ifdef DEBUG
1112 if (FLAG_debug_code) {
1113 mov(lr, Operand(pc));
1114 }
1115 #endif
1116 STATIC_ASSERT(StackHandlerConstants::kPCOffset == 3 * kPointerSize);
1117 pop(pc);
1118 }
1119
1120
928 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, 1121 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
929 Register scratch, 1122 Register scratch,
930 Label* miss) { 1123 Label* miss) {
931 Label same_contexts; 1124 Label same_contexts;
932 1125
933 ASSERT(!holder_reg.is(scratch)); 1126 ASSERT(!holder_reg.is(scratch));
934 ASSERT(!holder_reg.is(ip)); 1127 ASSERT(!holder_reg.is(ip));
935 ASSERT(!scratch.is(ip)); 1128 ASSERT(!scratch.is(ip));
936 1129
937 // Load current lexical context from the stack frame. 1130 // Load current lexical context from the stack frame.
(...skipping 125 matching lines...)
1063 ldr(ip, MemOperand(topaddr)); 1256 ldr(ip, MemOperand(topaddr));
1064 cmp(result, ip); 1257 cmp(result, ip);
1065 Check(eq, "Unexpected allocation top"); 1258 Check(eq, "Unexpected allocation top");
1066 } 1259 }
1067 // Load allocation limit into ip. Result already contains allocation top. 1260 // Load allocation limit into ip. Result already contains allocation top.
1068 ldr(ip, MemOperand(topaddr, limit - top)); 1261 ldr(ip, MemOperand(topaddr, limit - top));
1069 } 1262 }
1070 1263
1071 // Calculate new top and bail out if new space is exhausted. Use result 1264 // Calculate new top and bail out if new space is exhausted. Use result
1072 // to calculate the new top. 1265 // to calculate the new top.
1073 add(scratch2, result, Operand(obj_size_reg)); 1266 add(scratch2, result, Operand(obj_size_reg), SetCC);
1267 b(cs, gc_required);
1074 cmp(scratch2, Operand(ip)); 1268 cmp(scratch2, Operand(ip));
1075 b(hi, gc_required); 1269 b(hi, gc_required);
1076 str(scratch2, MemOperand(topaddr)); 1270 str(scratch2, MemOperand(topaddr));
1077 1271
1078 // Tag object if requested. 1272 // Tag object if requested.
1079 if ((flags & TAG_OBJECT) != 0) { 1273 if ((flags & TAG_OBJECT) != 0) {
1080 add(result, result, Operand(kHeapObjectTag)); 1274 add(result, result, Operand(kHeapObjectTag));
1081 } 1275 }
1082 } 1276 }
1083 1277
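The new SetCC/b(cs, ...) pair in this chunk closes an overflow hole: a huge requested size could wrap the new top pointer past zero and then compare below the limit. A host-side model of the fixed fast path:

    #include <cstdint>

    bool TryBumpAllocate(uintptr_t* top, uintptr_t limit, uintptr_t size,
                         uintptr_t* result) {
      uintptr_t new_top = *top + size;
      if (new_top < *top) return false;   // carry set -> b(cs, gc_required)
      if (new_top > limit) return false;  // b(hi, gc_required)
      *result = *top;                     // object starts at the old top
      *top = new_top;                     // str(scratch2, MemOperand(topaddr))
      return true;
    }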
(...skipping 58 matching lines...)
1142 Check(eq, "Unexpected allocation top"); 1336 Check(eq, "Unexpected allocation top");
1143 } 1337 }
1144 // Load allocation limit into ip. Result already contains allocation top. 1338 // Load allocation limit into ip. Result already contains allocation top.
1145 ldr(ip, MemOperand(topaddr, limit - top)); 1339 ldr(ip, MemOperand(topaddr, limit - top));
1146 } 1340 }
1147 1341
1148 // Calculate new top and bail out if new space is exhausted. Use result 1342 // Calculate new top and bail out if new space is exhausted. Use result
1149 // to calculate the new top. Object size may be in words so a shift is 1343 // to calculate the new top. Object size may be in words so a shift is
1150 // required to get the number of bytes. 1344 // required to get the number of bytes.
1151 if ((flags & SIZE_IN_WORDS) != 0) { 1345 if ((flags & SIZE_IN_WORDS) != 0) {
1152 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2)); 1346 add(scratch2, result, Operand(object_size, LSL, kPointerSizeLog2), SetCC);
1153 } else { 1347 } else {
1154 add(scratch2, result, Operand(object_size)); 1348 add(scratch2, result, Operand(object_size), SetCC);
1155 } 1349 }
1350 b(cs, gc_required);
1156 cmp(scratch2, Operand(ip)); 1351 cmp(scratch2, Operand(ip));
1157 b(hi, gc_required); 1352 b(hi, gc_required);
1158 1353
1159 // Update allocation top. result temporarily holds the new top. 1354 // Update allocation top. result temporarily holds the new top.
1160 if (FLAG_debug_code) { 1355 if (FLAG_debug_code) {
1161 tst(scratch2, Operand(kObjectAlignmentMask)); 1356 tst(scratch2, Operand(kObjectAlignmentMask));
1162 Check(eq, "Unaligned allocation in new space"); 1357 Check(eq, "Unaligned allocation in new space");
1163 } 1358 }
1164 str(scratch2, MemOperand(topaddr)); 1359 str(scratch2, MemOperand(topaddr));
1165 1360
(...skipping 142 matching lines...)
1308 cmp(type_reg, Operand(type)); 1503 cmp(type_reg, Operand(type));
1309 } 1504 }
1310 1505
1311 1506
1312 void MacroAssembler::CheckMap(Register obj, 1507 void MacroAssembler::CheckMap(Register obj,
1313 Register scratch, 1508 Register scratch,
1314 Handle<Map> map, 1509 Handle<Map> map,
1315 Label* fail, 1510 Label* fail,
1316 bool is_heap_object) { 1511 bool is_heap_object) {
1317 if (!is_heap_object) { 1512 if (!is_heap_object) {
1318 BranchOnSmi(obj, fail); 1513 JumpIfSmi(obj, fail);
1319 } 1514 }
1320 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 1515 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1321 mov(ip, Operand(map)); 1516 mov(ip, Operand(map));
1322 cmp(scratch, ip); 1517 cmp(scratch, ip);
1323 b(ne, fail); 1518 b(ne, fail);
1324 } 1519 }
1325 1520
1326 1521
1327 void MacroAssembler::CheckMap(Register obj, 1522 void MacroAssembler::CheckMap(Register obj,
1328 Register scratch, 1523 Register scratch,
1329 Heap::RootListIndex index, 1524 Heap::RootListIndex index,
1330 Label* fail, 1525 Label* fail,
1331 bool is_heap_object) { 1526 bool is_heap_object) {
1332 if (!is_heap_object) { 1527 if (!is_heap_object) {
1333 BranchOnSmi(obj, fail); 1528 JumpIfSmi(obj, fail);
1334 } 1529 }
1335 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 1530 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1336 LoadRoot(ip, index); 1531 LoadRoot(ip, index);
1337 cmp(scratch, ip); 1532 cmp(scratch, ip);
1338 b(ne, fail); 1533 b(ne, fail);
1339 } 1534 }
1340 1535
1341 1536
1342 void MacroAssembler::TryGetFunctionPrototype(Register function, 1537 void MacroAssembler::TryGetFunctionPrototype(Register function,
1343 Register result, 1538 Register result,
1344 Register scratch, 1539 Register scratch,
1345 Label* miss) { 1540 Label* miss) {
1346 // Check that the receiver isn't a smi. 1541 // Check that the receiver isn't a smi.
1347 BranchOnSmi(function, miss); 1542 JumpIfSmi(function, miss);
1348 1543
1349 // Check that the function really is a function. Load map into result reg. 1544 // Check that the function really is a function. Load map into result reg.
1350 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); 1545 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
1351 b(ne, miss); 1546 b(ne, miss);
1352 1547
1353 // Make sure that the function has an instance prototype. 1548 // Make sure that the function has an instance prototype.
1354 Label non_instance; 1549 Label non_instance;
1355 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); 1550 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
1356 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); 1551 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
1357 b(ne, &non_instance); 1552 b(ne, &non_instance);
(...skipping 22 matching lines...)
1380 // in initial map. 1575 // in initial map.
1381 bind(&non_instance); 1576 bind(&non_instance);
1382 ldr(result, FieldMemOperand(result, Map::kConstructorOffset)); 1577 ldr(result, FieldMemOperand(result, Map::kConstructorOffset));
1383 1578
1384 // All done. 1579 // All done.
1385 bind(&done); 1580 bind(&done);
1386 } 1581 }
1387 1582
1388 1583
1389 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) { 1584 void MacroAssembler::CallStub(CodeStub* stub, Condition cond) {
1390 ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs 1585 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1391 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond); 1586 Call(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1392 } 1587 }
1393 1588
1394 1589
1395 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { 1590 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
1396 ASSERT(allow_stub_calls()); // stub calls are not allowed in some stubs 1591 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1397 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); 1592 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1398 } 1593 }
1399 1594
1400 1595
1596 MaybeObject* MacroAssembler::TryTailCallStub(CodeStub* stub, Condition cond) {
1597 ASSERT(allow_stub_calls()); // Stub calls are not allowed in some stubs.
1598 Object* result;
1599 { MaybeObject* maybe_result = stub->TryGetCode();
1600 if (!maybe_result->ToObject(&result)) return maybe_result;
1601 }
1602 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
1603 return result;
1604 }
1605
1606
1607 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
1608 return ref0.address() - ref1.address();
1609 }
1610
1611
1612 MaybeObject* MacroAssembler::TryCallApiFunctionAndReturn(
1613 ApiFunction* function, int stack_space) {
1614 ExternalReference next_address =
1615 ExternalReference::handle_scope_next_address();
1616 const int kNextOffset = 0;
1617 const int kLimitOffset = AddressOffset(
1618 ExternalReference::handle_scope_limit_address(),
1619 next_address);
1620 const int kLevelOffset = AddressOffset(
1621 ExternalReference::handle_scope_level_address(),
1622 next_address);
1623
1624 // Allocate HandleScope in callee-save registers.
1625 mov(r7, Operand(next_address));
1626 ldr(r4, MemOperand(r7, kNextOffset));
1627 ldr(r5, MemOperand(r7, kLimitOffset));
1628 ldr(r6, MemOperand(r7, kLevelOffset));
1629 add(r6, r6, Operand(1));
1630 str(r6, MemOperand(r7, kLevelOffset));
1631
1632 // Native call returns to the DirectCEntry stub which redirects to the
1633 // return address pushed on stack (could have moved after GC).
1634 // DirectCEntry stub itself is generated early and never moves.
1635 DirectCEntryStub stub;
1636 stub.GenerateCall(this, function);
1637
1638 Label promote_scheduled_exception;
1639 Label delete_allocated_handles;
1640 Label leave_exit_frame;
1641
1642 // If result is non-zero, dereference to get the result value
1643 // otherwise set it to undefined.
1644 cmp(r0, Operand(0));
1645 LoadRoot(r0, Heap::kUndefinedValueRootIndex, eq);
1646 ldr(r0, MemOperand(r0), ne);
1647
1648 // No more valid handles (the result handle was the last one). Restore
1649 // previous handle scope.
1650 str(r4, MemOperand(r7, kNextOffset));
1651 if (FLAG_debug_code) {
1652 ldr(r1, MemOperand(r7, kLevelOffset));
1653 cmp(r1, r6);
1654 Check(eq, "Unexpected level after return from api call");
1655 }
1656 sub(r6, r6, Operand(1));
1657 str(r6, MemOperand(r7, kLevelOffset));
1658 ldr(ip, MemOperand(r7, kLimitOffset));
1659 cmp(r5, ip);
1660 b(ne, &delete_allocated_handles);
1661
1662 // Check if the function scheduled an exception.
1663 bind(&leave_exit_frame);
1664 LoadRoot(r4, Heap::kTheHoleValueRootIndex);
1665 mov(ip, Operand(ExternalReference::scheduled_exception_address()));
1666 ldr(r5, MemOperand(ip));
1667 cmp(r4, r5);
1668 b(ne, &promote_scheduled_exception);
1669
1670 // LeaveExitFrame expects unwind space to be in a register.
1671 mov(r4, Operand(stack_space));
1672 LeaveExitFrame(false, r4);
1673 mov(pc, lr);
1674
1675 bind(&promote_scheduled_exception);
1676 MaybeObject* result = TryTailCallExternalReference(
1677 ExternalReference(Runtime::kPromoteScheduledException), 0, 1);
1678 if (result->IsFailure()) {
1679 return result;
1680 }
1681
1682 // HandleScope limit has changed. Delete allocated extensions.
1683 bind(&delete_allocated_handles);
1684 str(r5, MemOperand(r7, kLimitOffset));
1685 mov(r4, r0);
1686 PrepareCallCFunction(0, r5);
1687 CallCFunction(ExternalReference::delete_handle_scope_extensions(), 0);
1688 mov(r0, r4);
1689 jmp(&leave_exit_frame);
1690
1691 return result;
1692 }
1693
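TryCallApiFunctionAndReturn brackets the native call with manual HandleScope bookkeeping: save next/limit/level in callee-saved registers, bump the level, and after the call restore them, taking the slow path if the limit moved. A sketch of the data being juggled (the field layout is an assumption inferred from the three ExternalReferences used above):

    struct HandleScopeData {
      void** next;   // kept in r4 across the call
      void** limit;  // kept in r5 across the call
      int level;     // kept in r6 across the call
    };

    void EnterApiCall(HandleScopeData* d, HandleScopeData* saved) {
      *saved = *d;
      d->level++;  // add(r6, r6, Operand(1)); str(r6, ...)
    }

    bool LeaveApiCall(HandleScopeData* d, const HandleScopeData& saved) {
      d->next = saved.next;
      d->level--;
      // If the limit changed, extensions were allocated and must be deleted.
      return d->limit == saved.limit;  // false -> delete_allocated_handles
    }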
1694
1401 void MacroAssembler::IllegalOperation(int num_arguments) { 1695 void MacroAssembler::IllegalOperation(int num_arguments) {
1402 if (num_arguments > 0) { 1696 if (num_arguments > 0) {
1403 add(sp, sp, Operand(num_arguments * kPointerSize)); 1697 add(sp, sp, Operand(num_arguments * kPointerSize));
1404 } 1698 }
1405 LoadRoot(r0, Heap::kUndefinedValueRootIndex); 1699 LoadRoot(r0, Heap::kUndefinedValueRootIndex);
1406 } 1700 }
1407 1701
1408 1702
1409 void MacroAssembler::IndexFromHash(Register hash, Register index) { 1703 void MacroAssembler::IndexFromHash(Register hash, Register index) {
1410 // If the hash field contains an array index pick it out. The assert checks 1704 // If the hash field contains an array index pick it out. The assert checks
(...skipping 25 matching lines...)
1436 DwVfpRegister result, 1730 DwVfpRegister result,
1437 Register scratch1, 1731 Register scratch1,
1438 Register scratch2, 1732 Register scratch2,
1439 Register heap_number_map, 1733 Register heap_number_map,
1440 SwVfpRegister scratch3, 1734 SwVfpRegister scratch3,
1441 Label* not_number, 1735 Label* not_number,
1442 ObjectToDoubleFlags flags) { 1736 ObjectToDoubleFlags flags) {
1443 Label done; 1737 Label done;
1444 if ((flags & OBJECT_NOT_SMI) == 0) { 1738 if ((flags & OBJECT_NOT_SMI) == 0) {
1445 Label not_smi; 1739 Label not_smi;
1446 BranchOnNotSmi(object, &not_smi); 1740 JumpIfNotSmi(object, &not_smi);
1447 // Remove smi tag and convert to double. 1741 // Remove smi tag and convert to double.
1448 mov(scratch1, Operand(object, ASR, kSmiTagSize)); 1742 mov(scratch1, Operand(object, ASR, kSmiTagSize));
1449 vmov(scratch3, scratch1); 1743 vmov(scratch3, scratch1);
1450 vcvt_f64_s32(result, scratch3); 1744 vcvt_f64_s32(result, scratch3);
1451 b(&done); 1745 b(&done);
1452 bind(&not_smi); 1746 bind(&not_smi);
1453 } 1747 }
1454 // Check for heap number and load double value from it. 1748 // Check for heap number and load double value from it.
1455 ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); 1749 ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
1456 sub(scratch2, object, Operand(kHeapObjectTag)); 1750 sub(scratch2, object, Operand(kHeapObjectTag));
(...skipping 25 matching lines...)
1482 } 1776 }
1483 1777
1484 1778
1485 // Tries to get a signed int32 out of a double precision floating point heap 1779 // Tries to get a signed int32 out of a double precision floating point heap
1486 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the 1780 // number. Rounds towards 0. Branch to 'not_int32' if the double is out of the
1487 // 32-bit signed integer range. 1781 // 32-bit signed integer range.
1488 void MacroAssembler::ConvertToInt32(Register source, 1782 void MacroAssembler::ConvertToInt32(Register source,
1489 Register dest, 1783 Register dest,
1490 Register scratch, 1784 Register scratch,
1491 Register scratch2, 1785 Register scratch2,
1786 DwVfpRegister double_scratch,
1492 Label *not_int32) { 1787 Label *not_int32) {
1493 if (CpuFeatures::IsSupported(VFP3)) { 1788 if (CpuFeatures::IsSupported(VFP3)) {
1494 CpuFeatures::Scope scope(VFP3); 1789 CpuFeatures::Scope scope(VFP3);
1495 sub(scratch, source, Operand(kHeapObjectTag)); 1790 sub(scratch, source, Operand(kHeapObjectTag));
1496 vldr(d0, scratch, HeapNumber::kValueOffset); 1791 vldr(double_scratch, scratch, HeapNumber::kValueOffset);
1497 vcvt_s32_f64(s0, d0); 1792 vcvt_s32_f64(double_scratch.low(), double_scratch);
1498 vmov(dest, s0); 1793 vmov(dest, double_scratch.low());
1499 // Signed vcvt instruction will saturate to the minimum (0x80000000) or 1794 // Signed vcvt instruction will saturate to the minimum (0x80000000) or
1500 // maximun (0x7fffffff) signed 32bits integer when the double is out of 1795 // maximun (0x7fffffff) signed 32bits integer when the double is out of
1501 // range. When substracting one, the minimum signed integer becomes the 1796 // range. When substracting one, the minimum signed integer becomes the
1502 // maximun signed integer. 1797 // maximun signed integer.
1503 sub(scratch, dest, Operand(1)); 1798 sub(scratch, dest, Operand(1));
1504 cmp(scratch, Operand(LONG_MAX - 1)); 1799 cmp(scratch, Operand(LONG_MAX - 1));
1505 // If equal then dest was LONG_MAX, if greater dest was LONG_MIN. 1800 // If equal then dest was LONG_MAX, if greater dest was LONG_MIN.
1506 b(ge, not_int32); 1801 b(ge, not_int32);
1507 } else { 1802 } else {
1508 // This code is faster for doubles that are in the ranges -0x7fffffff to 1803 // This code is faster for doubles that are in the ranges -0x7fffffff to
(...skipping 76 matching lines...)
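The range check after vcvt in the chunk above relies on saturation: out-of-range doubles convert to exactly INT32_MIN or INT32_MAX, and a single compare of dest - 1 against LONG_MAX - 1 catches both. A host-side model:

    #include <cstdint>
    #include <limits>

    // True when dest may be a saturated (out-of-range) conversion result.
    bool MaybeSaturated(int32_t dest) {
      // sub(scratch, dest, 1) with ARM wraparound: INT32_MIN - 1 == INT32_MAX.
      int32_t probe = static_cast<int32_t>(static_cast<uint32_t>(dest) - 1u);
      return probe >= std::numeric_limits<int32_t>::max() - 1;  // b(ge, ...)
    }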
1585 int num_least_bits) { 1880 int num_least_bits) {
1586 if (CpuFeatures::IsSupported(ARMv7)) { 1881 if (CpuFeatures::IsSupported(ARMv7)) {
1587 ubfx(dst, src, kSmiTagSize, num_least_bits); 1882 ubfx(dst, src, kSmiTagSize, num_least_bits);
1588 } else { 1883 } else {
1589 mov(dst, Operand(src, ASR, kSmiTagSize)); 1884 mov(dst, Operand(src, ASR, kSmiTagSize));
1590 and_(dst, dst, Operand((1 << num_least_bits) - 1)); 1885 and_(dst, dst, Operand((1 << num_least_bits) - 1));
1591 } 1886 }
1592 } 1887 }
1593 1888
1594 1889
1890 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1891 Register src,
1892 int num_least_bits) {
1893 and_(dst, src, Operand((1 << num_least_bits) - 1));
1894 }
1895
1896
1595 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) { 1897 void MacroAssembler::CallRuntime(Runtime::Function* f, int num_arguments) {
1596 // All parameters are on the stack. r0 has the return value after call. 1898 // All parameters are on the stack. r0 has the return value after call.
1597 1899
1598 // If the expected number of arguments of the runtime function is 1900 // If the expected number of arguments of the runtime function is
1599 // constant, we check that the actual number of arguments match the 1901 // constant, we check that the actual number of arguments match the
1600 // expectation. 1902 // expectation.
1601 if (f->nargs >= 0 && f->nargs != num_arguments) { 1903 if (f->nargs >= 0 && f->nargs != num_arguments) {
1602 IllegalOperation(num_arguments); 1904 IllegalOperation(num_arguments);
1603 return; 1905 return;
1604 } 1906 }
(...skipping 39 matching lines...)
1644 int result_size) { 1946 int result_size) {
1645 // TODO(1236192): Most runtime routines don't need the number of 1947 // TODO(1236192): Most runtime routines don't need the number of
1646 // arguments passed in because it is constant. At some point we 1948 // arguments passed in because it is constant. At some point we
1647 // should remove this need and make the runtime routine entry code 1949 // should remove this need and make the runtime routine entry code
1648 // smarter. 1950 // smarter.
1649 mov(r0, Operand(num_arguments)); 1951 mov(r0, Operand(num_arguments));
1650 JumpToExternalReference(ext); 1952 JumpToExternalReference(ext);
1651 } 1953 }
1652 1954
1653 1955
1956 MaybeObject* MacroAssembler::TryTailCallExternalReference(
1957 const ExternalReference& ext, int num_arguments, int result_size) {
1958 // TODO(1236192): Most runtime routines don't need the number of
1959 // arguments passed in because it is constant. At some point we
1960 // should remove this need and make the runtime routine entry code
1961 // smarter.
1962 mov(r0, Operand(num_arguments));
1963 return TryJumpToExternalReference(ext);
1964 }
1965
1966
1654 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, 1967 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid,
1655 int num_arguments, 1968 int num_arguments,
1656 int result_size) { 1969 int result_size) {
1657 TailCallExternalReference(ExternalReference(fid), num_arguments, result_size); 1970 TailCallExternalReference(ExternalReference(fid), num_arguments, result_size);
1658 } 1971 }
1659 1972
1660 1973
1661 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) { 1974 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
1662 #if defined(__thumb__) 1975 #if defined(__thumb__)
1663 // Thumb mode builtin. 1976 // Thumb mode builtin.
1664 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1); 1977 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
1665 #endif 1978 #endif
1666 mov(r1, Operand(builtin)); 1979 mov(r1, Operand(builtin));
1667 CEntryStub stub(1); 1980 CEntryStub stub(1);
1668 Jump(stub.GetCode(), RelocInfo::CODE_TARGET); 1981 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
1669 } 1982 }
1670 1983
1671 1984
1985 MaybeObject* MacroAssembler::TryJumpToExternalReference(
1986 const ExternalReference& builtin) {
1987 #if defined(__thumb__)
1988 // Thumb mode builtin.
1989 ASSERT((reinterpret_cast<intptr_t>(builtin.address()) & 1) == 1);
1990 #endif
1991 mov(r1, Operand(builtin));
1992 CEntryStub stub(1);
1993 return TryTailCallStub(&stub);
1994 }
1995
1996
1672 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, 1997 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id,
1673 InvokeJSFlags flags) { 1998 InvokeJSFlags flags,
1999 PostCallGenerator* post_call_generator) {
1674 GetBuiltinEntry(r2, id); 2000 GetBuiltinEntry(r2, id);
1675 if (flags == CALL_JS) { 2001 if (flags == CALL_JS) {
1676 Call(r2); 2002 Call(r2);
2003 if (post_call_generator != NULL) post_call_generator->Generate();
1677 } else { 2004 } else {
1678 ASSERT(flags == JUMP_JS); 2005 ASSERT(flags == JUMP_JS);
1679 Jump(r2); 2006 Jump(r2);
1680 } 2007 }
1681 } 2008 }
1682 2009
1683 2010
1684 void MacroAssembler::GetBuiltinFunction(Register target, 2011 void MacroAssembler::GetBuiltinFunction(Register target,
1685 Builtins::JavaScript id) { 2012 Builtins::JavaScript id) {
1686 // Load the builtins object into target register. 2013 // Load the builtins object into target register.
(...skipping 40 matching lines...)
1727 ASSERT(value > 0); 2054 ASSERT(value > 0);
1728 if (FLAG_native_code_counters && counter->Enabled()) { 2055 if (FLAG_native_code_counters && counter->Enabled()) {
1729 mov(scratch2, Operand(ExternalReference(counter))); 2056 mov(scratch2, Operand(ExternalReference(counter)));
1730 ldr(scratch1, MemOperand(scratch2)); 2057 ldr(scratch1, MemOperand(scratch2));
1731 sub(scratch1, scratch1, Operand(value)); 2058 sub(scratch1, scratch1, Operand(value));
1732 str(scratch1, MemOperand(scratch2)); 2059 str(scratch1, MemOperand(scratch2));
1733 } 2060 }
1734 } 2061 }
1735 2062
1736 2063
1737 void MacroAssembler::Assert(Condition cc, const char* msg) { 2064 void MacroAssembler::Assert(Condition cond, const char* msg) {
1738 if (FLAG_debug_code) 2065 if (FLAG_debug_code)
1739 Check(cc, msg); 2066 Check(cond, msg);
1740 } 2067 }
1741 2068
1742 2069
1743 void MacroAssembler::AssertRegisterIsRoot(Register reg, 2070 void MacroAssembler::AssertRegisterIsRoot(Register reg,
1744 Heap::RootListIndex index) { 2071 Heap::RootListIndex index) {
1745 if (FLAG_debug_code) { 2072 if (FLAG_debug_code) {
1746 LoadRoot(ip, index); 2073 LoadRoot(ip, index);
1747 cmp(reg, ip); 2074 cmp(reg, ip);
1748 Check(eq, "Register did not match expected root"); 2075 Check(eq, "Register did not match expected root");
1749 } 2076 }
(...skipping 12 matching lines...)
1762 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); 2089 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
1763 cmp(elements, ip); 2090 cmp(elements, ip);
1764 b(eq, &ok); 2091 b(eq, &ok);
1765 Abort("JSObject with fast elements map has slow elements"); 2092 Abort("JSObject with fast elements map has slow elements");
1766 bind(&ok); 2093 bind(&ok);
1767 pop(elements); 2094 pop(elements);
1768 } 2095 }
1769 } 2096 }
1770 2097
1771 2098
1772 void MacroAssembler::Check(Condition cc, const char* msg) { 2099 void MacroAssembler::Check(Condition cond, const char* msg) {
1773 Label L; 2100 Label L;
1774 b(cc, &L); 2101 b(cond, &L);
1775 Abort(msg); 2102 Abort(msg);
1776 // will not return here 2103 // will not return here
1777 bind(&L); 2104 bind(&L);
1778 } 2105 }
1779 2106
1780 2107
 void MacroAssembler::Abort(const char* msg) {
   Label abort_start;
   bind(&abort_start);
   // We want to pass the msg string like a smi to avoid GC
   // problems, however msg is not guaranteed to be aligned
   // properly. Instead, we pass an aligned pointer that is
   // a proper v8 smi, but also pass the alignment difference
   // from the real pointer as a smi.
   intptr_t p1 = reinterpret_cast<intptr_t>(msg);
   intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
   ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
 #ifdef DEBUG
   if (msg != NULL) {
     RecordComment("Abort message: ");
     RecordComment(msg);
   }
 #endif
   // Disable stub call restrictions to always allow calls to abort.
-  set_allow_stub_calls(true);
+  AllowStubCallsScope allow_scope(this, true);

   mov(r0, Operand(p0));
   push(r0);
   mov(r0, Operand(Smi::FromInt(p1 - p0)));
   push(r0);
   CallRuntime(Runtime::kAbort, 2);
   // will not return here
   if (is_const_pool_blocked()) {
     // If the calling code cares about the exact number of
     // instructions generated, we insert padding here to keep the size
(...skipping 47 matching lines...)
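The comment at the top of Abort() describes a small but subtle trick: the raw msg pointer could be mistaken for a heap pointer by the GC, so the code passes an aligned, smi-tagged base plus the alignment difference, and the runtime reassembles the pointer. A minimal standalone C++ sketch of that arithmetic, assuming this era's 32-bit layout (kSmiTag == 0, a one-bit tag):

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumption: 32-bit V8, one tag bit
    const intptr_t kSmiTag = 0;

    // Split an arbitrary pointer into a smi-safe base plus a small delta.
    void SplitPointer(const char* msg, intptr_t* base, intptr_t* delta) {
      intptr_t p1 = reinterpret_cast<intptr_t>(msg);
      intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;  // clear the tag bit
      *base = p0;        // looks like a smi, so the GC leaves it alone
      *delta = p1 - p0;  // 0 or 1, small enough to pass as a smi itself
    }

    const char* JoinPointer(intptr_t base, intptr_t delta) {
      return reinterpret_cast<const char*>(base + delta);
    }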
     Label ok, fail;
     CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, false);
     b(&ok);
     bind(&fail);
     Abort("Global functions must have initial map");
     bind(&ok);
   }
 }


+void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
+    Register reg,
+    Register scratch,
+    Label* not_power_of_two_or_zero) {
+  sub(scratch, reg, Operand(1), SetCC);
+  b(mi, not_power_of_two_or_zero);
+  tst(scratch, reg);
+  b(ne, not_power_of_two_or_zero);
+}


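The new helper leans on the classic identity that x is a power of two exactly when x > 0 and (x & (x - 1)) == 0: subtracting one flips the lowest set bit and everything below it, so the AND is zero only if a single bit was set. A standalone C++ sketch of the same predicate (not part of the patch):

    #include <cstdint>

    bool IsPowerOfTwo(int32_t x) {
      if (x <= 0) return false;    // the 'mi' branch: (x - 1) went negative
      return (x & (x - 1)) == 0;   // the tst scratch, reg test
    }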
 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
                                       Register reg2,
                                       Label* on_not_both_smi) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(reg1, Operand(kSmiTagMask));
   tst(reg2, Operand(kSmiTagMask), eq);
   b(ne, on_not_both_smi);
 }


 void MacroAssembler::JumpIfEitherSmi(Register reg1,
                                      Register reg2,
                                      Label* on_either_smi) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(reg1, Operand(kSmiTagMask));
   tst(reg2, Operand(kSmiTagMask), ne);
   b(eq, on_either_smi);
 }


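Both helpers above use ARM conditional execution to fold two tag tests into one branch: the second tst only executes when the first left the flags in the right state, so a single branch serves both registers. Assuming kSmiTag == 0 with a one-bit tag mask, the flag sequences are equivalent to this C++ sketch:

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumption: low bit clear marks a smi

    bool BothSmi(intptr_t a, intptr_t b) {
      // tst reg1; tst reg2 (if eq); b ne: taken unless both tag bits are clear.
      return ((a | b) & kSmiTagMask) == 0;
    }

    bool EitherSmi(intptr_t a, intptr_t b) {
      // tst reg1; tst reg2 (if ne); b eq: taken if at least one tag bit is clear.
      return ((a & kSmiTagMask) == 0) || ((b & kSmiTagMask) == 0);
    }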
 void MacroAssembler::AbortIfSmi(Register object) {
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   tst(object, Operand(kSmiTagMask));
   Assert(ne, "Operand is a smi");
 }


+void MacroAssembler::AbortIfNotSmi(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  tst(object, Operand(kSmiTagMask));
+  Assert(eq, "Operand is not smi");
+}


+void MacroAssembler::AbortIfNotString(Register object) {
+  STATIC_ASSERT(kSmiTag == 0);
+  tst(object, Operand(kSmiTagMask));
+  Assert(ne, "Operand is not a string");
+  push(object);
+  ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
+  CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
+  pop(object);
+  Assert(lo, "Operand is not a string");
+}


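AbortIfNotString works because V8 orders instance types so that every string type sorts below FIRST_NONSTRING_TYPE; the 'lo' condition is ARM's unsigned lower-than. A sketch of the check, with a hypothetical stand-in value for the constant:

    #include <cstdint>

    // Stand-in value for illustration; the real constant lives in V8's type enum.
    const uint8_t FIRST_NONSTRING_TYPE = 0x80;

    bool IsStringInstanceType(uint8_t instance_type) {
      return instance_type < FIRST_NONSTRING_TYPE;  // 'lo': unsigned lower-than
    }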
+void MacroAssembler::AbortIfNotRootValue(Register src,
+                                         Heap::RootListIndex root_value_index,
+                                         const char* message) {
+  ASSERT(!src.is(ip));
+  LoadRoot(ip, root_value_index);
+  cmp(src, ip);
+  Assert(eq, message);
+}


+void MacroAssembler::JumpIfNotHeapNumber(Register object,
+                                         Register heap_number_map,
+                                         Register scratch,
+                                         Label* on_not_heap_number) {
+  ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
+  AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
+  cmp(scratch, heap_number_map);
+  b(ne, on_not_heap_number);
+}


 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
     Register first,
     Register second,
     Register scratch1,
     Register scratch2,
     Label* failure) {
   // Test that both first and second are sequential ASCII strings.
   // Assume that they are non-smis.
   ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
   ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
   ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
   ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));

   JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
                                                scratch2,
                                                scratch1,
                                                scratch2,
                                                failure);
 }

 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
                                                          Register second,
                                                          Register scratch1,
                                                          Register scratch2,
                                                          Label* failure) {
   // Check that neither is a smi.
-  ASSERT_EQ(0, kSmiTag);
+  STATIC_ASSERT(kSmiTag == 0);
   and_(scratch1, first, Operand(second));
   tst(scratch1, Operand(kSmiTagMask));
   b(eq, failure);
   JumpIfNonSmisNotBothSequentialAsciiStrings(first,
                                              second,
                                              scratch1,
                                              scratch2,
                                              failure);
 }

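The neither-is-a-smi test above folds two checks into one instruction pair: smis have a clear tag bit, so first & second has a clear tag bit whenever at least one operand is a smi. Under the same tag assumptions as before, the predicate in C++:

    #include <cstdint>

    const intptr_t kSmiTagMask = 1;  // assumption: one-bit smi tag, tag value 0

    bool AtLeastOneSmi(intptr_t first, intptr_t second) {
      // and_ scratch1, first, second; tst scratch1, #tag; b eq, failure
      return ((first & second) & kSmiTagMask) == 0;
    }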
(...skipping 56 matching lines...)
   for (int i = 0; i < field_count; i++) {
     ldr(tmp, FieldMemOperand(src, i * kPointerSize));
     str(tmp, FieldMemOperand(dst, i * kPointerSize));
   }
 }


 void MacroAssembler::CountLeadingZeros(Register zeros,   // Answer.
                                        Register source,  // Input.
                                        Register scratch) {
-  ASSERT(!zeros.is(source) || !source.is(zeros));
+  ASSERT(!zeros.is(source) || !source.is(scratch));
   ASSERT(!zeros.is(scratch));
   ASSERT(!scratch.is(ip));
   ASSERT(!source.is(ip));
   ASSERT(!zeros.is(ip));
 #ifdef CAN_USE_ARMV5_INSTRUCTIONS
   clz(zeros, source);  // This instruction is only supported after ARM5.
 #else
   mov(zeros, Operand(0, RelocInfo::NONE));
   Move(scratch, source);
   // Top 16.
(...skipping 101 matching lines...)
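The fallback path (for cores without clz) counts leading zeros by binary search: test the top half of the remaining window, and if it is all zero, add its width to the count and shift it away. A portable C++ sketch of the full algorithm the elided lines continue:

    #include <cstdint>

    int CountLeadingZeros32(uint32_t x) {
      if (x == 0) return 32;
      int n = 0;
      if ((x & 0xFFFF0000u) == 0) { n += 16; x <<= 16; }  // top 16 all zero?
      if ((x & 0xFF000000u) == 0) { n += 8;  x <<= 8;  }  // top 8
      if ((x & 0xF0000000u) == 0) { n += 4;  x <<= 4;  }  // top 4
      if ((x & 0xC0000000u) == 0) { n += 2;  x <<= 2;  }  // top 2
      if ((x & 0x80000000u) == 0) { n += 1; }             // top 1
      return n;
    }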
   Call(function);
   int stack_passed_arguments = (num_arguments <= 4) ? 0 : num_arguments - 4;
   if (OS::ActivationFrameAlignment() > kPointerSize) {
     ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
   } else {
     add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
   }
 }


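The cleanup above follows the ARM AAPCS convention this code targets: the first four word-sized C arguments travel in r0 through r3, so only arguments beyond the fourth occupy stack slots that must be popped after the call. A sketch of the slot count:

    int StackPassedArguments(int num_arguments) {
      const int kRegisterPassedArguments = 4;  // r0..r3 per AAPCS
      return (num_arguments <= kRegisterPassedArguments)
                 ? 0
                 : num_arguments - kRegisterPassedArguments;
    }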
-#ifdef ENABLE_DEBUGGER_SUPPORT
+void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
+                                               Register result) {
+  const uint32_t kLdrOffsetMask = (1 << 12) - 1;
+  const int32_t kPCRegOffset = 2 * kPointerSize;
+  ldr(result, MemOperand(ldr_location));
+  if (FLAG_debug_code) {
+    // Check that the instruction is a ldr reg, [pc + offset] .
+    and_(result, result, Operand(kLdrPCPattern));
+    cmp(result, Operand(kLdrPCPattern));
+    Check(eq, "The instruction to patch should be a load from pc.");
+    // Result was clobbered. Restore it.
+    ldr(result, MemOperand(ldr_location));
+  }
+  // Get the address of the constant.
+  and_(result, result, Operand(kLdrOffsetMask));
+  add(result, ldr_location, Operand(result));
+  add(result, result, Operand(kPCRegOffset));
+}


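GetRelocatedValueLocation recovers a constant-pool address from a pc-relative load: the low 12 bits of an ARM ldr Rd, [pc, #imm] hold the byte offset, and pc reads as the instruction address plus 8, which is where kPCRegOffset = 2 * kPointerSize comes from. A host-side C++ sketch of the same decoding (hypothetical helper, assuming the add-offset encoding the debug check verifies):

    #include <cstdint>

    uint32_t* ConstantPoolEntry(uint32_t* ldr_location) {
      const uint32_t kLdrOffsetMask = (1u << 12) - 1;  // low 12 bits: immediate
      uint32_t instr = *ldr_location;
      uint32_t offset = instr & kLdrOffsetMask;
      // pc-relative base: the hardware reads pc as ldr_location + 8 bytes.
      return reinterpret_cast<uint32_t*>(
          reinterpret_cast<uintptr_t>(ldr_location) + offset + 8);
    }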
 CodePatcher::CodePatcher(byte* address, int instructions)
     : address_(address),
       instructions_(instructions),
       size_(instructions * Assembler::kInstrSize),
       masm_(address, size_ + Assembler::kGap) {
   // Create a new macro assembler pointing to the address of the code to patch.
   // The size is adjusted with kGap in order for the assembler to generate size
   // bytes of instructions without failing with buffer size constraints.
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }


 CodePatcher::~CodePatcher() {
   // Indicate that code has changed.
   CPU::FlushICache(address_, size_);

   // Check that the code was patched as expected.
   ASSERT(masm_.pc_ == address_ + size_);
   ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
 }


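A hypothetical usage sketch of CodePatcher: the constructor scopes an assembler over already-generated code, and the destructor flushes the icache and asserts that exactly the promised number of instructions was emitted.

    // Overwrite two instructions at 'pc' in place (illustrative only).
    void PatchTwoInstructions(byte* pc) {
      CodePatcher patcher(pc, 2);  // room for exactly 2 instructions
      patcher.masm()->mov(r0, Operand(0, RelocInfo::NONE));
      patcher.masm()->nop();
    }  // ~CodePatcher flushes the icache so stale instructions are not run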
-void CodePatcher::Emit(Instr x) {
-  masm()->emit(x);
+void CodePatcher::Emit(Instr instr) {
+  masm()->emit(instr);
 }


 void CodePatcher::Emit(Address addr) {
   masm()->emit(reinterpret_cast<Instr>(addr));
 }
-#endif  // ENABLE_DEBUGGER_SUPPORT
+

+void CodePatcher::EmitCondition(Condition cond) {
+  Instr instr = Assembler::instr_at(masm_.pc_);
+  instr = (instr & ~kCondMask) | cond;
+  masm_.emit(instr);
+}


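EmitCondition rewrites only the condition field of the instruction already sitting at the patch position; on ARM that field occupies the top four bits of the word, which is what kCondMask is assumed to cover in this sketch:

    #include <cstdint>

    const uint32_t kCondMask = 0xF0000000u;  // assumption: ARM cond bits 31..28

    uint32_t WithCondition(uint32_t instr, uint32_t cond) {
      return (instr & ~kCondMask) | cond;  // e.g. retarget 'al' to 'eq'
    }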
 } }  // namespace v8::internal

 #endif  // V8_TARGET_ARCH_ARM