Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(361)

Side by Side Diff: src/arm/macro-assembler-arm.cc

Issue 6597029: [Isolates] Merge r 6300:6500 from bleeding_edge to isolates. (Closed) Base URL: http://v8.googlecode.com/svn/branches/experimental/isolates/
Patch Set: Created 9 years, 9 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View unified diff | Download patch | Annotate | Revision Log
« no previous file with comments | « src/arm/macro-assembler-arm.h ('k') | src/arm/simulator-arm.h » ('j') | no next file with comments »
Toggle Intra-line Diffs ('i') | Expand Comments ('e') | Collapse Comments ('c') | Show Comments Hide Comments ('s')
OLDNEW
1 // Copyright 2010 the V8 project authors. All rights reserved. 1 // Copyright 2011 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
11 // with the distribution. 11 // with the distribution.
(...skipping 299 matching lines...) Expand 10 before | Expand all | Expand 10 after
311 usat(dst, satpos, src, cond); 311 usat(dst, satpos, src, cond);
312 } 312 }
313 } 313 }
314 314
315 315
316 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) { 316 void MacroAssembler::SmiJumpTable(Register index, Vector<Label*> targets) {
317 // Empty the const pool. 317 // Empty the const pool.
318 CheckConstPool(true, true); 318 CheckConstPool(true, true);
319 add(pc, pc, Operand(index, 319 add(pc, pc, Operand(index,
320 LSL, 320 LSL,
321 assembler::arm::Instr::kInstrSizeLog2 - kSmiTagSize)); 321 Instruction::kInstrSizeLog2 - kSmiTagSize));
322 BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize); 322 BlockConstPoolBefore(pc_offset() + (targets.length() + 1) * kInstrSize);
323 nop(); // Jump table alignment. 323 nop(); // Jump table alignment.
324 for (int i = 0; i < targets.length(); i++) { 324 for (int i = 0; i < targets.length(); i++) {
325 b(targets[i]); 325 b(targets[i]);
326 } 326 }
327 } 327 }
328 328
329 329
330 void MacroAssembler::LoadRoot(Register destination, 330 void MacroAssembler::LoadRoot(Register destination,
331 Heap::RootListIndex index, 331 Heap::RootListIndex index,
(...skipping 30 matching lines...) Expand all
362 // Mark region dirty. 362 // Mark region dirty.
363 ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset)); 363 ldr(scratch, MemOperand(object, Page::kDirtyFlagOffset));
364 mov(ip, Operand(1)); 364 mov(ip, Operand(1));
365 orr(scratch, scratch, Operand(ip, LSL, address)); 365 orr(scratch, scratch, Operand(ip, LSL, address));
366 str(scratch, MemOperand(object, Page::kDirtyFlagOffset)); 366 str(scratch, MemOperand(object, Page::kDirtyFlagOffset));
367 } 367 }
368 368
369 369
370 void MacroAssembler::InNewSpace(Register object, 370 void MacroAssembler::InNewSpace(Register object,
371 Register scratch, 371 Register scratch,
372 Condition cc, 372 Condition cond,
373 Label* branch) { 373 Label* branch) {
374 ASSERT(cc == eq || cc == ne); 374 ASSERT(cond == eq || cond == ne);
375 and_(scratch, object, Operand(ExternalReference::new_space_mask())); 375 and_(scratch, object, Operand(ExternalReference::new_space_mask()));
376 cmp(scratch, Operand(ExternalReference::new_space_start())); 376 cmp(scratch, Operand(ExternalReference::new_space_start()));
377 b(cc, branch); 377 b(cond, branch);
378 } 378 }
379 379
380 380
381 // Will clobber 4 registers: object, offset, scratch, ip. The 381 // Will clobber 4 registers: object, offset, scratch, ip. The
382 // register 'object' contains a heap object pointer. The heap object 382 // register 'object' contains a heap object pointer. The heap object
383 // tag is shifted away. 383 // tag is shifted away.
384 void MacroAssembler::RecordWrite(Register object, 384 void MacroAssembler::RecordWrite(Register object,
385 Operand offset, 385 Operand offset,
386 Register scratch0, 386 Register scratch0,
387 Register scratch1) { 387 Register scratch1) {
(...skipping 71 matching lines...) Expand 10 before | Expand all | Expand 10 after
459 } 459 }
460 460
461 461
462 void MacroAssembler::PopSafepointRegisters() { 462 void MacroAssembler::PopSafepointRegisters() {
463 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 463 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
464 ldm(ia_w, sp, kSafepointSavedRegisters); 464 ldm(ia_w, sp, kSafepointSavedRegisters);
465 add(sp, sp, Operand(num_unsaved * kPointerSize)); 465 add(sp, sp, Operand(num_unsaved * kPointerSize));
466 } 466 }
467 467
468 468
469 void MacroAssembler::PushSafepointRegistersAndDoubles() {
470 PushSafepointRegisters();
471 sub(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
472 kDoubleSize));
473 for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
474 vstr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
475 }
476 }
477
478
479 void MacroAssembler::PopSafepointRegistersAndDoubles() {
480 for (int i = 0; i < DwVfpRegister::kNumAllocatableRegisters; i++) {
481 vldr(DwVfpRegister::FromAllocationIndex(i), sp, i * kDoubleSize);
482 }
483 add(sp, sp, Operand(DwVfpRegister::kNumAllocatableRegisters *
484 kDoubleSize));
485 PopSafepointRegisters();
486 }
487
488 void MacroAssembler::StoreToSafepointRegisterSlot(Register reg) {
489 str(reg, SafepointRegisterSlot(reg));
490 }
491
492
469 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { 493 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
470 // The registers are pushed starting with the highest encoding, 494 // The registers are pushed starting with the highest encoding,
471 // which means that lowest encodings are closest to the stack pointer. 495 // which means that lowest encodings are closest to the stack pointer.
472 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters); 496 ASSERT(reg_code >= 0 && reg_code < kNumSafepointRegisters);
473 return reg_code; 497 return reg_code;
474 } 498 }
475 499
476 500
501 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
502 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
503 }
504
505
477 void MacroAssembler::Ldrd(Register dst1, Register dst2, 506 void MacroAssembler::Ldrd(Register dst1, Register dst2,
478 const MemOperand& src, Condition cond) { 507 const MemOperand& src, Condition cond) {
479 ASSERT(src.rm().is(no_reg)); 508 ASSERT(src.rm().is(no_reg));
480 ASSERT(!dst1.is(lr)); // r14. 509 ASSERT(!dst1.is(lr)); // r14.
481 ASSERT_EQ(0, dst1.code() % 2); 510 ASSERT_EQ(0, dst1.code() % 2);
482 ASSERT_EQ(dst1.code() + 1, dst2.code()); 511 ASSERT_EQ(dst1.code() + 1, dst2.code());
483 512
484 // Generate two ldr instructions if ldrd is not available. 513 // Generate two ldr instructions if ldrd is not available.
485 if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) { 514 if (Isolate::Current()->cpu_features()->IsSupported(ARMv7)) {
486 CpuFeatures::Scope scope(ARMv7); 515 CpuFeatures::Scope scope(ARMv7);
(...skipping 92 matching lines...) Expand 10 before | Expand all | Expand 10 after
579 // r2: preserved 608 // r2: preserved
580 609
581 // Drop the execution stack down to the frame pointer and restore 610 // Drop the execution stack down to the frame pointer and restore
582 // the caller frame pointer and return address. 611 // the caller frame pointer and return address.
583 mov(sp, fp); 612 mov(sp, fp);
584 ldm(ia_w, sp, fp.bit() | lr.bit()); 613 ldm(ia_w, sp, fp.bit() | lr.bit());
585 } 614 }
586 615
587 616
588 void MacroAssembler::EnterExitFrame(bool save_doubles) { 617 void MacroAssembler::EnterExitFrame(bool save_doubles) {
589 // r0 is argc. 618 // Compute the argv pointer in a callee-saved register.
590 // Compute callee's stack pointer before making changes and save it as 619 add(r6, sp, Operand(r0, LSL, kPointerSizeLog2));
591 // ip register so that it is restored as sp register on exit, thereby 620 sub(r6, r6, Operand(kPointerSize));
592 // popping the args.
593 621
594 // ip = sp + kPointerSize * #args; 622 // Setup the frame structure on the stack.
595 add(ip, sp, Operand(r0, LSL, kPointerSizeLog2)); 623 ASSERT_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
596 624 ASSERT_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
597 // Compute the argv pointer and keep it in a callee-saved register. 625 ASSERT_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
598 sub(r6, ip, Operand(kPointerSize)); 626 Push(lr, fp);
599 627 mov(fp, Operand(sp)); // Setup new frame pointer.
600 // Prepare the stack to be aligned when calling into C. After this point there 628 // Reserve room for saved entry sp and code object.
601 // are 5 pushes before the call into C, so the stack needs to be aligned after 629 sub(sp, sp, Operand(2 * kPointerSize));
602 // 5 pushes. 630 if (FLAG_debug_code) {
603 int frame_alignment = ActivationFrameAlignment(); 631 mov(ip, Operand(0));
604 int frame_alignment_mask = frame_alignment - 1; 632 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
605 if (frame_alignment != kPointerSize) {
606 // The following code needs to be more general if this assert does not hold.
607 ASSERT(frame_alignment == 2 * kPointerSize);
608 // With 5 pushes left the frame must be unaligned at this point.
609 mov(r7, Operand(Smi::FromInt(0)));
610 tst(sp, Operand((frame_alignment - kPointerSize) & frame_alignment_mask));
611 push(r7, eq); // Push if aligned to make it unaligned.
612 } 633 }
613
614 // Push in reverse order: caller_fp, sp_on_exit, and caller_pc.
615 stm(db_w, sp, fp.bit() | ip.bit() | lr.bit());
616 mov(fp, Operand(sp)); // Setup new frame pointer.
617
618 mov(ip, Operand(CodeObject())); 634 mov(ip, Operand(CodeObject()));
619 push(ip); // Accessed from ExitFrame::code_slot. 635 str(ip, MemOperand(fp, ExitFrameConstants::kCodeOffset));
620 636
621 // Save the frame pointer and the context in top. 637 // Save the frame pointer and the context in top.
622 mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address))); 638 mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address)));
623 str(fp, MemOperand(ip)); 639 str(fp, MemOperand(ip));
624 mov(ip, Operand(ExternalReference(Isolate::k_context_address))); 640 mov(ip, Operand(ExternalReference(Isolate::k_context_address)));
625 str(cp, MemOperand(ip)); 641 str(cp, MemOperand(ip));
626 642
627 // Setup argc and the builtin function in callee-saved registers. 643 // Setup argc and the builtin function in callee-saved registers.
628 mov(r4, Operand(r0)); 644 mov(r4, Operand(r0));
629 mov(r5, Operand(r1)); 645 mov(r5, Operand(r1));
630 646
631 // Optionally save all double registers. 647 // Optionally save all double registers.
632 if (save_doubles) { 648 if (save_doubles) {
633 // TODO(regis): Use vstrm instruction. 649 sub(sp, sp, Operand(DwVfpRegister::kNumRegisters * kDoubleSize));
634 // The stack alignment code above made sp unaligned, so add space for one 650 const int offset = -2 * kPointerSize;
635 // more double register and use aligned addresses.
636 ASSERT(kDoubleSize == frame_alignment);
637 // Mark the frame as containing doubles by pushing a non-valid return
638 // address, i.e. 0.
639 ASSERT(ExitFrameConstants::kMarkerOffset == -2 * kPointerSize);
640 mov(ip, Operand(0)); // Marker and alignment word.
641 push(ip);
642 int space = DwVfpRegister::kNumRegisters * kDoubleSize + kPointerSize;
643 sub(sp, sp, Operand(space));
644 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { 651 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
645 DwVfpRegister reg = DwVfpRegister::from_code(i); 652 DwVfpRegister reg = DwVfpRegister::from_code(i);
646 vstr(reg, sp, i * kDoubleSize + kPointerSize); 653 vstr(reg, fp, offset - ((i + 1) * kDoubleSize));
647 } 654 }
648 // Note that d0 will be accessible at fp - 2*kPointerSize - 655 // Note that d0 will be accessible at
649 // DwVfpRegister::kNumRegisters * kDoubleSize, since the code slot and the 656 // fp - 2 * kPointerSize - DwVfpRegister::kNumRegisters * kDoubleSize,
650 // alignment word were pushed after the fp. 657 // since the sp slot and code slot were pushed after the fp.
651 } 658 }
659
660 // Reserve place for the return address and align the frame preparing for
661 // calling the runtime function.
662 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
663 sub(sp, sp, Operand(kPointerSize));
664 if (frame_alignment > 0) {
665 ASSERT(IsPowerOf2(frame_alignment));
666 and_(sp, sp, Operand(-frame_alignment));
667 }
668
669 // Set the exit frame sp value to point just before the return address
670 // location.
671 add(ip, sp, Operand(kPointerSize));
672 str(ip, MemOperand(fp, ExitFrameConstants::kSPOffset));
652 } 673 }
653 674
654 675
655 void MacroAssembler::InitializeNewString(Register string, 676 void MacroAssembler::InitializeNewString(Register string,
656 Register length, 677 Register length,
657 Heap::RootListIndex map_index, 678 Heap::RootListIndex map_index,
658 Register scratch1, 679 Register scratch1,
659 Register scratch2) { 680 Register scratch2) {
660 mov(scratch1, Operand(length, LSL, kSmiTagSize)); 681 mov(scratch1, Operand(length, LSL, kSmiTagSize));
661 LoadRoot(scratch2, map_index); 682 LoadRoot(scratch2, map_index);
(...skipping 17 matching lines...) Expand all
679 // if the target platform will need alignment, so this is controlled from a 700 // if the target platform will need alignment, so this is controlled from a
680 // flag. 701 // flag.
681 return FLAG_sim_stack_alignment; 702 return FLAG_sim_stack_alignment;
682 #endif // defined(V8_HOST_ARCH_ARM) 703 #endif // defined(V8_HOST_ARCH_ARM)
683 } 704 }
684 705
685 706
686 void MacroAssembler::LeaveExitFrame(bool save_doubles) { 707 void MacroAssembler::LeaveExitFrame(bool save_doubles) {
687 // Optionally restore all double registers. 708 // Optionally restore all double registers.
688 if (save_doubles) { 709 if (save_doubles) {
689 // TODO(regis): Use vldrm instruction.
690 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) { 710 for (int i = 0; i < DwVfpRegister::kNumRegisters; i++) {
691 DwVfpRegister reg = DwVfpRegister::from_code(i); 711 DwVfpRegister reg = DwVfpRegister::from_code(i);
692 // Register d15 is just below the marker. 712 const int offset = -2 * kPointerSize;
693 const int offset = ExitFrameConstants::kMarkerOffset; 713 vldr(reg, fp, offset - ((i + 1) * kDoubleSize));
694 vldr(reg, fp, (i - DwVfpRegister::kNumRegisters) * kDoubleSize + offset);
695 } 714 }
696 } 715 }
697 716
698 // Clear top frame. 717 // Clear top frame.
699 mov(r3, Operand(0, RelocInfo::NONE)); 718 mov(r3, Operand(0, RelocInfo::NONE));
700 mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address))); 719 mov(ip, Operand(ExternalReference(Isolate::k_c_entry_fp_address)));
701 str(r3, MemOperand(ip)); 720 str(r3, MemOperand(ip));
702 721
703 // Restore current context from top and clear it in debug mode. 722 // Restore current context from top and clear it in debug mode.
704 mov(ip, Operand(ExternalReference(Isolate::k_context_address))); 723 mov(ip, Operand(ExternalReference(Isolate::k_context_address)));
705 ldr(cp, MemOperand(ip)); 724 ldr(cp, MemOperand(ip));
706 #ifdef DEBUG 725 #ifdef DEBUG
707 str(r3, MemOperand(ip)); 726 str(r3, MemOperand(ip));
708 #endif 727 #endif
709 728
710 // Pop the arguments, restore registers, and return. 729 // Tear down the exit frame, pop the arguments, and return. Callee-saved
711 mov(sp, Operand(fp)); // respect ABI stack constraint 730 // register r4 still holds argc.
712 ldm(ia, sp, fp.bit() | sp.bit() | pc.bit()); 731 mov(sp, Operand(fp));
732 ldm(ia_w, sp, fp.bit() | lr.bit());
733 add(sp, sp, Operand(r4, LSL, kPointerSizeLog2));
734 mov(pc, lr);
713 } 735 }
714 736
715 737
716 void MacroAssembler::InvokePrologue(const ParameterCount& expected, 738 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
717 const ParameterCount& actual, 739 const ParameterCount& actual,
718 Handle<Code> code_constant, 740 Handle<Code> code_constant,
719 Register code_reg, 741 Register code_reg,
720 Label* done, 742 Label* done,
721 InvokeFlag flag, 743 InvokeFlag flag,
722 PostCallGenerator* post_call_generator) { 744 PostCallGenerator* post_call_generator) {
(...skipping 175 matching lines...) Expand 10 before | Expand all | Expand 10 after
898 920
899 921
900 void MacroAssembler::IsObjectJSStringType(Register object, 922 void MacroAssembler::IsObjectJSStringType(Register object,
901 Register scratch, 923 Register scratch,
902 Label* fail) { 924 Label* fail) {
903 ASSERT(kNotStringTag != 0); 925 ASSERT(kNotStringTag != 0);
904 926
905 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 927 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
906 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 928 ldrb(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
907 tst(scratch, Operand(kIsNotStringMask)); 929 tst(scratch, Operand(kIsNotStringMask));
908 b(nz, fail); 930 b(ne, fail);
909 } 931 }
910 932
911 933
912 #ifdef ENABLE_DEBUGGER_SUPPORT 934 #ifdef ENABLE_DEBUGGER_SUPPORT
913 void MacroAssembler::DebugBreak() { 935 void MacroAssembler::DebugBreak() {
914 ASSERT(allow_stub_calls()); 936 ASSERT(allow_stub_calls());
915 mov(r0, Operand(0, RelocInfo::NONE)); 937 mov(r0, Operand(0, RelocInfo::NONE));
916 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak))); 938 mov(r1, Operand(ExternalReference(Runtime::kDebugBreak)));
917 CEntryStub ces(1); 939 CEntryStub ces(1);
918 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 940 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
(...skipping 438 matching lines...) Expand 10 before | Expand all | Expand 10 after
1357 cmp(type_reg, Operand(type)); 1379 cmp(type_reg, Operand(type));
1358 } 1380 }
1359 1381
1360 1382
1361 void MacroAssembler::CheckMap(Register obj, 1383 void MacroAssembler::CheckMap(Register obj,
1362 Register scratch, 1384 Register scratch,
1363 Handle<Map> map, 1385 Handle<Map> map,
1364 Label* fail, 1386 Label* fail,
1365 bool is_heap_object) { 1387 bool is_heap_object) {
1366 if (!is_heap_object) { 1388 if (!is_heap_object) {
1367 BranchOnSmi(obj, fail); 1389 JumpIfSmi(obj, fail);
1368 } 1390 }
1369 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 1391 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1370 mov(ip, Operand(map)); 1392 mov(ip, Operand(map));
1371 cmp(scratch, ip); 1393 cmp(scratch, ip);
1372 b(ne, fail); 1394 b(ne, fail);
1373 } 1395 }
1374 1396
1375 1397
1376 void MacroAssembler::CheckMap(Register obj, 1398 void MacroAssembler::CheckMap(Register obj,
1377 Register scratch, 1399 Register scratch,
1378 Heap::RootListIndex index, 1400 Heap::RootListIndex index,
1379 Label* fail, 1401 Label* fail,
1380 bool is_heap_object) { 1402 bool is_heap_object) {
1381 if (!is_heap_object) { 1403 if (!is_heap_object) {
1382 BranchOnSmi(obj, fail); 1404 JumpIfSmi(obj, fail);
1383 } 1405 }
1384 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 1406 ldr(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
1385 LoadRoot(ip, index); 1407 LoadRoot(ip, index);
1386 cmp(scratch, ip); 1408 cmp(scratch, ip);
1387 b(ne, fail); 1409 b(ne, fail);
1388 } 1410 }
1389 1411
1390 1412
1391 void MacroAssembler::TryGetFunctionPrototype(Register function, 1413 void MacroAssembler::TryGetFunctionPrototype(Register function,
1392 Register result, 1414 Register result,
1393 Register scratch, 1415 Register scratch,
1394 Label* miss) { 1416 Label* miss) {
1395 // Check that the receiver isn't a smi. 1417 // Check that the receiver isn't a smi.
1396 BranchOnSmi(function, miss); 1418 JumpIfSmi(function, miss);
1397 1419
1398 // Check that the function really is a function. Load map into result reg. 1420 // Check that the function really is a function. Load map into result reg.
1399 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); 1421 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
1400 b(ne, miss); 1422 b(ne, miss);
1401 1423
1402 // Make sure that the function has an instance prototype. 1424 // Make sure that the function has an instance prototype.
1403 Label non_instance; 1425 Label non_instance;
1404 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); 1426 ldrb(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
1405 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype)); 1427 tst(scratch, Operand(1 << Map::kHasNonInstancePrototype));
1406 b(ne, &non_instance); 1428 b(ne, &non_instance);
(...skipping 78 matching lines...) Expand 10 before | Expand all | Expand 10 after
1485 DwVfpRegister result, 1507 DwVfpRegister result,
1486 Register scratch1, 1508 Register scratch1,
1487 Register scratch2, 1509 Register scratch2,
1488 Register heap_number_map, 1510 Register heap_number_map,
1489 SwVfpRegister scratch3, 1511 SwVfpRegister scratch3,
1490 Label* not_number, 1512 Label* not_number,
1491 ObjectToDoubleFlags flags) { 1513 ObjectToDoubleFlags flags) {
1492 Label done; 1514 Label done;
1493 if ((flags & OBJECT_NOT_SMI) == 0) { 1515 if ((flags & OBJECT_NOT_SMI) == 0) {
1494 Label not_smi; 1516 Label not_smi;
1495 BranchOnNotSmi(object, &not_smi); 1517 JumpIfNotSmi(object, &not_smi);
1496 // Remove smi tag and convert to double. 1518 // Remove smi tag and convert to double.
1497 mov(scratch1, Operand(object, ASR, kSmiTagSize)); 1519 mov(scratch1, Operand(object, ASR, kSmiTagSize));
1498 vmov(scratch3, scratch1); 1520 vmov(scratch3, scratch1);
1499 vcvt_f64_s32(result, scratch3); 1521 vcvt_f64_s32(result, scratch3);
1500 b(&done); 1522 b(&done);
1501 bind(&not_smi); 1523 bind(&not_smi);
1502 } 1524 }
1503 // Check for heap number and load double value from it. 1525 // Check for heap number and load double value from it.
1504 ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); 1526 ldr(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
1505 sub(scratch2, object, Operand(kHeapObjectTag)); 1527 sub(scratch2, object, Operand(kHeapObjectTag));
(...skipping 273 matching lines...) Expand 10 before | Expand all | Expand 10 after
1779 ASSERT(value > 0); 1801 ASSERT(value > 0);
1780 if (FLAG_native_code_counters && counter->Enabled()) { 1802 if (FLAG_native_code_counters && counter->Enabled()) {
1781 mov(scratch2, Operand(ExternalReference(counter))); 1803 mov(scratch2, Operand(ExternalReference(counter)));
1782 ldr(scratch1, MemOperand(scratch2)); 1804 ldr(scratch1, MemOperand(scratch2));
1783 sub(scratch1, scratch1, Operand(value)); 1805 sub(scratch1, scratch1, Operand(value));
1784 str(scratch1, MemOperand(scratch2)); 1806 str(scratch1, MemOperand(scratch2));
1785 } 1807 }
1786 } 1808 }
1787 1809
1788 1810
1789 void MacroAssembler::Assert(Condition cc, const char* msg) { 1811 void MacroAssembler::Assert(Condition cond, const char* msg) {
1790 if (FLAG_debug_code) 1812 if (FLAG_debug_code)
1791 Check(cc, msg); 1813 Check(cond, msg);
1792 } 1814 }
1793 1815
1794 1816
1795 void MacroAssembler::AssertRegisterIsRoot(Register reg, 1817 void MacroAssembler::AssertRegisterIsRoot(Register reg,
1796 Heap::RootListIndex index) { 1818 Heap::RootListIndex index) {
1797 if (FLAG_debug_code) { 1819 if (FLAG_debug_code) {
1798 LoadRoot(ip, index); 1820 LoadRoot(ip, index);
1799 cmp(reg, ip); 1821 cmp(reg, ip);
1800 Check(eq, "Register did not match expected root"); 1822 Check(eq, "Register did not match expected root");
1801 } 1823 }
(...skipping 12 matching lines...) Expand all
1814 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); 1836 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
1815 cmp(elements, ip); 1837 cmp(elements, ip);
1816 b(eq, &ok); 1838 b(eq, &ok);
1817 Abort("JSObject with fast elements map has slow elements"); 1839 Abort("JSObject with fast elements map has slow elements");
1818 bind(&ok); 1840 bind(&ok);
1819 pop(elements); 1841 pop(elements);
1820 } 1842 }
1821 } 1843 }
1822 1844
1823 1845
1824 void MacroAssembler::Check(Condition cc, const char* msg) { 1846 void MacroAssembler::Check(Condition cond, const char* msg) {
1825 Label L; 1847 Label L;
1826 b(cc, &L); 1848 b(cond, &L);
1827 Abort(msg); 1849 Abort(msg);
1828 // will not return here 1850 // will not return here
1829 bind(&L); 1851 bind(&L);
1830 } 1852 }
1831 1853
1832 1854
1833 void MacroAssembler::Abort(const char* msg) { 1855 void MacroAssembler::Abort(const char* msg) {
1834 Label abort_start; 1856 Label abort_start;
1835 bind(&abort_start); 1857 bind(&abort_start);
1836 // We want to pass the msg string like a smi to avoid GC 1858 // We want to pass the msg string like a smi to avoid GC
(...skipping 75 matching lines...) Expand 10 before | Expand all | Expand 10 after
1912 bind(&fail); 1934 bind(&fail);
1913 Abort("Global functions must have initial map"); 1935 Abort("Global functions must have initial map");
1914 bind(&ok); 1936 bind(&ok);
1915 } 1937 }
1916 } 1938 }
1917 1939
1918 1940
1919 void MacroAssembler::JumpIfNotBothSmi(Register reg1, 1941 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
1920 Register reg2, 1942 Register reg2,
1921 Label* on_not_both_smi) { 1943 Label* on_not_both_smi) {
1922 ASSERT_EQ(0, kSmiTag); 1944 STATIC_ASSERT(kSmiTag == 0);
1923 tst(reg1, Operand(kSmiTagMask)); 1945 tst(reg1, Operand(kSmiTagMask));
1924 tst(reg2, Operand(kSmiTagMask), eq); 1946 tst(reg2, Operand(kSmiTagMask), eq);
1925 b(ne, on_not_both_smi); 1947 b(ne, on_not_both_smi);
1926 } 1948 }
1927 1949
1928 1950
1929 void MacroAssembler::JumpIfEitherSmi(Register reg1, 1951 void MacroAssembler::JumpIfEitherSmi(Register reg1,
1930 Register reg2, 1952 Register reg2,
1931 Label* on_either_smi) { 1953 Label* on_either_smi) {
1932 ASSERT_EQ(0, kSmiTag); 1954 STATIC_ASSERT(kSmiTag == 0);
1933 tst(reg1, Operand(kSmiTagMask)); 1955 tst(reg1, Operand(kSmiTagMask));
1934 tst(reg2, Operand(kSmiTagMask), ne); 1956 tst(reg2, Operand(kSmiTagMask), ne);
1935 b(eq, on_either_smi); 1957 b(eq, on_either_smi);
1936 } 1958 }
1937 1959
1938 1960
1939 void MacroAssembler::AbortIfSmi(Register object) { 1961 void MacroAssembler::AbortIfSmi(Register object) {
1940 ASSERT_EQ(0, kSmiTag); 1962 STATIC_ASSERT(kSmiTag == 0);
1941 tst(object, Operand(kSmiTagMask)); 1963 tst(object, Operand(kSmiTagMask));
1942 Assert(ne, "Operand is a smi"); 1964 Assert(ne, "Operand is a smi");
1943 } 1965 }
1944 1966
1945 1967
1968 void MacroAssembler::AbortIfNotSmi(Register object) {
1969 STATIC_ASSERT(kSmiTag == 0);
1970 tst(object, Operand(kSmiTagMask));
1971 Assert(eq, "Operand is not smi");
1972 }
1973
1974
1975 void MacroAssembler::JumpIfNotHeapNumber(Register object,
1976 Register heap_number_map,
1977 Register scratch,
1978 Label* on_not_heap_number) {
1979 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1980 AssertRegisterIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
1981 cmp(scratch, heap_number_map);
1982 b(ne, on_not_heap_number);
1983 }
1984
1985
1946 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( 1986 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
1947 Register first, 1987 Register first,
1948 Register second, 1988 Register second,
1949 Register scratch1, 1989 Register scratch1,
1950 Register scratch2, 1990 Register scratch2,
1951 Label* failure) { 1991 Label* failure) {
1952 // Test that both first and second are sequential ASCII strings. 1992 // Test that both first and second are sequential ASCII strings.
1953 // Assume that they are non-smis. 1993 // Assume that they are non-smis.
1954 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); 1994 ldr(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
1955 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); 1995 ldr(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
1956 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); 1996 ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
1957 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); 1997 ldrb(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
1958 1998
1959 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, 1999 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
1960 scratch2, 2000 scratch2,
1961 scratch1, 2001 scratch1,
1962 scratch2, 2002 scratch2,
1963 failure); 2003 failure);
1964 } 2004 }
1965 2005
1966 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first, 2006 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
1967 Register second, 2007 Register second,
1968 Register scratch1, 2008 Register scratch1,
1969 Register scratch2, 2009 Register scratch2,
1970 Label* failure) { 2010 Label* failure) {
1971 // Check that neither is a smi. 2011 // Check that neither is a smi.
1972 ASSERT_EQ(0, kSmiTag); 2012 STATIC_ASSERT(kSmiTag == 0);
1973 and_(scratch1, first, Operand(second)); 2013 and_(scratch1, first, Operand(second));
1974 tst(scratch1, Operand(kSmiTagMask)); 2014 tst(scratch1, Operand(kSmiTagMask));
1975 b(eq, failure); 2015 b(eq, failure);
1976 JumpIfNonSmisNotBothSequentialAsciiStrings(first, 2016 JumpIfNonSmisNotBothSequentialAsciiStrings(first,
1977 second, 2017 second,
1978 scratch1, 2018 scratch1,
1979 scratch2, 2019 scratch2,
1980 failure); 2020 failure);
1981 } 2021 }
1982 2022
(...skipping 213 matching lines...) Expand 10 before | Expand all | Expand 10 after
2196 int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ? 2236 int stack_passed_arguments = (num_arguments <= kRegisterPassedArguments) ?
2197 0 : num_arguments - kRegisterPassedArguments; 2237 0 : num_arguments - kRegisterPassedArguments;
2198 if (OS::ActivationFrameAlignment() > kPointerSize) { 2238 if (OS::ActivationFrameAlignment() > kPointerSize) {
2199 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); 2239 ldr(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
2200 } else { 2240 } else {
2201 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); 2241 add(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize)));
2202 } 2242 }
2203 } 2243 }
2204 2244
2205 2245
2246 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
2247 Register result) {
2248 const uint32_t kLdrOffsetMask = (1 << 12) - 1;
2249 const int32_t kPCRegOffset = 2 * kPointerSize;
2250 ldr(result, MemOperand(ldr_location));
2251 if (FLAG_debug_code) {
2252 // Check that the instruction is a ldr reg, [pc + offset].
2253 and_(result, result, Operand(kLdrPCPattern));
2254 cmp(result, Operand(kLdrPCPattern));
2255 Check(eq, "The instruction to patch should be a load from pc.");
2256 // Result was clobbered. Restore it.
2257 ldr(result, MemOperand(ldr_location));
2258 }
2259 // Get the address of the constant.
2260 and_(result, result, Operand(kLdrOffsetMask));
2261 add(result, ldr_location, Operand(result));
2262 add(result, result, Operand(kPCRegOffset));
2263 }
2264
2265
2206 #ifdef ENABLE_DEBUGGER_SUPPORT 2266 #ifdef ENABLE_DEBUGGER_SUPPORT
2207 CodePatcher::CodePatcher(byte* address, int instructions) 2267 CodePatcher::CodePatcher(byte* address, int instructions)
2208 : address_(address), 2268 : address_(address),
2209 instructions_(instructions), 2269 instructions_(instructions),
2210 size_(instructions * Assembler::kInstrSize), 2270 size_(instructions * Assembler::kInstrSize),
2211 masm_(address, size_ + Assembler::kGap) { 2271 masm_(address, size_ + Assembler::kGap) {
2212 // Create a new macro assembler pointing to the address of the code to patch. 2272 // Create a new macro assembler pointing to the address of the code to patch.
2213 // The size is adjusted with kGap in order for the assembler to generate size 2273 // bytes of instructions without failing with buffer size constraints.
2214 // bytes of instructions without failing with buffer size constraints. 2274 // bytes of instructions without failing with buffer size constraints.
2215 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 2275 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
(...skipping 17 matching lines...) Expand all
2233 2293
2234 void CodePatcher::Emit(Address addr) { 2294 void CodePatcher::Emit(Address addr) {
2235 masm()->emit(reinterpret_cast<Instr>(addr)); 2295 masm()->emit(reinterpret_cast<Instr>(addr));
2236 } 2296 }
2237 #endif // ENABLE_DEBUGGER_SUPPORT 2297 #endif // ENABLE_DEBUGGER_SUPPORT
2238 2298
2239 2299
2240 } } // namespace v8::internal 2300 } } // namespace v8::internal
2241 2301
2242 #endif // V8_TARGET_ARCH_ARM 2302 #endif // V8_TARGET_ARCH_ARM
OLDNEW
« no previous file with comments | « src/arm/macro-assembler-arm.h ('k') | src/arm/simulator-arm.h » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698