Chromium Code Reviews

Side by Side Diff: src/arm/macro-assembler-arm.cc

Issue 22715004: Version 3.20.15 (Closed) | Base URL: https://v8.googlecode.com/svn/trunk
Patch Set: Add TypedArray API and correctness patches r16033 and r16084 | Created 7 years, 4 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Redistribution and use in source and binary forms, with or without 2 // Redistribution and use in source and binary forms, with or without
3 // modification, are permitted provided that the following conditions are 3 // modification, are permitted provided that the following conditions are
4 // met: 4 // met:
5 // 5 //
6 // * Redistributions of source code must retain the above copyright 6 // * Redistributions of source code must retain the above copyright
7 // notice, this list of conditions and the following disclaimer. 7 // notice, this list of conditions and the following disclaimer.
8 // * Redistributions in binary form must reproduce the above 8 // * Redistributions in binary form must reproduce the above
9 // copyright notice, this list of conditions and the following 9 // copyright notice, this list of conditions and the following
10 // disclaimer in the documentation and/or other materials provided 10 // disclaimer in the documentation and/or other materials provided
(...skipping 471 matching lines...)
482 RememberedSetAction remembered_set_action, 482 RememberedSetAction remembered_set_action,
483 SmiCheck smi_check) { 483 SmiCheck smi_check) {
484 // The compiled code assumes that record write doesn't change the 484 // The compiled code assumes that record write doesn't change the
485 // context register, so we check that none of the clobbered 485 // context register, so we check that none of the clobbered
486 // registers are cp. 486 // registers are cp.
487 ASSERT(!address.is(cp) && !value.is(cp)); 487 ASSERT(!address.is(cp) && !value.is(cp));
488 488
489 if (emit_debug_code()) { 489 if (emit_debug_code()) {
490 ldr(ip, MemOperand(address)); 490 ldr(ip, MemOperand(address));
491 cmp(ip, value); 491 cmp(ip, value);
492 Check(eq, kWrongAddressOrValuePassedToRecordWrite); 492 Check(eq, "Wrong address or value passed to RecordWrite");
493 } 493 }
494 494
495 Label done; 495 Label done;
496 496
497 if (smi_check == INLINE_SMI_CHECK) { 497 if (smi_check == INLINE_SMI_CHECK) {
498 JumpIfSmi(value, &done); 498 JumpIfSmi(value, &done);
499 } 499 }
500 500
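Note: the INLINE_SMI_CHECK early-out above is sound because Smis are encoded immediates rather than heap objects, so no write barrier is needed for them; JumpIfSmi presumably just tests the low tag bit. A minimal sketch of that test, assuming the usual 32-bit tagging constants (kSmiTag == 0, kSmiTagMask == 1):

#include <cstdint>

// Assumed tagging scheme: a Smi has a clear low bit, a heap pointer a set one.
inline bool IsSmi(intptr_t value) {
  const intptr_t kSmiTag = 0;
  const intptr_t kSmiTagMask = 1;
  return (value & kSmiTagMask) == kSmiTag;
}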
501 CheckPageFlag(value, 501 CheckPageFlag(value,
502 value, // Used as scratch. 502 value, // Used as scratch.
(...skipping 980 matching lines...)
1483 1483
1484 ASSERT(!holder_reg.is(scratch)); 1484 ASSERT(!holder_reg.is(scratch));
1485 ASSERT(!holder_reg.is(ip)); 1485 ASSERT(!holder_reg.is(ip));
1486 ASSERT(!scratch.is(ip)); 1486 ASSERT(!scratch.is(ip));
1487 1487
1488 // Load current lexical context from the stack frame. 1488 // Load current lexical context from the stack frame.
1489 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); 1489 ldr(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1490 // In debug mode, make sure the lexical context is set. 1490 // In debug mode, make sure the lexical context is set.
1491 #ifdef DEBUG 1491 #ifdef DEBUG
1492 cmp(scratch, Operand::Zero()); 1492 cmp(scratch, Operand::Zero());
1493 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext); 1493 Check(ne, "we should not have an empty lexical context");
1494 #endif 1494 #endif
1495 1495
1496 // Load the native context of the current context. 1496 // Load the native context of the current context.
1497 int offset = 1497 int offset =
1498 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; 1498 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1499 ldr(scratch, FieldMemOperand(scratch, offset)); 1499 ldr(scratch, FieldMemOperand(scratch, offset));
1500 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); 1500 ldr(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1501 1501
1502 // Check the context is a native context. 1502 // Check the context is a native context.
1503 if (emit_debug_code()) { 1503 if (emit_debug_code()) {
1504 // Cannot use ip as a temporary in this verification code, because ip is 1504 // Cannot use ip as a temporary in this verification code, because ip is
1505 // clobbered as part of cmp with an object Operand. 1505 // clobbered as part of cmp with an object Operand.
1506 push(holder_reg); // Temporarily save holder on the stack. 1506 push(holder_reg); // Temporarily save holder on the stack.
1507 // Read the first word and compare to the native_context_map. 1507 // Read the first word and compare to the native_context_map.
1508 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); 1508 ldr(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1509 LoadRoot(ip, Heap::kNativeContextMapRootIndex); 1509 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1510 cmp(holder_reg, ip); 1510 cmp(holder_reg, ip);
1511 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); 1511 Check(eq, "JSGlobalObject::native_context should be a native context.");
1512 pop(holder_reg); // Restore holder. 1512 pop(holder_reg); // Restore holder.
1513 } 1513 }
1514 1514
1515 // Check if both contexts are the same. 1515 // Check if both contexts are the same.
1516 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); 1516 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1517 cmp(scratch, Operand(ip)); 1517 cmp(scratch, Operand(ip));
1518 b(eq, &same_contexts); 1518 b(eq, &same_contexts);
1519 1519
1520 // Check the context is a native context. 1520 // Check the context is a native context.
1521 if (emit_debug_code()) { 1521 if (emit_debug_code()) {
1522 // Cannot use ip as a temporary in this verification code, because ip is 1522 // Cannot use ip as a temporary in this verification code, because ip is
1523 // clobbered as part of cmp with an object Operand. 1523 // clobbered as part of cmp with an object Operand.
1524 push(holder_reg); // Temporarily save holder on the stack. 1524 push(holder_reg); // Temporarily save holder on the stack.
1525 mov(holder_reg, ip); // Move ip to its holding place. 1525 mov(holder_reg, ip); // Move ip to its holding place.
1526 LoadRoot(ip, Heap::kNullValueRootIndex); 1526 LoadRoot(ip, Heap::kNullValueRootIndex);
1527 cmp(holder_reg, ip); 1527 cmp(holder_reg, ip);
1528 Check(ne, kJSGlobalProxyContextShouldNotBeNull); 1528 Check(ne, "JSGlobalProxy::context() should not be null.");
1529 1529
1530 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); 1530 ldr(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1531 LoadRoot(ip, Heap::kNativeContextMapRootIndex); 1531 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1532 cmp(holder_reg, ip); 1532 cmp(holder_reg, ip);
1533 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext); 1533 Check(eq, "JSGlobalObject::native_context should be a native context.");
1534 // Restoring ip is not needed; ip is reloaded below. 1534 // Restoring ip is not needed; ip is reloaded below.
1535 pop(holder_reg); // Restore holder. 1535 pop(holder_reg); // Restore holder.
1536 // Restore ip to holder's context. 1536 // Restore ip to holder's context.
1537 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); 1537 ldr(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1538 } 1538 }
1539 1539
1540 // Check that the security token in the calling global object is 1540 // Check that the security token in the calling global object is
1541 // compatible with the security token in the receiving global 1541 // compatible with the security token in the receiving global
1542 // object. 1542 // object.
1543 int token_offset = Context::kHeaderSize + 1543 int token_offset = Context::kHeaderSize +
(...skipping 176 matching lines...)
1720 if ((flags & RESULT_CONTAINS_TOP) == 0) { 1720 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1721 // Load allocation top into result and allocation limit into ip. 1721 // Load allocation top into result and allocation limit into ip.
1722 ldm(ia, topaddr, result.bit() | ip.bit()); 1722 ldm(ia, topaddr, result.bit() | ip.bit());
1723 } else { 1723 } else {
1724 if (emit_debug_code()) { 1724 if (emit_debug_code()) {
1725 // Assert that result actually contains top on entry. ip is used 1725 // Assert that result actually contains top on entry. ip is used
1726 // immediately below so this use of ip does not cause difference with 1726 // immediately below so this use of ip does not cause difference with
1727 // respect to register content between debug and release mode. 1727 // respect to register content between debug and release mode.
1728 ldr(ip, MemOperand(topaddr)); 1728 ldr(ip, MemOperand(topaddr));
1729 cmp(result, ip); 1729 cmp(result, ip);
1730 Check(eq, kUnexpectedAllocationTop); 1730 Check(eq, "Unexpected allocation top");
1731 } 1731 }
1732 // Load allocation limit into ip. Result already contains allocation top. 1732 // Load allocation limit into ip. Result already contains allocation top.
1733 ldr(ip, MemOperand(topaddr, limit - top)); 1733 ldr(ip, MemOperand(topaddr, limit - top));
1734 } 1734 }
1735 1735
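Note on the ldm above: loading the allocation top into result and the limit into ip with a single load-multiple only works if the two words are adjacent in memory with top at the lower address, and (since ldm fills registers in ascending register-number order from ascending addresses) result must be a lower-numbered register than ip. A sketch of the layout this presumes (the struct and field names are illustrative, not V8's):

#include <cstdint>

// Illustrative layout assumption behind ldm(ia, topaddr, result | ip):
// the top and limit words sit next to each other, top first.
struct AllocationInfo {
  uintptr_t top;    // goes into 'result' (lower address, lower register)
  uintptr_t limit;  // goes into 'ip'
};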
1736 if ((flags & DOUBLE_ALIGNMENT) != 0) { 1736 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1737 // Align the next allocation. Storing the filler map without checking top is 1737 // Align the next allocation. Storing the filler map without checking top is
1738 // always safe because the limit of the heap is always aligned. 1738 // always safe because the limit of the heap is always aligned.
1739 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 1739 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1740 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); 1740 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
(...skipping 77 matching lines...)
1818 if ((flags & RESULT_CONTAINS_TOP) == 0) { 1818 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1819 // Load allocation top into result and allocation limit into ip. 1819 // Load allocation top into result and allocation limit into ip.
1820 ldm(ia, topaddr, result.bit() | ip.bit()); 1820 ldm(ia, topaddr, result.bit() | ip.bit());
1821 } else { 1821 } else {
1822 if (emit_debug_code()) { 1822 if (emit_debug_code()) {
1823 // Assert that result actually contains top on entry. ip is used 1823 // Assert that result actually contains top on entry. ip is used
1824 // immediately below so this use of ip does not cause difference with 1824 // immediately below so this use of ip does not cause difference with
1825 // respect to register content between debug and release mode. 1825 // respect to register content between debug and release mode.
1826 ldr(ip, MemOperand(topaddr)); 1826 ldr(ip, MemOperand(topaddr));
1827 cmp(result, ip); 1827 cmp(result, ip);
1828 Check(eq, kUnexpectedAllocationTop); 1828 Check(eq, "Unexpected allocation top");
1829 } 1829 }
1830 // Load allocation limit into ip. Result already contains allocation top. 1830 // Load allocation limit into ip. Result already contains allocation top.
1831 ldr(ip, MemOperand(topaddr, limit - top)); 1831 ldr(ip, MemOperand(topaddr, limit - top));
1832 } 1832 }
1833 1833
1834 if ((flags & DOUBLE_ALIGNMENT) != 0) { 1834 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1835 // Align the next allocation. Storing the filler map without checking top is 1835 // Align the next allocation. Storing the filler map without checking top is
1836 // always safe because the limit of the heap is always aligned. 1836 // always safe because the limit of the heap is always aligned.
1837 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 1837 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1838 ASSERT(kPointerAlignment * 2 == kDoubleAlignment); 1838 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
(...skipping 13 matching lines...)
1852 } else { 1852 } else {
1853 add(scratch2, result, Operand(object_size), SetCC); 1853 add(scratch2, result, Operand(object_size), SetCC);
1854 } 1854 }
1855 b(cs, gc_required); 1855 b(cs, gc_required);
1856 cmp(scratch2, Operand(ip)); 1856 cmp(scratch2, Operand(ip));
1857 b(hi, gc_required); 1857 b(hi, gc_required);
1858 1858
1859 // Update allocation top. result temporarily holds the new top. 1859 // Update allocation top. result temporarily holds the new top.
1860 if (emit_debug_code()) { 1860 if (emit_debug_code()) {
1861 tst(scratch2, Operand(kObjectAlignmentMask)); 1861 tst(scratch2, Operand(kObjectAlignmentMask));
1862 Check(eq, kUnalignedAllocationInNewSpace); 1862 Check(eq, "Unaligned allocation in new space");
1863 } 1863 }
1864 str(scratch2, MemOperand(topaddr)); 1864 str(scratch2, MemOperand(topaddr));
1865 1865
1866 // Tag object if requested. 1866 // Tag object if requested.
1867 if ((flags & TAG_OBJECT) != 0) { 1867 if ((flags & TAG_OBJECT) != 0) {
1868 add(result, result, Operand(kHeapObjectTag)); 1868 add(result, result, Operand(kHeapObjectTag));
1869 } 1869 }
1870 } 1870 }
1871 1871
1872 1872
1873 void MacroAssembler::UndoAllocationInNewSpace(Register object, 1873 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1874 Register scratch) { 1874 Register scratch) {
1875 ExternalReference new_space_allocation_top = 1875 ExternalReference new_space_allocation_top =
1876 ExternalReference::new_space_allocation_top_address(isolate()); 1876 ExternalReference::new_space_allocation_top_address(isolate());
1877 1877
1878 // Make sure the object has no tag before resetting top. 1878 // Make sure the object has no tag before resetting top.
1879 and_(object, object, Operand(~kHeapObjectTagMask)); 1879 and_(object, object, Operand(~kHeapObjectTagMask));
1880 #ifdef DEBUG 1880 #ifdef DEBUG
1881 // Check that the object un-allocated is below the current top. 1881 // Check that the object un-allocated is below the current top.
1882 mov(scratch, Operand(new_space_allocation_top)); 1882 mov(scratch, Operand(new_space_allocation_top));
1883 ldr(scratch, MemOperand(scratch)); 1883 ldr(scratch, MemOperand(scratch));
1884 cmp(object, scratch); 1884 cmp(object, scratch);
1885 Check(lt, kUndoAllocationOfNonAllocatedMemory); 1885 Check(lt, "Undo allocation of non allocated memory");
1886 #endif 1886 #endif
1887 // Write the address of the object to un-allocate as the current top. 1887 // Write the address of the object to un-allocate as the current top.
1888 mov(scratch, Operand(new_space_allocation_top)); 1888 mov(scratch, Operand(new_space_allocation_top));
1889 str(object, MemOperand(scratch)); 1889 str(object, MemOperand(scratch));
1890 } 1890 }
1891 1891
1892 1892
1893 void MacroAssembler::AllocateTwoByteString(Register result, 1893 void MacroAssembler::AllocateTwoByteString(Register result,
1894 Register length, 1894 Register length,
1895 Register scratch1, 1895 Register scratch1,
(...skipping 228 matching lines...)
2124 scratch1, 2124 scratch1,
2125 isolate()->factory()->heap_number_map(), 2125 isolate()->factory()->heap_number_map(),
2126 fail, 2126 fail,
2127 DONT_DO_SMI_CHECK); 2127 DONT_DO_SMI_CHECK);
2128 2128
2129 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); 2129 vldr(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
2130 // Force a canonical NaN. 2130 // Force a canonical NaN.
2131 if (emit_debug_code()) { 2131 if (emit_debug_code()) {
2132 vmrs(ip); 2132 vmrs(ip);
2133 tst(ip, Operand(kVFPDefaultNaNModeControlBit)); 2133 tst(ip, Operand(kVFPDefaultNaNModeControlBit));
2134 Assert(ne, kDefaultNaNModeNotSet); 2134 Assert(ne, "Default NaN mode not set");
2135 } 2135 }
2136 VFPCanonicalizeNaN(double_scratch); 2136 VFPCanonicalizeNaN(double_scratch);
2137 b(&store); 2137 b(&store);
2138 2138
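Note: forcing a canonical NaN before the store matters presumably because a FixedDoubleArray reserves a particular NaN bit pattern for its own bookkeeping (the hole), so an arbitrary NaN payload read from a HeapNumber must be collapsed to the single canonical quiet NaN; the Assert above checks that the VFP default-NaN mode is enabled, which presumably lets VFPCanonicalizeNaN rely on an ordinary VFP operation to produce the default NaN. A software sketch of the same idea (not V8's implementation):

#include <cmath>
#include <limits>

// Replace any NaN, whatever its payload, with the canonical quiet NaN before
// storing it into a double array.
inline double CanonicalizeNaN(double value) {
  return std::isnan(value) ? std::numeric_limits<double>::quiet_NaN() : value;
}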
2139 bind(&smi_value); 2139 bind(&smi_value);
2140 SmiToDouble(double_scratch, value_reg); 2140 SmiToDouble(double_scratch, value_reg);
2141 2141
2142 bind(&store); 2142 bind(&store);
2143 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg)); 2143 add(scratch1, elements_reg, Operand::DoubleOffsetFromSmiKey(key_reg));
2144 vstr(double_scratch, 2144 vstr(double_scratch,
(...skipping 229 matching lines...)
2374 } 2374 }
2375 // load value from ReturnValue 2375 // load value from ReturnValue
2376 ldr(r0, MemOperand(fp, return_value_offset*kPointerSize)); 2376 ldr(r0, MemOperand(fp, return_value_offset*kPointerSize));
2377 bind(&return_value_loaded); 2377 bind(&return_value_loaded);
2378 // No more valid handles (the result handle was the last one). Restore 2378 // No more valid handles (the result handle was the last one). Restore
2379 // previous handle scope. 2379 // previous handle scope.
2380 str(r4, MemOperand(r7, kNextOffset)); 2380 str(r4, MemOperand(r7, kNextOffset));
2381 if (emit_debug_code()) { 2381 if (emit_debug_code()) {
2382 ldr(r1, MemOperand(r7, kLevelOffset)); 2382 ldr(r1, MemOperand(r7, kLevelOffset));
2383 cmp(r1, r6); 2383 cmp(r1, r6);
2384 Check(eq, kUnexpectedLevelAfterReturnFromApiCall); 2384 Check(eq, "Unexpected level after return from api call");
2385 } 2385 }
2386 sub(r6, r6, Operand(1)); 2386 sub(r6, r6, Operand(1));
2387 str(r6, MemOperand(r7, kLevelOffset)); 2387 str(r6, MemOperand(r7, kLevelOffset));
2388 ldr(ip, MemOperand(r7, kLimitOffset)); 2388 ldr(ip, MemOperand(r7, kLimitOffset));
2389 cmp(r5, ip); 2389 cmp(r5, ip);
2390 b(ne, &delete_allocated_handles); 2390 b(ne, &delete_allocated_handles);
2391 2391
2392 // Check if the function scheduled an exception. 2392 // Check if the function scheduled an exception.
2393 bind(&leave_exit_frame); 2393 bind(&leave_exit_frame);
2394 LoadRoot(r4, Heap::kTheHoleValueRootIndex); 2394 LoadRoot(r4, Heap::kTheHoleValueRootIndex);
(...skipping 380 matching lines...)
2775 ASSERT(value > 0); 2775 ASSERT(value > 0);
2776 if (FLAG_native_code_counters && counter->Enabled()) { 2776 if (FLAG_native_code_counters && counter->Enabled()) {
2777 mov(scratch2, Operand(ExternalReference(counter))); 2777 mov(scratch2, Operand(ExternalReference(counter)));
2778 ldr(scratch1, MemOperand(scratch2)); 2778 ldr(scratch1, MemOperand(scratch2));
2779 sub(scratch1, scratch1, Operand(value)); 2779 sub(scratch1, scratch1, Operand(value));
2780 str(scratch1, MemOperand(scratch2)); 2780 str(scratch1, MemOperand(scratch2));
2781 } 2781 }
2782 } 2782 }
2783 2783
2784 2784
2785 void MacroAssembler::Assert(Condition cond, BailoutReason reason) { 2785 void MacroAssembler::Assert(Condition cond, const char* msg) {
2786 if (emit_debug_code()) 2786 if (emit_debug_code())
2787 Check(cond, reason); 2787 Check(cond, msg);
2788 } 2788 }
2789 2789
2790 2790
2791 void MacroAssembler::AssertFastElements(Register elements) { 2791 void MacroAssembler::AssertFastElements(Register elements) {
2792 if (emit_debug_code()) { 2792 if (emit_debug_code()) {
2793 ASSERT(!elements.is(ip)); 2793 ASSERT(!elements.is(ip));
2794 Label ok; 2794 Label ok;
2795 push(elements); 2795 push(elements);
2796 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); 2796 ldr(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2797 LoadRoot(ip, Heap::kFixedArrayMapRootIndex); 2797 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2798 cmp(elements, ip); 2798 cmp(elements, ip);
2799 b(eq, &ok); 2799 b(eq, &ok);
2800 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex); 2800 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2801 cmp(elements, ip); 2801 cmp(elements, ip);
2802 b(eq, &ok); 2802 b(eq, &ok);
2803 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex); 2803 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2804 cmp(elements, ip); 2804 cmp(elements, ip);
2805 b(eq, &ok); 2805 b(eq, &ok);
2806 Abort(kJSObjectWithFastElementsMapHasSlowElements); 2806 Abort("JSObject with fast elements map has slow elements");
2807 bind(&ok); 2807 bind(&ok);
2808 pop(elements); 2808 pop(elements);
2809 } 2809 }
2810 } 2810 }
2811 2811
2812 2812
2813 void MacroAssembler::Check(Condition cond, BailoutReason reason) { 2813 void MacroAssembler::Check(Condition cond, const char* msg) {
2814 Label L; 2814 Label L;
2815 b(cond, &L); 2815 b(cond, &L);
2816 Abort(reason); 2816 Abort(msg);
2817 // will not return here 2817 // will not return here
2818 bind(&L); 2818 bind(&L);
2819 } 2819 }
2820 2820
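Note: both sides of this chunk keep the same control flow; they differ only in whether Check, Assert, and Abort take a BailoutReason constant or a raw const char* message. The BailoutReason constants are presumably generated from a macro list that pairs each kFoo symbol with its message string, which is what GetBailoutReason(reason) in Abort below would map back to text. A minimal sketch of that pattern (the list name and entries are illustrative, not V8's actual definitions):

// Illustrative only: the real list is defined elsewhere in V8 and is far longer.
#define BAILOUT_MESSAGES_LIST(V)                                     \
  V(kUnexpectedAllocationTop, "Unexpected allocation top")           \
  V(kWrongAddressOrValuePassedToRecordWrite,                         \
    "Wrong address or value passed to RecordWrite")

#define DECLARE_ENUM(Symbol, Message) Symbol,
enum BailoutReason { BAILOUT_MESSAGES_LIST(DECLARE_ENUM) kLastBailoutReason };
#undef DECLARE_ENUM

// Map a BailoutReason back to its human-readable message.
inline const char* GetBailoutReason(BailoutReason reason) {
#define DECLARE_MESSAGE(Symbol, Message) Message,
  static const char* const kMessages[] = {
      BAILOUT_MESSAGES_LIST(DECLARE_MESSAGE)};
#undef DECLARE_MESSAGE
  return kMessages[reason];
}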
2821 2821
2822 void MacroAssembler::Abort(BailoutReason reason) { 2822 void MacroAssembler::Abort(const char* msg) {
2823 Label abort_start; 2823 Label abort_start;
2824 bind(&abort_start); 2824 bind(&abort_start);
2825 // We want to pass the msg string like a smi to avoid GC 2825 // We want to pass the msg string like a smi to avoid GC
2826 // problems, however msg is not guaranteed to be aligned 2826 // problems, however msg is not guaranteed to be aligned
2827 // properly. Instead, we pass an aligned pointer that is 2827 // properly. Instead, we pass an aligned pointer that is
2828 // a proper v8 smi, but also pass the alignment difference 2828 // a proper v8 smi, but also pass the alignment difference
2829 // from the real pointer as a smi. 2829 // from the real pointer as a smi.
2830 const char* msg = GetBailoutReason(reason);
2831 intptr_t p1 = reinterpret_cast<intptr_t>(msg); 2830 intptr_t p1 = reinterpret_cast<intptr_t>(msg);
2832 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag; 2831 intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
2833 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi()); 2832 ASSERT(reinterpret_cast<Object*>(p0)->IsSmi());
2834 #ifdef DEBUG 2833 #ifdef DEBUG
2835 if (msg != NULL) { 2834 if (msg != NULL) {
2836 RecordComment("Abort message: "); 2835 RecordComment("Abort message: ");
2837 RecordComment(msg); 2836 RecordComment(msg);
2838 } 2837 }
2839 #endif 2838 #endif
2840 2839
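Note: the comment above explains the constraint, and the two intptr_t lines implement it: p1 is the raw message pointer and p0 is that pointer rounded down so its bit pattern is a valid Smi; presumably the alignment difference travels alongside it (as the comment says), so the original pointer can be rebuilt on the other side. A standalone sketch of that arithmetic, assuming the usual 32-bit tagging constants (kSmiTag == 0, kSmiTagMask == 1):

#include <cassert>
#include <cstdint>

int main() {
  const intptr_t kSmiTag = 0;      // assumed 32-bit V8 tagging constants
  const intptr_t kSmiTagMask = 1;

  const char* msg = "Unexpected allocation top";
  intptr_t p1 = reinterpret_cast<intptr_t>(msg);
  // Round down so the low tag bit is clear: p0 is now a valid Smi pattern.
  intptr_t p0 = (p1 & ~kSmiTagMask) + kSmiTag;
  intptr_t delta = p1 - p0;  // 0 or 1, itself small enough to be a Smi

  // The receiver can reconstruct the real pointer from the two values.
  assert(reinterpret_cast<const char*>(p0 + delta) == msg);
  return 0;
}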
(...skipping 122 matching lines...)
2963 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, 2962 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2964 Register map, 2963 Register map,
2965 Register scratch) { 2964 Register scratch) {
2966 // Load the initial map. The global functions all have initial maps. 2965 // Load the initial map. The global functions all have initial maps.
2967 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 2966 ldr(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2968 if (emit_debug_code()) { 2967 if (emit_debug_code()) {
2969 Label ok, fail; 2968 Label ok, fail;
2970 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); 2969 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2971 b(&ok); 2970 b(&ok);
2972 bind(&fail); 2971 bind(&fail);
2973 Abort(kGlobalFunctionsMustHaveInitialMap); 2972 Abort("Global functions must have initial map");
2974 bind(&ok); 2973 bind(&ok);
2975 } 2974 }
2976 } 2975 }
2977 2976
2978 2977
2979 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( 2978 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2980 Register reg, 2979 Register reg,
2981 Register scratch, 2980 Register scratch,
2982 Label* not_power_of_two_or_zero) { 2981 Label* not_power_of_two_or_zero) {
2983 sub(scratch, reg, Operand(1), SetCC); 2982 sub(scratch, reg, Operand(1), SetCC);
(...skipping 48 matching lines...)
3032 tst(reg1, Operand(kSmiTagMask)); 3031 tst(reg1, Operand(kSmiTagMask));
3033 tst(reg2, Operand(kSmiTagMask), ne); 3032 tst(reg2, Operand(kSmiTagMask), ne);
3034 b(eq, on_either_smi); 3033 b(eq, on_either_smi);
3035 } 3034 }
3036 3035
3037 3036
3038 void MacroAssembler::AssertNotSmi(Register object) { 3037 void MacroAssembler::AssertNotSmi(Register object) {
3039 if (emit_debug_code()) { 3038 if (emit_debug_code()) {
3040 STATIC_ASSERT(kSmiTag == 0); 3039 STATIC_ASSERT(kSmiTag == 0);
3041 tst(object, Operand(kSmiTagMask)); 3040 tst(object, Operand(kSmiTagMask));
3042 Check(ne, kOperandIsASmi); 3041 Check(ne, "Operand is a smi");
3043 } 3042 }
3044 } 3043 }
3045 3044
3046 3045
3047 void MacroAssembler::AssertSmi(Register object) { 3046 void MacroAssembler::AssertSmi(Register object) {
3048 if (emit_debug_code()) { 3047 if (emit_debug_code()) {
3049 STATIC_ASSERT(kSmiTag == 0); 3048 STATIC_ASSERT(kSmiTag == 0);
3050 tst(object, Operand(kSmiTagMask)); 3049 tst(object, Operand(kSmiTagMask));
3051 Check(eq, kOperandIsNotSmi); 3050 Check(eq, "Operand is not smi");
3052 } 3051 }
3053 } 3052 }
3054 3053
3055 3054
3056 void MacroAssembler::AssertString(Register object) { 3055 void MacroAssembler::AssertString(Register object) {
3057 if (emit_debug_code()) { 3056 if (emit_debug_code()) {
3058 STATIC_ASSERT(kSmiTag == 0); 3057 STATIC_ASSERT(kSmiTag == 0);
3059 tst(object, Operand(kSmiTagMask)); 3058 tst(object, Operand(kSmiTagMask));
3060 Check(ne, kOperandIsASmiAndNotAString); 3059 Check(ne, "Operand is a smi and not a string");
3061 push(object); 3060 push(object);
3062 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); 3061 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3063 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE); 3062 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
3064 pop(object); 3063 pop(object);
3065 Check(lo, kOperandIsNotAString); 3064 Check(lo, "Operand is not a string");
3066 } 3065 }
3067 } 3066 }
3068 3067
3069 3068
3070 void MacroAssembler::AssertName(Register object) { 3069 void MacroAssembler::AssertName(Register object) {
3071 if (emit_debug_code()) { 3070 if (emit_debug_code()) {
3072 STATIC_ASSERT(kSmiTag == 0); 3071 STATIC_ASSERT(kSmiTag == 0);
3073 tst(object, Operand(kSmiTagMask)); 3072 tst(object, Operand(kSmiTagMask));
3074 Check(ne, kOperandIsASmiAndNotAName); 3073 Check(ne, "Operand is a smi and not a name");
3075 push(object); 3074 push(object);
3076 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset)); 3075 ldr(object, FieldMemOperand(object, HeapObject::kMapOffset));
3077 CompareInstanceType(object, object, LAST_NAME_TYPE); 3076 CompareInstanceType(object, object, LAST_NAME_TYPE);
3078 pop(object); 3077 pop(object);
3079 Check(le, kOperandIsNotAName); 3078 Check(le, "Operand is not a name");
3080 } 3079 }
3081 } 3080 }
3082 3081
3083 3082
3084 3083
3085 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { 3084 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
3086 if (emit_debug_code()) { 3085 if (emit_debug_code()) {
3087 CompareRoot(reg, index); 3086 CompareRoot(reg, index);
3088 Check(eq, kHeapNumberMapRegisterClobbered); 3087 Check(eq, "HeapNumberMap register clobbered.");
3089 } 3088 }
3090 } 3089 }
3091 3090
3092 3091
3093 void MacroAssembler::JumpIfNotHeapNumber(Register object, 3092 void MacroAssembler::JumpIfNotHeapNumber(Register object,
3094 Register heap_number_map, 3093 Register heap_number_map,
3095 Register scratch, 3094 Register scratch,
3096 Label* on_not_heap_number) { 3095 Label* on_not_heap_number) {
3097 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 3096 ldr(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3098 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 3097 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
(...skipping 125 matching lines...)
3224 b(eq, &word_loop); 3223 b(eq, &word_loop);
3225 ldrb(scratch, MemOperand(src, 1, PostIndex)); 3224 ldrb(scratch, MemOperand(src, 1, PostIndex));
3226 strb(scratch, MemOperand(dst, 1, PostIndex)); 3225 strb(scratch, MemOperand(dst, 1, PostIndex));
3227 sub(length, length, Operand(1), SetCC); 3226 sub(length, length, Operand(1), SetCC);
3228 b(ne, &byte_loop_1); 3227 b(ne, &byte_loop_1);
3229 3228
3230 // Copy bytes in word size chunks. 3229 // Copy bytes in word size chunks.
3231 bind(&word_loop); 3230 bind(&word_loop);
3232 if (emit_debug_code()) { 3231 if (emit_debug_code()) {
3233 tst(src, Operand(kPointerSize - 1)); 3232 tst(src, Operand(kPointerSize - 1));
3234 Assert(eq, kExpectingAlignmentForCopyBytes); 3233 Assert(eq, "Expecting alignment for CopyBytes");
3235 } 3234 }
3236 cmp(length, Operand(kPointerSize)); 3235 cmp(length, Operand(kPointerSize));
3237 b(lt, &byte_loop); 3236 b(lt, &byte_loop);
3238 ldr(scratch, MemOperand(src, kPointerSize, PostIndex)); 3237 ldr(scratch, MemOperand(src, kPointerSize, PostIndex));
3239 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) { 3238 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3240 str(scratch, MemOperand(dst, kPointerSize, PostIndex)); 3239 str(scratch, MemOperand(dst, kPointerSize, PostIndex));
3241 } else { 3240 } else {
3242 strb(scratch, MemOperand(dst, 1, PostIndex)); 3241 strb(scratch, MemOperand(dst, 1, PostIndex));
3243 mov(scratch, Operand(scratch, LSR, 8)); 3242 mov(scratch, Operand(scratch, LSR, 8));
3244 strb(scratch, MemOperand(dst, 1, PostIndex)); 3243 strb(scratch, MemOperand(dst, 1, PostIndex));
(...skipping 243 matching lines...)
3488 3487
3489 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location, 3488 void MacroAssembler::GetRelocatedValueLocation(Register ldr_location,
3490 Register result) { 3489 Register result) {
3491 const uint32_t kLdrOffsetMask = (1 << 12) - 1; 3490 const uint32_t kLdrOffsetMask = (1 << 12) - 1;
3492 const int32_t kPCRegOffset = 2 * kPointerSize; 3491 const int32_t kPCRegOffset = 2 * kPointerSize;
3493 ldr(result, MemOperand(ldr_location)); 3492 ldr(result, MemOperand(ldr_location));
3494 if (emit_debug_code()) { 3493 if (emit_debug_code()) {
3495 // Check that the instruction is a ldr reg, [pc + offset] . 3494 // Check that the instruction is a ldr reg, [pc + offset] .
3496 and_(result, result, Operand(kLdrPCPattern)); 3495 and_(result, result, Operand(kLdrPCPattern));
3497 cmp(result, Operand(kLdrPCPattern)); 3496 cmp(result, Operand(kLdrPCPattern));
3498 Check(eq, kTheInstructionToPatchShouldBeALoadFromPc); 3497 Check(eq, "The instruction to patch should be a load from pc.");
3499 // Result was clobbered. Restore it. 3498 // Result was clobbered. Restore it.
3500 ldr(result, MemOperand(ldr_location)); 3499 ldr(result, MemOperand(ldr_location));
3501 } 3500 }
3502 // Get the address of the constant. 3501 // Get the address of the constant.
3503 and_(result, result, Operand(kLdrOffsetMask)); 3502 and_(result, result, Operand(kLdrOffsetMask));
3504 add(result, ldr_location, Operand(result)); 3503 add(result, ldr_location, Operand(result));
3505 add(result, result, Operand(kPCRegOffset)); 3504 add(result, result, Operand(kPCRegOffset));
3506 } 3505 }
3507 3506
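Note: GetRelocatedValueLocation decodes a pc-relative load. The low 12 bits of an ARM "ldr rX, [pc, #imm12]" are the immediate offset, and the pc value the instruction sees is the instruction's own address plus 8, hence kPCRegOffset = 2 * kPointerSize. A sketch of the same address computation in plain C++ (the helper name is made up; only positive offsets are handled, mirroring the code above):

#include <cstdint>

// Address that a pc-relative "ldr rX, [pc, #imm12]" at ldr_location reads from.
inline uintptr_t ConstantPoolEntryAddress(uintptr_t ldr_location,
                                          uint32_t instr) {
  const uint32_t kLdrOffsetMask = (1u << 12) - 1;  // low 12 bits: immediate
  const uintptr_t kPCRegOffset = 8;                // 2 * kPointerSize on ARM
  return ldr_location + (instr & kLdrOffsetMask) + kPCRegOffset;
}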
3508 3507
(...skipping 368 matching lines...)
3877 void CodePatcher::EmitCondition(Condition cond) { 3876 void CodePatcher::EmitCondition(Condition cond) {
3878 Instr instr = Assembler::instr_at(masm_.pc_); 3877 Instr instr = Assembler::instr_at(masm_.pc_);
3879 instr = (instr & ~kCondMask) | cond; 3878 instr = (instr & ~kCondMask) | cond;
3880 masm_.emit(instr); 3879 masm_.emit(instr);
3881 } 3880 }
3882 3881
3883 3882
3884 } } // namespace v8::internal 3883 } } // namespace v8::internal
3885 3884
3886 #endif // V8_TARGET_ARCH_ARM 3885 #endif // V8_TARGET_ARCH_ARM