| OLD | NEW |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
| 6 #if defined(TARGET_ARCH_ARM) | 6 #if defined(TARGET_ARCH_ARM) |
| 7 | 7 |
| 8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
| 9 #include "vm/compiler.h" | 9 #include "vm/compiler.h" |
| 10 #include "vm/cpu.h" | 10 #include "vm/cpu.h" |
| (...skipping 685 matching lines...) | |
| 696 | 696 |
| 697 const intptr_t fixed_size_plus_alignment_padding = | 697 const intptr_t fixed_size_plus_alignment_padding = |
| 698 sizeof(RawArray) + kObjectAlignment - 1; | 698 sizeof(RawArray) + kObjectAlignment - 1; |
| 699 __ LoadImmediate(R9, fixed_size_plus_alignment_padding); | 699 __ LoadImmediate(R9, fixed_size_plus_alignment_padding); |
| 700 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi. | 700 __ add(R9, R9, Operand(R3, LSL, 1)); // R3 is a Smi. |
| 701 ASSERT(kSmiTagShift == 1); | 701 ASSERT(kSmiTagShift == 1); |
| 702 __ bic(R9, R9, Operand(kObjectAlignment - 1)); | 702 __ bic(R9, R9, Operand(kObjectAlignment - 1)); |
| 703 | 703 |
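The three instructions above are a standard align-up computed around the Smi-tagged length. A hedged C++ sketch of the same arithmetic (`length_smi` is an illustrative parameter name, not from the stub):

```cpp
// R3 holds the array length as a Smi (value << 1, per the ASSERT above).
// One further left shift (LSL 1) gives length << 2 == length * kWordSize
// on 32-bit ARM, i.e. one word per element. Pre-adding kObjectAlignment - 1
// and clearing the low bits (bic) rounds the total up to the next
// kObjectAlignment boundary.
intptr_t ArrayAllocationSize(intptr_t length_smi) {
  intptr_t size = sizeof(RawArray) + kObjectAlignment - 1 + (length_smi << 1);
  return size & ~(kObjectAlignment - 1);  // bic R9, R9, #(kObjectAlignment - 1)
}
```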
| 704 // R9: Allocation size. | 704 // R9: Allocation size. |
| 705 Heap::Space space = Heap::kNew; | 705 Heap::Space space = Heap::kNew; |
| 706 __ ldr(R8, Address(THR, Thread::heap_offset())); | |
| 707 // Potential new object start. | 706 // Potential new object start. |
| 708 __ ldr(R0, Address(R8, Heap::TopOffset(space))); | 707 __ ldr(R0, Address(THR, Thread::top_offset())); |
| 709 __ adds(NOTFP, R0, Operand(R9)); // Potential next object start. | 708 __ adds(NOTFP, R0, Operand(R9)); // Potential next object start. |
| 710 __ b(&slow_case, CS); // Branch if unsigned overflow. | 709 __ b(&slow_case, CS); // Branch if unsigned overflow. |
| 711 | 710 |
| 712 // Check if the allocation fits into the remaining space. | 711 // Check if the allocation fits into the remaining space. |
| 713 // R0: potential new object start. | 712 // R0: potential new object start. |
| 714 // NOTFP: potential next object start. | 713 // NOTFP: potential next object start. |
| 715 // R9: allocation size. | 714 // R9: allocation size. |
| 716 __ ldr(R3, Address(R8, Heap::EndOffset(space))); | 715 __ ldr(R3, Address(THR, Thread::end_offset())); |
| 717 __ cmp(NOTFP, Operand(R3)); | 716 __ cmp(NOTFP, Operand(R3)); |
| 718 __ b(&slow_case, CS); | 717 __ b(&slow_case, CS); |
| 719 | 718 |
| 720 // Successfully allocated the object(s), now update top to point to | 719 // Successfully allocated the object(s), now update top to point to |
| 721 // next object start and initialize the object. | 720 // next object start and initialize the object. |
| 722 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R3, cid)); | 721 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R3, cid)); |
| 723 __ str(NOTFP, Address(R8, Heap::TopOffset(space))); | 722 __ str(NOTFP, Address(THR, Thread::top_offset())); |
| 724 __ add(R0, R0, Operand(kHeapObjectTag)); | 723 __ add(R0, R0, Operand(kHeapObjectTag)); |
| 725 | 724 |
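The visible effect of this change is that the fast path no longer materializes a Heap* in a scratch register: top and end are read straight off THR. A hedged C-level sketch of the sequence above (the accessor names `top()`, `end()`, and `set_top()` are assumptions for illustration):

```cpp
// Bump-pointer fast path: reserve `size` bytes in new space or bail out.
uword TryAllocateRaw(Thread* thread, intptr_t size) {
  uword top = thread->top();            // ldr R0, [THR, Thread::top_offset()]
  uword next = top + size;              // adds NOTFP, R0, R9
  if (next < top) return 0;             // b(&slow_case, CS): unsigned overflow
  if (next >= thread->end()) return 0;  // cmp NOTFP, R3; b(&slow_case, CS)
  thread->set_top(next);                // str NOTFP, [THR, Thread::top_offset()]
  return top + kHeapObjectTag;          // add R0, R0, #kHeapObjectTag
}
```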
| 726 // Initialize the tags. | 725 // Initialize the tags. |
| 727 // R0: new object start as a tagged pointer. | 726 // R0: new object start as a tagged pointer. |
| 728 // R3: allocation stats address. | 727 // R3: allocation stats address. |
| 729 // NOTFP: new object end address. | 728 // NOTFP: new object end address. |
| 730 // R9: allocation size. | 729 // R9: allocation size. |
| 731 { | 730 { |
| 732 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 731 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
| 733 | 732 |
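For reference, the encoding this `shift` constant sets up, as a sketch based on RawObject::SizeTag (the handling of oversized objects is an assumption about the skipped lines):

```cpp
// The header stores the object size in kObjectAlignment units at
// kSizeTagPos, so for an aligned size:
//   (size >> kObjectAlignmentLog2) << kSizeTagPos == size << shift.
// Sizes above kMaxSizeTag encode as 0, meaning the real size lives elsewhere.
uword SizeTagBits(intptr_t size) {
  const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
  if (size > RawObject::SizeTag::kMaxSizeTag) return 0;
  return static_cast<uword>(size) << shift;
}
```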
| (...skipping 189 matching lines...) | |
| 923 __ add(R2, R2, Operand(R1, LSL, 2)); | 922 __ add(R2, R2, Operand(R1, LSL, 2)); |
| 924 ASSERT(kSmiTagShift == 1); | 923 ASSERT(kSmiTagShift == 1); |
| 925 __ bic(R2, R2, Operand(kObjectAlignment - 1)); | 924 __ bic(R2, R2, Operand(kObjectAlignment - 1)); |
| 926 | 925 |
| 927 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, R8, &slow_case)); | 926 NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, R8, &slow_case)); |
| 928 // Now allocate the object. | 927 // Now allocate the object. |
| 929 // R1: number of context variables. | 928 // R1: number of context variables. |
| 930 // R2: object size. | 929 // R2: object size. |
| 931 const intptr_t cid = kContextCid; | 930 const intptr_t cid = kContextCid; |
| 932 Heap::Space space = Heap::kNew; | 931 Heap::Space space = Heap::kNew; |
| 933 __ ldr(R9, Address(THR, Thread::heap_offset())); | 932 __ ldr(R0, Address(THR, Thread::top_offset())); |
| 934 __ ldr(R0, Address(R9, Heap::TopOffset(space))); | |
| 935 __ add(R3, R2, Operand(R0)); | 933 __ add(R3, R2, Operand(R0)); |
| 936 // Check if the allocation fits into the remaining space. | 934 // Check if the allocation fits into the remaining space. |
| 937 // R0: potential new object. | 935 // R0: potential new object. |
| 938 // R1: number of context variables. | 936 // R1: number of context variables. |
| 939 // R2: object size. | 937 // R2: object size. |
| 940 // R3: potential next object start. | 938 // R3: potential next object start. |
| 941 // R9: heap. | |
| 942 __ ldr(IP, Address(R9, Heap::EndOffset(space))); | 940 __ ldr(IP, Address(THR, Thread::end_offset())); |
| 943 __ cmp(R3, Operand(IP)); | 941 __ cmp(R3, Operand(IP)); |
| 944 if (FLAG_use_slow_path) { | 942 if (FLAG_use_slow_path) { |
| 945 __ b(&slow_case); | 943 __ b(&slow_case); |
| 946 } else { | 944 } else { |
| 947 __ b(&slow_case, CS); // Branch if unsigned higher or equal. | 945 __ b(&slow_case, CS); // Branch if unsigned higher or equal. |
| 948 } | 946 } |
| 949 | 947 |
| 950 // Successfully allocated the object, now update top to point to | 948 // Successfully allocated the object, now update top to point to |
| 951 // next object start and initialize the object. | 949 // next object start and initialize the object. |
| 952 // R0: new object start (untagged). | 950 // R0: new object start (untagged). |
| 953 // R1: number of context variables. | 951 // R1: number of context variables. |
| 954 // R2: object size. | 952 // R2: object size. |
| 955 // R3: next object start. | 953 // R3: next object start. |
| 956 // R9: heap. | |
| 957 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid)); | 955 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid)); |
| 958 __ str(R3, Address(R9, Heap::TopOffset(space))); | 956 __ str(R3, Address(THR, Thread::top_offset())); |
| 959 __ add(R0, R0, Operand(kHeapObjectTag)); | 957 __ add(R0, R0, Operand(kHeapObjectTag)); |
| 960 | 958 |
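The Context fast path mirrors the array one, except the `add` into R3 is not an `adds` with an overflow branch, presumably because the compiler bounds the variable count. A sketch under the same assumed Thread accessors (`num_vars` is treated as untagged here, which is an assumption):

```cpp
// Reserve space for a Context with num_vars slots (one word each on ARM32).
uword TryAllocateContext(Thread* thread, intptr_t num_vars) {
  const intptr_t size = Utils::RoundUp(
      sizeof(RawContext) + num_vars * kWordSize, kObjectAlignment);
  uword top = thread->top();
  uword next = top + size;
  if (FLAG_use_slow_path || next >= thread->end()) return 0;  // Slow path.
  thread->set_top(next);
  return top + kHeapObjectTag;  // Tagged pointer; header setup follows.
}
```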
| 961 // Calculate the size tag. | 959 // Calculate the size tag. |
| 962 // R0: new object (tagged). | 960 // R0: new object (tagged). |
| 963 // R1: number of context variables. | 961 // R1: number of context variables. |
| 964 // R2: object size. | 962 // R2: object size. |
| 965 // R3: next object start. | 963 // R3: next object start. |
| 966 // R4: allocation stats address. | 964 // R4: allocation stats address. |
| 967 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; | 965 const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2; |
| 968 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); | 966 __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); |
| (...skipping 152 matching lines...) | |
| 1121 const int kInlineInstanceSize = 12; | 1119 const int kInlineInstanceSize = 12; |
| 1122 const intptr_t instance_size = cls.instance_size(); | 1120 const intptr_t instance_size = cls.instance_size(); |
| 1123 ASSERT(instance_size > 0); | 1121 ASSERT(instance_size > 0); |
| 1124 Isolate* isolate = Isolate::Current(); | 1122 Isolate* isolate = Isolate::Current(); |
| 1125 if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) && | 1123 if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) && |
| 1126 !cls.TraceAllocation(isolate)) { | 1124 !cls.TraceAllocation(isolate)) { |
| 1127 Label slow_case; | 1125 Label slow_case; |
| 1128 // Allocate the object and update top to point to | 1126 // Allocate the object and update top to point to |
| 1129 // next object start and initialize the allocated object. | 1127 // next object start and initialize the allocated object. |
| 1130 Heap::Space space = Heap::kNew; | 1128 Heap::Space space = Heap::kNew; |
| 1131 __ ldr(R9, Address(THR, Thread::heap_offset())); | 1129 __ ldr(R0, Address(THR, Thread::top_offset())); |
| 1132 __ ldr(R0, Address(R9, Heap::TopOffset(space))); | |
| 1133 __ AddImmediate(R1, R0, instance_size); | 1130 __ AddImmediate(R1, R0, instance_size); |
| 1134 // Check if the allocation fits into the remaining space. | 1131 // Check if the allocation fits into the remaining space. |
| 1135 // R0: potential new object start. | 1132 // R0: potential new object start. |
| 1136 // R1: potential next object start. | 1133 // R1: potential next object start. |
| 1137 // R9: heap. | |
| 1138 __ ldr(IP, Address(R9, Heap::EndOffset(space))); | 1135 __ ldr(IP, Address(THR, Thread::end_offset())); |
| 1139 __ cmp(R1, Operand(IP)); | 1136 __ cmp(R1, Operand(IP)); |
| 1140 if (FLAG_use_slow_path) { | 1137 if (FLAG_use_slow_path) { |
| 1141 __ b(&slow_case); | 1138 __ b(&slow_case); |
| 1142 } else { | 1139 } else { |
| 1143 __ b(&slow_case, CS); // Unsigned higher or equal. | 1140 __ b(&slow_case, CS); // Unsigned higher or equal. |
| 1144 } | 1141 } |
| 1145 __ str(R1, Address(R9, Heap::TopOffset(space))); | 1142 __ str(R1, Address(THR, Thread::top_offset())); |
| 1146 | 1143 |
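Here instance_size is a compile-time constant bounded by IsAllocatableInNewSpace, which is presumably why a single end check suffices with no overflow branch. A sketch with the same assumed accessors:

```cpp
// Fixed-size instance fast path: the size is known when the stub is built.
uword TryAllocateInstance(Thread* thread, intptr_t instance_size) {
  uword top = thread->top();            // ldr R0, [THR, Thread::top_offset()]
  uword next = top + instance_size;     // AddImmediate(R1, R0, instance_size)
  if (FLAG_use_slow_path || next >= thread->end()) return 0;
  thread->set_top(next);                // str R1, [THR, Thread::top_offset()]
  return top;  // Still untagged; tagging and header setup follow below.
}
```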
| 1147 // Load the address of the allocation stats table. We split up the load | 1144 // Load the address of the allocation stats table. We split up the load |
| 1148 // and the increment so that the dependent load is not too nearby. | 1145 // and the increment so that the dependent load is not too nearby. |
| 1149 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R9, cls.id())); | 1146 NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R9, cls.id())); |
| 1150 | 1147 |
| 1151 // R0: new object start. | 1148 // R0: new object start. |
| 1152 // R1: next object start. | 1149 // R1: next object start. |
| 1153 // R9: allocation stats table. | 1150 // R9: allocation stats table. |
| 1154 // Set the tags. | 1151 // Set the tags. |
| 1155 uword tags = 0; | 1152 uword tags = 0; |
| (...skipping 1171 matching lines...) | |
| 2327 } | 2324 } |
| 2328 | 2325 |
| 2329 | 2326 |
| 2330 void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) { | 2327 void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) { |
| 2331 __ bkpt(0); | 2328 __ bkpt(0); |
| 2332 } | 2329 } |
| 2333 | 2330 |
| 2334 } // namespace dart | 2331 } // namespace dart |
| 2335 | 2332 |
| 2336 #endif // defined TARGET_ARCH_ARM | 2333 #endif // defined TARGET_ARCH_ARM |