Chromium Code Reviews

Unified diff: runtime/vm/stub_code_arm.cc

Issue 2951333002: Moves the top_ and end_ words of the Scavenger into the mutator thread. (Closed)
Patch Set: Removes the ZeroSizeScavenger test. Proper testing requires a second VM isolate. Created 3 years, 5 months ago
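
The change is easiest to see in terms of data layout. Previously the allocation stubs reached the new-space bump cursor through the heap (THR -> heap_ -> Scavenger's top_/end_); this CL caches those two words directly on the mutator Thread, so stubs address them straight off THR. A minimal C++ sketch of the two access patterns, with illustrative field names (not the VM's actual declarations):

#include <cstdint>
typedef uintptr_t uword;

// Hedged sketch; Scavenger/Heap/Thread layouts are simplified here.
struct Scavenger { uword top_; uword end_; };  // new-space bump cursor
struct Heap { Scavenger new_space_; };

// Before this CL: stubs loaded the Heap* from the Thread, then read
// top_/end_ out of the Scavenger -- two dependent loads per allocation.
struct ThreadBefore { Heap* heap_; };
uword LoadTopBefore(ThreadBefore* thr) {
  return thr->heap_->new_space_.top_;
}

// After this CL: the Scavenger's top and end words live on the mutator
// Thread itself, so a stub needs a single THR-relative load.
struct ThreadAfter { uword top_; uword end_; };
uword LoadTopAfter(ThreadAfter* thr) {
  return thr->top_;
}

This is exactly the shape of the diff below: every Heap::TopOffset(space)/Heap::EndOffset(space) access through a scratch register becomes a Thread::top_offset()/Thread::end_offset() access through THR, freeing that scratch register in the fast path.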
@@ -1,10 +1,10 @@
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.

 #include "vm/globals.h"
 #if defined(TARGET_ARCH_ARM)

 #include "vm/assembler.h"
 #include "vm/compiler.h"
 #include "vm/cpu.h"
(...skipping 684 matching lines...)
@@ -695,39 +695,38 @@
   NOT_IN_PRODUCT(__ MaybeTraceAllocation(cid, R4, &slow_case));

   const intptr_t fixed_size_plus_alignment_padding =
       sizeof(RawArray) + kObjectAlignment - 1;
   __ LoadImmediate(R9, fixed_size_plus_alignment_padding);
   __ add(R9, R9, Operand(R3, LSL, 1));  // R3 is a Smi.
   ASSERT(kSmiTagShift == 1);
   __ bic(R9, R9, Operand(kObjectAlignment - 1));

   // R9: Allocation size.
-  Heap::Space space = Heap::kNew;
-  __ ldr(R8, Address(THR, Thread::heap_offset()));
+  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
   // Potential new object start.
-  __ ldr(R0, Address(R8, Heap::TopOffset(space)));
+  __ ldr(R0, Address(THR, Thread::top_offset()));
   __ adds(NOTFP, R0, Operand(R9));  // Potential next object start.
   __ b(&slow_case, CS);             // Branch if unsigned overflow.

   // Check if the allocation fits into the remaining space.
   // R0: potential new object start.
   // NOTFP: potential next object start.
   // R9: allocation size.
-  __ ldr(R3, Address(R8, Heap::EndOffset(space)));
+  __ ldr(R3, Address(THR, Thread::end_offset()));
   __ cmp(NOTFP, Operand(R3));
   __ b(&slow_case, CS);

   // Successfully allocated the object(s), now update top to point to
   // next object start and initialize the object.
   NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R3, cid));
-  __ str(NOTFP, Address(R8, Heap::TopOffset(space)));
+  __ str(R7, Address(THR, Thread::top_offset()));
   __ add(R0, R0, Operand(kHeapObjectTag));

   // Initialize the tags.
   // R0: new object start as a tagged pointer.
   // R3: allocation stats address.
   // NOTFP: new object end address.
   // R9: allocation size.
   {
     const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;

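The hunk above is the classic bump-pointer fast path: add the aligned size to top, check for unsigned overflow, compare against end, and fall back to the runtime on failure. A rough C++ equivalent of what the ARM sequence computes after this CL; MutatorThread and TryAllocate are names assumed for illustration, not VM API:

#include <cstdint>
typedef uintptr_t uword;

struct MutatorThread { uword top; uword end; };  // assumed layout

// Returns the untagged object start, or 0 to request the slow path.
uword TryAllocate(MutatorThread* thr, uword size) {
  uword object = thr->top;         // __ ldr(R0, Address(THR, Thread::top_offset()))
  uword next = object + size;      // __ adds(NOTFP, R0, Operand(R9))
  if (next < object) return 0;     // __ b(&slow_case, CS): unsigned overflow
  if (next >= thr->end) return 0;  // __ ldr/__ cmp/__ b(&slow_case, CS)
  thr->top = next;                 // __ str(..., Address(THR, Thread::top_offset()))
  return object;                   // caller adds kHeapObjectTag and sets tags
}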
(...skipping 188 matching lines...)
@@ -922,47 +921,44 @@
   __ LoadImmediate(R2, fixed_size_plus_alignment_padding);
   __ add(R2, R2, Operand(R1, LSL, 2));
   ASSERT(kSmiTagShift == 1);
   __ bic(R2, R2, Operand(kObjectAlignment - 1));

   NOT_IN_PRODUCT(__ MaybeTraceAllocation(kContextCid, R8, &slow_case));
   // Now allocate the object.
   // R1: number of context variables.
   // R2: object size.
   const intptr_t cid = kContextCid;
-  Heap::Space space = Heap::kNew;
-  __ ldr(R9, Address(THR, Thread::heap_offset()));
-  __ ldr(R0, Address(R9, Heap::TopOffset(space)));
+  NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
+  __ ldr(R0, Address(THR, Thread::top_offset()));
   __ add(R3, R2, Operand(R0));
   // Check if the allocation fits into the remaining space.
   // R0: potential new object.
   // R1: number of context variables.
   // R2: object size.
   // R3: potential next object start.
-  // R9: heap.
-  __ ldr(IP, Address(R9, Heap::EndOffset(space)));
+  __ ldr(IP, Address(THR, Thread::end_offset()));
   __ cmp(R3, Operand(IP));
   if (FLAG_use_slow_path) {
     __ b(&slow_case);
   } else {
     __ b(&slow_case, CS);  // Branch if unsigned higher or equal.
   }

   // Successfully allocated the object, now update top to point to
   // next object start and initialize the object.
   // R0: new object start (untagged).
   // R1: number of context variables.
   // R2: object size.
   // R3: next object start.
-  // R9: heap.
   NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid));
-  __ str(R3, Address(R9, Heap::TopOffset(space)));
+  __ str(R3, Address(THR, Thread::top_offset()));
   __ add(R0, R0, Operand(kHeapObjectTag));

   // Calculate the size tag.
   // R0: new object (tagged).
   // R1: number of context variables.
   // R2: object size.
   // R3: next object start.
   // R4: allocation stats address.
   const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
   __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag);
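Note that Heap::Space space = Heap::kNew becomes NOT_IN_PRODUCT(...) in each stub: with top and end read off THR, the space is only needed by the allocation-stats counters, which PRODUCT builds compile out. A sketch of the usual shape of such a macro, not the VM's actual definition:

// Hedged sketch of the NOT_IN_PRODUCT pattern.
#if defined(PRODUCT)
#define NOT_IN_PRODUCT(code)        // expands to nothing in PRODUCT builds
#else
#define NOT_IN_PRODUCT(code) code   // kept in builds with allocation stats
#endif

With this, the space local only exists when the stats code that consumes it (LoadAllocationStatsAddress and friends, also wrapped in NOT_IN_PRODUCT above) is compiled in, so PRODUCT builds see no unused-variable warning.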
(...skipping 151 matching lines...)
@@ -1120,36 +1116,34 @@
   // straight line code.
   const int kInlineInstanceSize = 12;
   const intptr_t instance_size = cls.instance_size();
   ASSERT(instance_size > 0);
   Isolate* isolate = Isolate::Current();
   if (FLAG_inline_alloc && Heap::IsAllocatableInNewSpace(instance_size) &&
       !cls.TraceAllocation(isolate)) {
     Label slow_case;
     // Allocate the object and update top to point to
     // next object start and initialize the allocated object.
-    Heap::Space space = Heap::kNew;
-    __ ldr(R9, Address(THR, Thread::heap_offset()));
-    __ ldr(R0, Address(R9, Heap::TopOffset(space)));
+    NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
+    __ ldr(R0, Address(THR, Thread::top_offset()));
     __ AddImmediate(R1, R0, instance_size);
     // Check if the allocation fits into the remaining space.
     // R0: potential new object start.
     // R1: potential next object start.
-    // R9: heap.
-    __ ldr(IP, Address(R9, Heap::EndOffset(space)));
+    __ ldr(IP, Address(THR, Thread::end_offset()));
     __ cmp(R1, Operand(IP));
     if (FLAG_use_slow_path) {
       __ b(&slow_case);
     } else {
       __ b(&slow_case, CS);  // Unsigned higher or equal.
     }
-    __ str(R1, Address(R9, Heap::TopOffset(space)));
+    __ str(R1, Address(THR, Thread::top_offset()));

     // Load the address of the allocation stats table. We split up the load
     // and the increment so that the dependent load is not too nearby.
     NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R9, cls.id()));

     // R0: new object start.
     // R1: next object start.
     // R9: allocation stats table.
     // Set the tags.
     uint32_t tags = 0;
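Unchanged across these hunks, but worth noting while reading them: when FLAG_use_slow_path is set, the stub branches to the runtime unconditionally, which disables the inline fast path for testing. In C++ terms; the flag name comes from the diff, but the helper below is invented for illustration:

#include <cstdint>
typedef uintptr_t uword;

extern bool FLAG_use_slow_path;  // defined by the VM's flag machinery

bool MustTakeSlowPath(uword next, uword end) {
  if (FLAG_use_slow_path) return true;  // __ b(&slow_case): always slow
  return next >= end;                   // __ b(&slow_case, CS): out of space
}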
(...skipping 1171 matching lines...)
@@ -2327,10 +2321,10 @@
 }


 void StubCode::GenerateAsynchronousGapMarkerStub(Assembler* assembler) {
   __ bkpt(0);
 }

 }  // namespace dart

 #endif  // defined TARGET_ARCH_ARM
