Chromium Code Reviews

Unified Diff: runtime/vm/intrinsifier_arm.cc

Issue 2951333002: Moves the top_ and end_ words of the Scavenger into the mutator thread. (Closed)
Patch Set: Full removal of the heap's top/end offsets. Changed the allocation fast paths in the other architectures. Created 3 years, 5 months ago
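Reading the hunks below: previously the fast paths loaded the heap pointer into R3 from THR and then read Heap::TopOffset(space) and Heap::EndOffset(space); after this change they read the scavenger's top and end words directly off the mutator thread through Thread::top_offset() and Thread::end_offset(), so R3 no longer has to hold the heap pointer across the allocation. A minimal C++ model of the bump-pointer fast path these intrinsics emit is sketched here for orientation only; MutatorThread, top_, end_ and the constants are illustrative stand-ins, not the VM's declarations.

// Minimal sketch (not VM code): models the bump-pointer fast path after this
// CL, with top/end read from the mutator thread instead of the Heap object.
// MutatorThread, top_ and end_ are hypothetical stand-ins for Thread fields.
#include <cstdint>
#include <cstdio>

using uword = uintptr_t;

struct MutatorThread {
  uword top_;  // next free address in this thread's new-space region
  uword end_;  // first address past the region; allocations must stay below it
};

constexpr uword kObjectAlignment = 16;  // illustrative value
constexpr uword kHeapObjectTag = 1;     // illustrative value

// Returns a tagged pointer on success, or 0 to signal "take the slow path".
inline uword TryAllocate(MutatorThread* thr, uword instance_size) {
  uword top = thr->top_;             // ldr R0, [THR, Thread::top_offset()]
  uword next = top + instance_size;  // adds R1, R0, R2
  if (next < top) return 0;          // b &fall_through, CS (unsigned overflow)
  if (next >= thr->end_) return 0;   // ldr IP, [THR, end]; cmp; b CS
  thr->top_ = next;                  // str R1, [THR, Thread::top_offset()]
  return top + kHeapObjectTag;       // AddImmediate(R0, kHeapObjectTag)
}

int main() {
  uword region[64] = {};
  MutatorThread thr{reinterpret_cast<uword>(region),
                    reinterpret_cast<uword>(region) + sizeof(region)};
  uword obj = TryAllocate(&thr, 32);
  std::printf("allocated tagged pointer: %#zx\n", static_cast<size_t>(obj));
  return 0;
}

Returning 0 in the sketch corresponds to branching to the fall_through/fail labels in the diff, i.e. handing the allocation to the slow path.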
 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file
 // for details. All rights reserved. Use of this source code is governed by a
 // BSD-style license that can be found in the LICENSE file.
 
 #include "vm/globals.h"  // Needed here to get TARGET_ARCH_ARM.
 #if defined(TARGET_ARCH_ARM)
 
 #include "vm/intrinsifier.h"
 
 #include "vm/assembler.h"
(...skipping 163 matching lines...)
   /* Check for maximum allowed length. */ \
   /* R2: untagged array length. */ \
   __ CompareImmediate(R2, max_len); \
   __ b(&fall_through, GT); \
   __ mov(R2, Operand(R2, LSL, scale_shift)); \
   const intptr_t fixed_size_plus_alignment_padding = \
       sizeof(Raw##type_name) + kObjectAlignment - 1; \
   __ AddImmediate(R2, fixed_size_plus_alignment_padding); \
   __ bic(R2, R2, Operand(kObjectAlignment - 1)); \
   Heap::Space space = Heap::kNew; \
-  __ ldr(R3, Address(THR, Thread::heap_offset())); \
-  __ ldr(R0, Address(R3, Heap::TopOffset(space))); \
+  __ ldr(R0, Address(THR, Thread::top_offset())); \
   \
   /* R2: allocation size. */ \
   __ adds(R1, R0, Operand(R2)); \
   __ b(&fall_through, CS); /* Fail on unsigned overflow. */ \
   \
   /* Check if the allocation fits into the remaining space. */ \
   /* R0: potential new object start. */ \
   /* R1: potential next object start. */ \
   /* R2: allocation size. */ \
   /* R3: heap. */ \
-  __ ldr(IP, Address(R3, Heap::EndOffset(space))); \
+  __ ldr(IP, Address(THR, Thread::end_offset())); \
   __ cmp(R1, Operand(IP)); \
   __ b(&fall_through, CS); \
   \
   /* Successfully allocated the object(s), now update top to point to */ \
   /* next object start and initialize the object. */ \
   NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid)); \
-  __ str(R1, Address(R3, Heap::TopOffset(space))); \
+  __ str(R1, Address(THR, Thread::top_offset())); \
   __ AddImmediate(R0, kHeapObjectTag); \
   /* Initialize the tags. */ \
   /* R0: new object start as a tagged pointer. */ \
   /* R1: new object end address. */ \
   /* R2: allocation size. */ \
   /* R4: allocation stats address */ \
   { \
     __ CompareImmediate(R2, RawObject::SizeTag::kMaxSizeTag); \
     __ mov(R3, \
            Operand(R2, LSL, RawObject::kSizeTagPos - kObjectAlignmentLog2), \
(...skipping 1784 matching lines...)
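Independent of the top/end move, the size computation kept in the chunk above rounds the request up to object alignment: the untagged length is shifted by scale_shift, the fixed header size plus kObjectAlignment - 1 is added, and the bic clears the low bits. The OneByteString chunk below does the same AddImmediate/bic rounding without the shift. A hedged C++ equivalent with illustrative constants (the real values come from the VM headers):

// Sketch of the size rounding done by the AddImmediate + bic pair above.
// The constants and header sizes are illustrative, not the VM's values.
#include <cassert>
#include <cstdint>

constexpr uintptr_t kObjectAlignment = 16;

inline uintptr_t RoundedAllocationSize(uintptr_t untagged_length,
                                       uintptr_t scale_shift,
                                       uintptr_t fixed_size) {
  uintptr_t size = (untagged_length << scale_shift)  // payload bytes
                   + fixed_size + kObjectAlignment - 1;
  return size & ~(kObjectAlignment - 1);  // bic R2, R2, #(kObjectAlignment - 1)
}

int main() {
  assert(RoundedAllocationSize(10, 2, 24) == 64);  // 40 + 24 = 64, already aligned
  assert(RoundedAllocationSize(3, 3, 24) == 48);   // 24 + 24 = 48
  return 0;
}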
   __ mov(R8, Operand(length_reg));  // Save the length register.
   // TODO(koda): Protect against negative length and overflow here.
   __ SmiUntag(length_reg);
   const intptr_t fixed_size_plus_alignment_padding =
       sizeof(RawString) + kObjectAlignment - 1;
   __ AddImmediate(length_reg, fixed_size_plus_alignment_padding);
   __ bic(length_reg, length_reg, Operand(kObjectAlignment - 1));
 
   const intptr_t cid = kOneByteStringCid;
   Heap::Space space = Heap::kNew;
-  __ ldr(R3, Address(THR, Thread::heap_offset()));
-  __ ldr(R0, Address(R3, Heap::TopOffset(space)));
+  __ ldr(R0, Address(THR, Thread::top_offset()));
 
   // length_reg: allocation size.
   __ adds(R1, R0, Operand(length_reg));
   __ b(&fail, CS);  // Fail on unsigned overflow.
 
   // Check if the allocation fits into the remaining space.
   // R0: potential new object start.
   // R1: potential next object start.
   // R2: allocation size.
   // R3: heap.
-  __ ldr(NOTFP, Address(R3, Heap::EndOffset(space)));
+  __ ldr(NOTFP, Address(THR, Thread::end_offset()));
   __ cmp(R1, Operand(NOTFP));
   __ b(&fail, CS);
 
   // Successfully allocated the object(s), now update top to point to
   // next object start and initialize the object.
   NOT_IN_PRODUCT(__ LoadAllocationStatsAddress(R4, cid));
-  __ str(R1, Address(R3, Heap::TopOffset(space)));
+  __ str(R1, Address(THR, Thread::top_offset()));
   __ AddImmediate(R0, kHeapObjectTag);
 
   // Initialize the tags.
   // R0: new object start as a tagged pointer.
   // R1: new object end address.
   // R2: allocation size.
   // R4: allocation stats address.
   {
     const intptr_t shift = RawObject::kSizeTagPos - kObjectAlignmentLog2;
 
(...skipping 274 matching lines...)
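Both chunks keep the same two guards around the new THR-relative loads: adds followed by b CS sends unsigned wraparound of top + size to the slow path, and cmp against the end word followed by b CS does the same for any allocation whose next-object start would be at or past the limit (CS after cmp is unsigned higher-or-same on ARM). A small self-contained check of that logic, with made-up addresses:

// Demonstrates the two CS-guarded checks used in both allocation fast paths.
#include <cassert>
#include <cstdint>

using uword = uintptr_t;

// Returns true when the fast path may proceed (no overflow, fits below end).
inline bool FitsInRemainingSpace(uword top, uword size, uword end) {
  uword next = top + size;
  if (next < top) return false;  // adds ...; b &fail, CS   (unsigned overflow)
  return next < end;             // cmp next, end; b &fail, CS  (next >= end fails)
}

int main() {
  assert(FitsInRemainingSpace(0x1000, 0x100, 0x2000));         // fits
  assert(!FitsInRemainingSpace(0x1F80, 0x100, 0x2000));        // would cross end
  assert(!FitsInRemainingSpace(~uword{0} - 8, 0x100, 0x2000)); // wraps around
  return 0;
}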
 
 void Intrinsifier::SetAsyncThreadStackTrace(Assembler* assembler) {
   __ ldr(R0, Address(THR, Thread::async_stack_trace_offset()));
   __ LoadObject(R0, Object::null_object());
   __ Ret();
 }
 
 }  // namespace dart
 
 #endif  // defined TARGET_ARCH_ARM
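Not shown in this file: for the THR-relative loads above to resolve, Thread has to expose top_offset() and end_offset() over per-thread top_/end_ words. A plausible shape is sketched below, assuming the usual OFFSET_OF accessor pattern; the actual declarations belong to other files in this CL and may differ.

// Hypothetical sketch of the Thread-side support; the macro and field layout
// follow the VM's usual pattern but are assumptions, not this CL's code.
#include <cstddef>
#include <cstdint>

using uword = uintptr_t;
#define OFFSET_OF(type, field) static_cast<intptr_t>(offsetof(type, field))

class Thread {
 public:
  static intptr_t top_offset() { return OFFSET_OF(Thread, top_); }
  static intptr_t end_offset() { return OFFSET_OF(Thread, end_); }

 private:
  uword top_ = 0;  // scavenger bump-allocation cursor for this mutator thread
  uword end_ = 0;  // limit of the thread's current new-space region
};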
