Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(1275)

Unified Diff: runtime/vm/stub_code_arm64.cc

Issue 2951333002: Moves the top_ and end_ words of the Scavenger into the mutator thread. (Closed)
Patch Set: Full removal of heap's top/end offsets. Changed allocs in other archs. Created 3 years, 6 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
Index: runtime/vm/stub_code_arm64.cc
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/stub_code_arm64.cc
index c59e6f217c94c2cd8da978d8bff0de336c9ae48f..c1a3623331bf7d2431bbc835770ef8e56ecbeb03 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/stub_code_arm64.cc
@@ -722,14 +722,13 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, R4, &slow_case));
Heap::Space space = Heap::kNew;
- __ ldr(R8, Address(THR, Thread::heap_offset()));
// Calculate and align allocation size.
// Load new object start and calculate next object start.
// R1: array element type.
// R2: array length as Smi.
// R8: heap.  // NOTE(review): stale — this patch removes the load of the heap pointer into R8 above; drop or update this register comment.
- __ LoadFromOffset(R0, R8, Heap::TopOffset(space));
+ __ ldr(R0, Address(THR, Thread::top_offset()));
intptr_t fixed_size_plus_alignment_padding =
sizeof(RawArray) + kObjectAlignment - 1;
__ LoadImmediate(R3, fixed_size_plus_alignment_padding);
@@ -748,7 +747,7 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
// R3: array size.
// R7: potential next object start.
// R8: heap.  // NOTE(review): stale — R8 no longer caches the heap pointer after this patch (end is read via THR); remove this comment.
- __ LoadFromOffset(TMP, R8, Heap::EndOffset(space));
+ __ LoadFromOffset(TMP, THR, Thread::end_offset());
__ CompareRegisters(R7, TMP);
__ b(&slow_case, CS); // Branch if unsigned higher or equal.
@@ -758,7 +757,7 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
// R3: array size.
// R7: potential next object start.
// R8: heap.  // NOTE(review): stale — R8 no longer caches the heap pointer after this patch (top is stored via THR); remove this comment.
- __ StoreToOffset(R7, R8, Heap::TopOffset(space));
+ __ str(R7, Address(THR, Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R3, space));
@@ -991,8 +990,7 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
// R2: object size.
const intptr_t cid = kContextCid;
Heap::Space space = Heap::kNew;
- __ ldr(R5, Address(THR, Thread::heap_offset()));
- __ ldr(R0, Address(R5, Heap::TopOffset(space)));
+ __ ldr(R0, Address(THR, Thread::top_offset()));
__ add(R3, R2, Operand(R0));
// Check if the allocation fits into the remaining space.
// R0: potential new object.
@@ -1000,7 +998,7 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
// R2: object size.
// R3: potential next object start.
// R5: heap.  // NOTE(review): stale — this patch removes the load of the heap pointer into R5; remove this comment.
- __ ldr(TMP, Address(R5, Heap::EndOffset(space)));
+ __ ldr(TMP, Address(THR, Thread::end_offset()));
__ CompareRegisters(R3, TMP);
if (FLAG_use_slow_path) {
__ b(&slow_case);
@@ -1015,7 +1013,7 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
// R2: object size.
// R3: next object start.
// R5: heap.  // NOTE(review): stale — R5 no longer caches the heap pointer after this patch (top is stored via THR); remove this comment.
- __ str(R3, Address(R5, Heap::TopOffset(space)));
+ __ str(R3, Address(THR, Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2, space));
@@ -1175,21 +1173,20 @@ void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
// next object start and initialize the allocated object.
// R1: instantiated type arguments (if is_cls_parameterized).
Heap::Space space = Heap::kNew;
- __ ldr(R5, Address(THR, Thread::heap_offset()));
- __ ldr(R2, Address(R5, Heap::TopOffset(space)));
+ __ ldr(R2, Address(THR, Thread::top_offset()));
__ AddImmediate(R3, R2, instance_size);
// Check if the allocation fits into the remaining space.
// R2: potential new object start.
// R3: potential next object start.
// R5: heap.  // NOTE(review): stale — R5 no longer caches the heap pointer after this patch (end is read via THR); remove this comment.
- __ ldr(TMP, Address(R5, Heap::EndOffset(space)));
+ __ ldr(TMP, Address(THR, Thread::end_offset()));
__ CompareRegisters(R3, TMP);
if (FLAG_use_slow_path) {
__ b(&slow_case);
} else {
__ b(&slow_case, CS); // Unsigned higher or equal.
}
- __ str(R3, Address(R5, Heap::TopOffset(space)));
+ __ str(R3, Address(THR, Thread::top_offset()));
NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), space));
// R2: new object start.

Powered by Google App Engine
This is Rietveld 408576698