Chromium Code Reviews
chromiumcodereview-hr@appspot.gserviceaccount.com (chromiumcodereview-hr) | Please choose your nickname with Settings | Help | Chromium Project | Gerrit Changes | Sign out
(524)

Unified Diff: runtime/vm/stub_code_arm64.cc

Issue 2980033002: Moves the top_ and end_ words of the Scavenger into mutator thread. (Closed)
Patch Set: Created 3 years, 5 months ago
Use n/p to move between diff chunks; N/P to move between comments. Draft comments are only viewable by you.
Jump to:
View side-by-side diff with in-line comments
Download patch
« no previous file with comments | « runtime/vm/stub_code_arm.cc ('k') | runtime/vm/stub_code_ia32.cc » ('j') | no next file with comments »
Expand Comments ('e') | Collapse Comments ('c') | Show/Hide Comments ('s')
Index: runtime/vm/stub_code_arm64.cc
diff --git a/runtime/vm/stub_code_arm64.cc b/runtime/vm/stub_code_arm64.cc
index bc582e5f9f349be4d416b2c63485bb9b580713c8..0b15d0d24daa2d44ad76a759fae7756ca0802a6e 100644
--- a/runtime/vm/stub_code_arm64.cc
+++ b/runtime/vm/stub_code_arm64.cc
@@ -704,15 +704,13 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
const intptr_t cid = kArrayCid;
NOT_IN_PRODUCT(__ MaybeTraceAllocation(kArrayCid, R4, &slow_case));
- Heap::Space space = Heap::kNew;
- __ ldr(R8, Address(THR, Thread::heap_offset()));
+ NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
// Calculate and align allocation size.
// Load new object start and calculate next object start.
// R1: array element type.
// R2: array length as Smi.
- // R8: heap.
- __ LoadFromOffset(R0, R8, Heap::TopOffset(space));
+ __ ldr(R0, Address(THR, Thread::top_offset()));
intptr_t fixed_size_plus_alignment_padding =
sizeof(RawArray) + kObjectAlignment - 1;
__ LoadImmediate(R3, fixed_size_plus_alignment_padding);
@@ -730,8 +728,7 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
// R2: array length as Smi.
// R3: array size.
// R7: potential next object start.
- // R8: heap.
- __ LoadFromOffset(TMP, R8, Heap::EndOffset(space));
+ __ LoadFromOffset(TMP, THR, Thread::end_offset());
__ CompareRegisters(R7, TMP);
__ b(&slow_case, CS); // Branch if unsigned higher or equal.
@@ -740,8 +737,7 @@ void StubCode::GenerateAllocateArrayStub(Assembler* assembler) {
// R0: potential new object start.
// R3: array size.
// R7: potential next object start.
- // R8: heap.
- __ StoreToOffset(R7, R8, Heap::TopOffset(space));
+ __ str(R7, Address(THR, Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R3, space));
@@ -971,17 +967,15 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
// R1: number of context variables.
// R2: object size.
const intptr_t cid = kContextCid;
- Heap::Space space = Heap::kNew;
- __ ldr(R5, Address(THR, Thread::heap_offset()));
- __ ldr(R0, Address(R5, Heap::TopOffset(space)));
+ NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
+ __ ldr(R0, Address(THR, Thread::top_offset()));
__ add(R3, R2, Operand(R0));
// Check if the allocation fits into the remaining space.
// R0: potential new object.
// R1: number of context variables.
// R2: object size.
// R3: potential next object start.
- // R5: heap.
- __ ldr(TMP, Address(R5, Heap::EndOffset(space)));
+ __ ldr(TMP, Address(THR, Thread::end_offset()));
__ CompareRegisters(R3, TMP);
if (FLAG_use_slow_path) {
__ b(&slow_case);
@@ -995,8 +989,7 @@ void StubCode::GenerateAllocateContextStub(Assembler* assembler) {
// R1: number of context variables.
// R2: object size.
// R3: next object start.
- // R5: heap.
- __ str(R3, Address(R5, Heap::TopOffset(space)));
+ __ str(R3, Address(THR, Thread::top_offset()));
__ add(R0, R0, Operand(kHeapObjectTag));
NOT_IN_PRODUCT(__ UpdateAllocationStatsWithSize(cid, R2, space));
@@ -1155,22 +1148,20 @@ void StubCode::GenerateAllocationStubForClass(Assembler* assembler,
// Allocate the object and update top to point to
// next object start and initialize the allocated object.
// R1: instantiated type arguments (if is_cls_parameterized).
- Heap::Space space = Heap::kNew;
- __ ldr(R5, Address(THR, Thread::heap_offset()));
- __ ldr(R2, Address(R5, Heap::TopOffset(space)));
+ NOT_IN_PRODUCT(Heap::Space space = Heap::kNew);
+ __ ldr(R2, Address(THR, Thread::top_offset()));
__ AddImmediate(R3, R2, instance_size);
// Check if the allocation fits into the remaining space.
// R2: potential new object start.
// R3: potential next object start.
- // R5: heap.
- __ ldr(TMP, Address(R5, Heap::EndOffset(space)));
+ __ ldr(TMP, Address(THR, Thread::end_offset()));
__ CompareRegisters(R3, TMP);
if (FLAG_use_slow_path) {
__ b(&slow_case);
} else {
__ b(&slow_case, CS); // Unsigned higher or equal.
}
- __ str(R3, Address(R5, Heap::TopOffset(space)));
+ __ str(R3, Address(THR, Thread::top_offset()));
NOT_IN_PRODUCT(__ UpdateAllocationStats(cls.id(), space));
// R2: new object start.
« no previous file with comments | « runtime/vm/stub_code_arm.cc ('k') | runtime/vm/stub_code_ia32.cc » ('j') | no next file with comments »

Powered by Google App Engine
This is Rietveld 408576698