| OLD | NEW |
| 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
| 2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
| 3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
| 4 | 4 |
| 5 #include "vm/globals.h" // NOLINT | 5 #include "vm/globals.h" // NOLINT |
| 6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
| 7 | 7 |
| 8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
| 9 #include "vm/longjump.h" | 9 #include "vm/longjump.h" |
| 10 #include "vm/runtime_entry.h" | 10 #include "vm/runtime_entry.h" |
| (...skipping 908 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 919 void Assembler::TryAllocate(const Class& cls, | 919 void Assembler::TryAllocate(const Class& cls, |
| 920 Label* failure, | 920 Label* failure, |
| 921 Register instance_reg, | 921 Register instance_reg, |
| 922 Register temp_reg) { | 922 Register temp_reg) { |
| 923 ASSERT(!in_delay_slot_); | 923 ASSERT(!in_delay_slot_); |
| 924 ASSERT(failure != NULL); | 924 ASSERT(failure != NULL); |
| 925 if (FLAG_inline_alloc) { | 925 if (FLAG_inline_alloc) { |
| 926 // If this allocation is traced, program will jump to failure path | 926 // If this allocation is traced, program will jump to failure path |
| 927 // (i.e. the allocation stub) which will allocate the object and trace the | 927 // (i.e. the allocation stub) which will allocate the object and trace the |
| 928 // allocation call site. | 928 // allocation call site. |
| 929 MaybeTraceAllocation(cls.id(), temp_reg, failure); | 929 MaybeTraceAllocation(cls.id(), temp_reg, failure, |
| 930 /* inline_isolate = */ false); |
| 930 const intptr_t instance_size = cls.instance_size(); | 931 const intptr_t instance_size = cls.instance_size(); |
| 931 Heap* heap = Isolate::Current()->heap(); | 932 Heap::Space space = Heap::SpaceForAllocation(cls.id()); |
| 932 Heap::Space space = heap->SpaceForAllocation(cls.id()); | 933 lw(temp_reg, Address(THR, Thread::heap_offset())); |
| 933 const uword top_address = heap->TopAddress(space); | 934 lw(instance_reg, Address(temp_reg, Heap::TopOffset(space))); |
| 934 LoadImmediate(temp_reg, top_address); | |
| 935 lw(instance_reg, Address(temp_reg)); | |
| 936 // TODO(koda): Protect against unsigned overflow here. | 935 // TODO(koda): Protect against unsigned overflow here. |
| 937 AddImmediate(instance_reg, instance_size); | 936 AddImmediate(instance_reg, instance_size); |
| 938 | 937 |
| 939 // instance_reg: potential next object start. | 938 // instance_reg: potential next object start. |
| 940 const uword end_address = heap->EndAddress(space); | 939 lw(TMP, Address(temp_reg, Heap::EndOffset(space))); |
| 941 ASSERT(top_address < end_address); | |
| 942 lw(TMP, Address(temp_reg, end_address - top_address)); | |
| 943 // Fail if heap end unsigned less than or equal to instance_reg. | 940 // Fail if heap end unsigned less than or equal to instance_reg. |
| 944 BranchUnsignedLessEqual(TMP, instance_reg, failure); | 941 BranchUnsignedLessEqual(TMP, instance_reg, failure); |
| 945 | 942 |
| 946 // Successfully allocated the object, now update top to point to | 943 // Successfully allocated the object, now update top to point to |
| 947 // next object start and store the class in the class field of object. | 944 // next object start and store the class in the class field of object. |
| 948 sw(instance_reg, Address(temp_reg)); | 945 sw(instance_reg, Address(temp_reg, Heap::TopOffset(space))); |
| 949 | 946 |
| 950 ASSERT(instance_size >= kHeapObjectTag); | 947 ASSERT(instance_size >= kHeapObjectTag); |
| 951 AddImmediate(instance_reg, -instance_size + kHeapObjectTag); | 948 AddImmediate(instance_reg, -instance_size + kHeapObjectTag); |
| 952 UpdateAllocationStats(cls.id(), temp_reg, space); | 949 UpdateAllocationStats(cls.id(), temp_reg, space, |
| 950 /* inline_isolate = */ false); |
| 953 uword tags = 0; | 951 uword tags = 0; |
| 954 tags = RawObject::SizeTag::update(instance_size, tags); | 952 tags = RawObject::SizeTag::update(instance_size, tags); |
| 955 ASSERT(cls.id() != kIllegalCid); | 953 ASSERT(cls.id() != kIllegalCid); |
| 956 tags = RawObject::ClassIdTag::update(cls.id(), tags); | 954 tags = RawObject::ClassIdTag::update(cls.id(), tags); |
| 957 LoadImmediate(TMP, tags); | 955 LoadImmediate(TMP, tags); |
| 958 sw(TMP, FieldAddress(instance_reg, Object::tags_offset())); | 956 sw(TMP, FieldAddress(instance_reg, Object::tags_offset())); |
| 959 } else { | 957 } else { |
| 960 b(failure); | 958 b(failure); |
| 961 } | 959 } |
| 962 } | 960 } |
| 963 | 961 |
| 964 | 962 |
| 965 void Assembler::TryAllocateArray(intptr_t cid, | 963 void Assembler::TryAllocateArray(intptr_t cid, |
| 966 intptr_t instance_size, | 964 intptr_t instance_size, |
| 967 Label* failure, | 965 Label* failure, |
| 968 Register instance, | 966 Register instance, |
| 969 Register end_address, | 967 Register end_address, |
| 970 Register temp1, | 968 Register temp1, |
| 971 Register temp2) { | 969 Register temp2) { |
| 972 if (FLAG_inline_alloc) { | 970 if (FLAG_inline_alloc) { |
| 973 // If this allocation is traced, program will jump to failure path | 971 // If this allocation is traced, program will jump to failure path |
| 974 // (i.e. the allocation stub) which will allocate the object and trace the | 972 // (i.e. the allocation stub) which will allocate the object and trace the |
| 975 // allocation call site. | 973 // allocation call site. |
| 976 MaybeTraceAllocation(cid, temp1, failure); | 974 MaybeTraceAllocation(cid, temp1, failure, /* inline_isolate = */ false); |
| 977 Isolate* isolate = Isolate::Current(); | 975 Isolate* isolate = Isolate::Current(); |
| 978 Heap* heap = isolate->heap(); | 976 Heap* heap = isolate->heap(); |
| 979 Heap::Space space = heap->SpaceForAllocation(cid); | 977 Heap::Space space = heap->SpaceForAllocation(cid); |
| 980 LoadImmediate(temp1, heap->TopAddress(space)); | 978 lw(temp1, Address(THR, Thread::heap_offset())); |
| 981 lw(instance, Address(temp1, 0)); // Potential new object start. | 979 // Potential new object start. |
| 980 lw(instance, Address(temp1, heap->TopOffset(space))); |
| 982 // Potential next object start. | 981 // Potential next object start. |
| 983 AddImmediate(end_address, instance, instance_size); | 982 AddImmediate(end_address, instance, instance_size); |
| 984 // Branch on unsigned overflow. | 983 // Branch on unsigned overflow. |
| 985 BranchUnsignedLess(end_address, instance, failure); | 984 BranchUnsignedLess(end_address, instance, failure); |
| 986 | 985 |
| 987 // Check if the allocation fits into the remaining space. | 986 // Check if the allocation fits into the remaining space. |
| 988 // instance: potential new object start. | 987 // instance: potential new object start. |
| 989 // end_address: potential next object start. | 988 // end_address: potential next object start. |
| 990 LoadImmediate(temp2, heap->EndAddress(space)); | 989 lw(temp2, Address(temp1, Heap::EndOffset(space))); |
| 991 lw(temp2, Address(temp2, 0)); | |
| 992 BranchUnsignedGreaterEqual(end_address, temp2, failure); | 990 BranchUnsignedGreaterEqual(end_address, temp2, failure); |
| 993 | 991 |
| 994 | |
| 995 // Successfully allocated the object(s), now update top to point to | 992 // Successfully allocated the object(s), now update top to point to |
| 996 // next object start and initialize the object. | 993 // next object start and initialize the object. |
| 997 sw(end_address, Address(temp1, 0)); | 994 sw(end_address, Address(temp1, Heap::TopOffset(space))); |
| 998 addiu(instance, instance, Immediate(kHeapObjectTag)); | 995 addiu(instance, instance, Immediate(kHeapObjectTag)); |
| 999 LoadImmediate(temp1, instance_size); | 996 LoadImmediate(temp1, instance_size); |
| 1000 UpdateAllocationStatsWithSize(cid, temp1, temp2, space); | 997 UpdateAllocationStatsWithSize(cid, temp1, temp2, space, |
| 998 /* inline_isolate = */ false); |
| 1001 | 999 |
| 1002 // Initialize the tags. | 1000 // Initialize the tags. |
| 1003 // instance: new object start as a tagged pointer. | 1001 // instance: new object start as a tagged pointer. |
| 1004 uword tags = 0; | 1002 uword tags = 0; |
| 1005 tags = RawObject::ClassIdTag::update(cid, tags); | 1003 tags = RawObject::ClassIdTag::update(cid, tags); |
| 1006 tags = RawObject::SizeTag::update(instance_size, tags); | 1004 tags = RawObject::SizeTag::update(instance_size, tags); |
| 1007 LoadImmediate(temp1, tags); | 1005 LoadImmediate(temp1, tags); |
| 1008 sw(temp1, FieldAddress(instance, Array::tags_offset())); // Store tags. | 1006 sw(temp1, FieldAddress(instance, Array::tags_offset())); // Store tags. |
| 1009 } else { | 1007 } else { |
| 1010 b(failure); | 1008 b(failure); |
| (...skipping 265 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
| 1276 Label stop; | 1274 Label stop; |
| 1277 b(&stop); | 1275 b(&stop); |
| 1278 Emit(reinterpret_cast<int32_t>(message)); | 1276 Emit(reinterpret_cast<int32_t>(message)); |
| 1279 Bind(&stop); | 1277 Bind(&stop); |
| 1280 break_(Instr::kStopMessageCode); | 1278 break_(Instr::kStopMessageCode); |
| 1281 } | 1279 } |
| 1282 | 1280 |
| 1283 } // namespace dart | 1281 } // namespace dart |
| 1284 | 1282 |
| 1285 #endif // defined TARGET_ARCH_MIPS | 1283 #endif // defined TARGET_ARCH_MIPS |
| OLD | NEW |