OLD | NEW |
1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file | 1 // Copyright (c) 2013, the Dart project authors. Please see the AUTHORS file |
2 // for details. All rights reserved. Use of this source code is governed by a | 2 // for details. All rights reserved. Use of this source code is governed by a |
3 // BSD-style license that can be found in the LICENSE file. | 3 // BSD-style license that can be found in the LICENSE file. |
4 | 4 |
5 #include "vm/globals.h" | 5 #include "vm/globals.h" |
6 #if defined(TARGET_ARCH_MIPS) | 6 #if defined(TARGET_ARCH_MIPS) |
7 | 7 |
8 #include "vm/assembler.h" | 8 #include "vm/assembler.h" |
9 #include "vm/runtime_entry.h" | 9 #include "vm/runtime_entry.h" |
10 #include "vm/simulator.h" | 10 #include "vm/simulator.h" |
(...skipping 62 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
73 addu(rd, rd, PP); | 73 addu(rd, rd, PP); |
74 lw(rd, Address(rd, offset_low)); | 74 lw(rd, Address(rd, offset_low)); |
75 } else { | 75 } else { |
76 lw(rd, Address(PP, offset_low)); | 76 lw(rd, Address(PP, offset_low)); |
77 } | 77 } |
78 } | 78 } |
79 } | 79 } |
80 | 80 |
81 | 81 |
// Adds rs and rt into rd, and leaves a negative value in ro iff the signed
// 32-bit addition overflowed (so callers branch on ro < 0). rd may alias rs
// and/or rt; ro must be distinct from rd, rs and rt. Clobbers TMP1, and TMP2
// in the fully-aliased (rd == rs == rt) case.
void Assembler::AdduDetectOverflow(Register rd, Register rs, Register rt,
                                   Register ro) {
  ASSERT(rd != ro);
  ASSERT(rd != TMP1);
  ASSERT(ro != TMP1);
  ASSERT(ro != rs);
  ASSERT(ro != rt);

  if ((rs == rt) && (rd == rs)) {
    // All three registers alias: copy the addend aside so its original value
    // survives the addu that overwrites rd below.
    ASSERT(rd != TMP2);
    ASSERT(ro != TMP2);
    ASSERT(rs != TMP2);
    ASSERT(rt != TMP2);
    mov(TMP2, rt);
    rt = TMP2;
  }

  // Signed overflow occurred iff both operands have the same sign and the
  // result's sign differs from both, i.e. (rd ^ rs) & (rd ^ rt) is negative.
  // Each branch preserves whichever input rd is about to overwrite.
  if (rd == rs) {
    mov(TMP1, rs);  // Preserve rs.
    addu(rd, rs, rt);  // rs is overwritten.
    xor_(TMP1, rd, TMP1);  // Original rs.
    xor_(ro, rd, rt);
    and_(ro, ro, TMP1);
  } else if (rd == rt) {
    mov(TMP1, rt);  // Preserve rt.
    addu(rd, rs, rt);  // rt is overwritten.
    xor_(TMP1, rd, TMP1);  // Original rt.
    xor_(ro, rd, rs);
    and_(ro, ro, TMP1);
  } else {
    // No aliasing: both inputs are still live after the addu.
    addu(rd, rs, rt);
    xor_(ro, rd, rs);
    xor_(TMP1, rd, rt);
    and_(ro, TMP1, ro);
  }
}
118 | 118 |
119 | 119 |
120 void Assembler::LoadObject(Register rd, const Object& object) { | 120 void Assembler::LoadObject(Register rd, const Object& object) { |
121 // Smi's and VM heap objects are never relocated; do not use object pool. | 121 // Smi's and VM heap objects are never relocated; do not use object pool. |
122 if (object.IsSmi()) { | 122 if (object.IsSmi()) { |
123 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw())); | 123 LoadImmediate(rd, reinterpret_cast<int32_t>(object.raw())); |
124 } else if (object.InVMHeap()) { | 124 } else if (object.InVMHeap()) { |
125 // Make sure that class CallPattern is able to decode this load immediate. | 125 // Make sure that class CallPattern is able to decode this load immediate. |
(...skipping 24 matching lines...) Expand all Loading... |
150 if (object_pool_.At(i) == obj.raw()) { | 150 if (object_pool_.At(i) == obj.raw()) { |
151 return i; | 151 return i; |
152 } | 152 } |
153 } | 153 } |
154 object_pool_.Add(obj, Heap::kOld); | 154 object_pool_.Add(obj, Heap::kOld); |
155 return object_pool_.Length() - 1; | 155 return object_pool_.Length() - 1; |
156 } | 156 } |
157 | 157 |
158 | 158 |
// Pushes object onto the Dart stack. Materializes the object through TMP1,
// which is therefore clobbered.
void Assembler::PushObject(const Object& object) {
  LoadObject(TMP1, object);
  Push(TMP1);
}
163 | 163 |
164 | 164 |
// Subtracts object from rn into rd, so rd == ZR iff rn holds the same raw
// value (pointer or Smi) as object. Clobbers TMP1; rn must not be TMP1.
void Assembler::CompareObject(Register rd, Register rn, const Object& object) {
  ASSERT(rn != TMP1);
  LoadObject(TMP1, object);
  subu(rd, rn, TMP1);
}
170 | 170 |
171 | 171 |
// Loads the 16-bit class id from object's tags word into result, using a
// halfword load directly from the class-id field.
void Assembler::LoadClassId(Register result, Register object) {
  ASSERT(RawObject::kClassIdTagBit == 16);
  ASSERT(RawObject::kClassIdTagSize == 16);
  // The class id occupies bits [16..31] of the tags word; computing its byte
  // offset as kClassIdTagBit / kBitsPerByte assumes a little-endian layout of
  // the tags word in memory — TODO confirm for big-endian MIPS targets.
  const intptr_t class_id_offset = Object::tags_offset() +
      RawObject::kClassIdTagBit / kBitsPerByte;
  lhu(result, FieldAddress(object, class_id_offset));
}
179 | 179 |
180 | 180 |
// Loads the RawClass for class_id from the current isolate's class table
// into result. Clobbers TMP1; class_id must be distinct from result (it is
// read after result has been overwritten).
void Assembler::LoadClassById(Register result, Register class_id) {
  ASSERT(result != class_id);
  // Fetch the isolate via the current context, then its class table pointer.
  lw(result, FieldAddress(CTX, Context::isolate_offset()));
  const intptr_t table_offset_in_isolate =
      Isolate::class_table_offset() + ClassTable::table_offset();
  lw(result, Address(result, table_offset_in_isolate));
  sll(TMP1, class_id, 2);  // class_id * 4: byte index of a word-sized entry.
  addu(result, result, TMP1);
  lw(result, Address(result));
}
191 | 191 |
192 | 192 |
193 void Assembler::LoadClass(Register result, Register object, Register scratch) { | 193 void Assembler::LoadClass(Register result, Register object) { |
194 ASSERT(scratch != result); | 194 ASSERT(TMP1 != result); |
195 LoadClassId(scratch, object); | 195 LoadClassId(TMP1, object); |
196 | 196 |
197 lw(result, FieldAddress(CTX, Context::isolate_offset())); | 197 lw(result, FieldAddress(CTX, Context::isolate_offset())); |
198 const intptr_t table_offset_in_isolate = | 198 const intptr_t table_offset_in_isolate = |
199 Isolate::class_table_offset() + ClassTable::table_offset(); | 199 Isolate::class_table_offset() + ClassTable::table_offset(); |
200 lw(result, Address(result, table_offset_in_isolate)); | 200 lw(result, Address(result, table_offset_in_isolate)); |
201 sll(scratch, scratch, 2); | 201 sll(TMP1, TMP1, 2); |
202 addu(result, result, scratch); | 202 addu(result, result, TMP1); |
203 lw(result, Address(result)); | 203 lw(result, Address(result)); |
204 } | 204 } |
205 | 205 |
206 | 206 |
207 void Assembler::EnterStubFrame() { | 207 void Assembler::EnterStubFrame() { |
208 addiu(SP, SP, Immediate(-3 * kWordSize)); | 208 addiu(SP, SP, Immediate(-3 * kWordSize)); |
209 sw(ZR, Address(SP, 2 * kWordSize)); // PC marker is 0 in stubs. | 209 sw(ZR, Address(SP, 2 * kWordSize)); // PC marker is 0 in stubs. |
210 sw(RA, Address(SP, 1 * kWordSize)); | 210 sw(RA, Address(SP, 1 * kWordSize)); |
211 sw(FP, Address(SP, 0 * kWordSize)); | 211 sw(FP, Address(SP, 0 * kWordSize)); |
212 mov(FP, SP); | 212 mov(FP, SP); |
(...skipping 67 matching lines...) Expand 10 before | Expand all | Expand 10 after Loading... |
280 // Adjust SP for PC pushed in EnterDartFrame. | 280 // Adjust SP for PC pushed in EnterDartFrame. |
281 addiu(SP, SP, Immediate(4 * kWordSize)); | 281 addiu(SP, SP, Immediate(4 * kWordSize)); |
282 } | 282 } |
283 | 283 |
284 | 284 |
// Reserves frame_space bytes of stack, then rounds SP down to the platform's
// activation-frame alignment before calling into C++. Clobbers TMP1 when an
// alignment is required.
void Assembler::ReserveAlignedFrameSpace(intptr_t frame_space) {
  // Reserve space for arguments and align frame before entering
  // the C++ world.
  addiu(SP, SP, Immediate(-frame_space));
  if (OS::ActivationFrameAlignment() > 0) {
    // Alignment is a power of two, so masking with ~(alignment - 1) rounds
    // SP down to the next aligned address.
    LoadImmediate(TMP1, ~(OS::ActivationFrameAlignment() - 1));
    and_(SP, SP, TMP1);
  }
}
294 | 294 |
295 | 295 |
296 int32_t Assembler::AddExternalLabel(const ExternalLabel* label) { | 296 int32_t Assembler::AddExternalLabel(const ExternalLabel* label) { |
297 if (object_pool_.IsNull()) { | 297 if (object_pool_.IsNull()) { |
298 // The object pool cannot be used in the vm isolate. | 298 // The object pool cannot be used in the vm isolate. |
299 ASSERT(Isolate::Current() != Dart::vm_isolate()); | 299 ASSERT(Isolate::Current() != Dart::vm_isolate()); |
300 object_pool_ = GrowableObjectArray::New(Heap::kOld); | 300 object_pool_ = GrowableObjectArray::New(Heap::kOld); |
301 } | 301 } |
(...skipping 16 matching lines...) Expand all Loading... |
318 b(&stop); | 318 b(&stop); |
319 Emit(reinterpret_cast<int32_t>(message)); | 319 Emit(reinterpret_cast<int32_t>(message)); |
320 Bind(&stop); | 320 Bind(&stop); |
321 break_(Instr::kStopMessageCode); | 321 break_(Instr::kStopMessageCode); |
322 } | 322 } |
323 | 323 |
324 } // namespace dart | 324 } // namespace dart |
325 | 325 |
326 #endif // defined TARGET_ARCH_MIPS | 326 #endif // defined TARGET_ARCH_MIPS |
327 | 327 |
OLD | NEW |