OLD | NEW |
1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2012 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #include <limits.h> // For LONG_MIN, LONG_MAX. | 5 #include <limits.h> // For LONG_MIN, LONG_MAX. |
6 | 6 |
7 #if V8_TARGET_ARCH_MIPS64 | 7 #if V8_TARGET_ARCH_MIPS64 |
8 | 8 |
9 #include "src/base/division-by-constant.h" | 9 #include "src/base/division-by-constant.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 29 matching lines...) |
40 code_object_ = | 40 code_object_ = |
41 Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_); | 41 Handle<Object>::New(isolate_->heap()->undefined_value(), isolate_); |
42 } | 42 } |
43 } | 43 } |
44 | 44 |
45 void MacroAssembler::Load(Register dst, | 45 void MacroAssembler::Load(Register dst, |
46 const MemOperand& src, | 46 const MemOperand& src, |
47 Representation r) { | 47 Representation r) { |
48 DCHECK(!r.IsDouble()); | 48 DCHECK(!r.IsDouble()); |
49 if (r.IsInteger8()) { | 49 if (r.IsInteger8()) { |
50 lb(dst, src); | 50 Lb(dst, src); |
51 } else if (r.IsUInteger8()) { | 51 } else if (r.IsUInteger8()) { |
52 lbu(dst, src); | 52 Lbu(dst, src); |
53 } else if (r.IsInteger16()) { | 53 } else if (r.IsInteger16()) { |
54 lh(dst, src); | 54 Lh(dst, src); |
55 } else if (r.IsUInteger16()) { | 55 } else if (r.IsUInteger16()) { |
56 lhu(dst, src); | 56 Lhu(dst, src); |
57 } else if (r.IsInteger32()) { | 57 } else if (r.IsInteger32()) { |
58 lw(dst, src); | 58 Lw(dst, src); |
59 } else { | 59 } else { |
60 ld(dst, src); | 60 Ld(dst, src); |
61 } | 61 } |
62 } | 62 } |
63 | 63 |
64 | 64 |
65 void MacroAssembler::Store(Register src, | 65 void MacroAssembler::Store(Register src, |
66 const MemOperand& dst, | 66 const MemOperand& dst, |
67 Representation r) { | 67 Representation r) { |
68 DCHECK(!r.IsDouble()); | 68 DCHECK(!r.IsDouble()); |
69 if (r.IsInteger8() || r.IsUInteger8()) { | 69 if (r.IsInteger8() || r.IsUInteger8()) { |
70 sb(src, dst); | 70 Sb(src, dst); |
71 } else if (r.IsInteger16() || r.IsUInteger16()) { | 71 } else if (r.IsInteger16() || r.IsUInteger16()) { |
72 sh(src, dst); | 72 Sh(src, dst); |
73 } else if (r.IsInteger32()) { | 73 } else if (r.IsInteger32()) { |
74 sw(src, dst); | 74 Sw(src, dst); |
75 } else { | 75 } else { |
76 if (r.IsHeapObject()) { | 76 if (r.IsHeapObject()) { |
77 AssertNotSmi(src); | 77 AssertNotSmi(src); |
78 } else if (r.IsSmi()) { | 78 } else if (r.IsSmi()) { |
79 AssertSmi(src); | 79 AssertSmi(src); |
80 } | 80 } |
81 sd(src, dst); | 81 Sd(src, dst); |
82 } | 82 } |
83 } | 83 } |
84 | 84 |
85 | 85 |
86 void MacroAssembler::LoadRoot(Register destination, | 86 void MacroAssembler::LoadRoot(Register destination, |
87 Heap::RootListIndex index) { | 87 Heap::RootListIndex index) { |
88 ld(destination, MemOperand(s6, index << kPointerSizeLog2)); | 88 Ld(destination, MemOperand(s6, index << kPointerSizeLog2)); |
89 } | 89 } |
90 | 90 |
91 | 91 |
92 void MacroAssembler::LoadRoot(Register destination, | 92 void MacroAssembler::LoadRoot(Register destination, |
93 Heap::RootListIndex index, | 93 Heap::RootListIndex index, |
94 Condition cond, | 94 Condition cond, |
95 Register src1, const Operand& src2) { | 95 Register src1, const Operand& src2) { |
96 Branch(2, NegateCondition(cond), src1, src2); | 96 Branch(2, NegateCondition(cond), src1, src2); |
97 ld(destination, MemOperand(s6, index << kPointerSizeLog2)); | 97 Ld(destination, MemOperand(s6, index << kPointerSizeLog2)); |
98 } | 98 } |
99 | 99 |
100 | 100 |
101 void MacroAssembler::StoreRoot(Register source, | 101 void MacroAssembler::StoreRoot(Register source, |
102 Heap::RootListIndex index) { | 102 Heap::RootListIndex index) { |
103 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); | 103 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); |
104 sd(source, MemOperand(s6, index << kPointerSizeLog2)); | 104 Sd(source, MemOperand(s6, index << kPointerSizeLog2)); |
105 } | 105 } |
106 | 106 |
107 | 107 |
108 void MacroAssembler::StoreRoot(Register source, | 108 void MacroAssembler::StoreRoot(Register source, |
109 Heap::RootListIndex index, | 109 Heap::RootListIndex index, |
110 Condition cond, | 110 Condition cond, |
111 Register src1, const Operand& src2) { | 111 Register src1, const Operand& src2) { |
112 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); | 112 DCHECK(Heap::RootCanBeWrittenAfterInitialization(index)); |
113 Branch(2, NegateCondition(cond), src1, src2); | 113 Branch(2, NegateCondition(cond), src1, src2); |
114 sd(source, MemOperand(s6, index << kPointerSizeLog2)); | 114 Sd(source, MemOperand(s6, index << kPointerSizeLog2)); |
115 } | 115 } |
116 | 116 |
117 void MacroAssembler::PushCommonFrame(Register marker_reg) { | 117 void MacroAssembler::PushCommonFrame(Register marker_reg) { |
118 if (marker_reg.is_valid()) { | 118 if (marker_reg.is_valid()) { |
119 Push(ra, fp, marker_reg); | 119 Push(ra, fp, marker_reg); |
120 Daddu(fp, sp, Operand(kPointerSize)); | 120 Daddu(fp, sp, Operand(kPointerSize)); |
121 } else { | 121 } else { |
122 Push(ra, fp); | 122 Push(ra, fp); |
123 mov(fp, sp); | 123 mov(fp, sp); |
124 } | 124 } |
(...skipping 34 matching lines...) |
159 void MacroAssembler::PopSafepointRegisters() { | 159 void MacroAssembler::PopSafepointRegisters() { |
160 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; | 160 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; |
161 MultiPop(kSafepointSavedRegisters); | 161 MultiPop(kSafepointSavedRegisters); |
162 if (num_unsaved > 0) { | 162 if (num_unsaved > 0) { |
163 Daddu(sp, sp, Operand(num_unsaved * kPointerSize)); | 163 Daddu(sp, sp, Operand(num_unsaved * kPointerSize)); |
164 } | 164 } |
165 } | 165 } |
166 | 166 |
167 | 167 |
168 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { | 168 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { |
169 sd(src, SafepointRegisterSlot(dst)); | 169 Sd(src, SafepointRegisterSlot(dst)); |
170 } | 170 } |
171 | 171 |
172 | 172 |
173 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { | 173 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { |
174 ld(dst, SafepointRegisterSlot(src)); | 174 Ld(dst, SafepointRegisterSlot(src)); |
175 } | 175 } |
176 | 176 |
177 | 177 |
178 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { | 178 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { |
179 // The registers are pushed starting with the highest encoding, | 179 // The registers are pushed starting with the highest encoding, |
180 // which means that lowest encodings are closest to the stack pointer. | 180 // which means that lowest encodings are closest to the stack pointer. |
181 return kSafepointRegisterStackIndexMap[reg_code]; | 181 return kSafepointRegisterStackIndexMap[reg_code]; |
182 } | 182 } |
183 | 183 |
184 | 184 |
185 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { | 185 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) { |
186 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); | 186 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize); |
187 } | 187 } |
188 | 188 |
189 | 189 |
190 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { | 190 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { |
191 UNIMPLEMENTED_MIPS(); | 191 UNIMPLEMENTED_MIPS(); |
192 // General purpose registers are pushed last on the stack. | 192 // General purpose registers are pushed last on the stack. |
193 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize; | 193 int doubles_size = DoubleRegister::kMaxNumRegisters * kDoubleSize; |
194 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; | 194 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; |
195 return MemOperand(sp, doubles_size + register_offset); | 195 return MemOperand(sp, doubles_size + register_offset); |
196 } | 196 } |
197 | 197 |
| 198 // Helper for base-reg + offset, when the offset is larger than int16. |
| 199 void MacroAssembler::LoadRegPlusOffsetToAt(const MemOperand& src) { |
| 200 DCHECK(!src.rm().is(at)); |
| 201 DCHECK(is_int32(src.offset())); |
| 202 |
| 203 if (kArchVariant == kMips64r6) { |
| 204 int32_t hi = (src.offset() >> kLuiShift) & kImm16Mask; |
| 205 if (src.offset() & kNegOffset) { |
| 206 if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) { |
| 207 lui(at, (src.offset() >> kLuiShift) & kImm16Mask); |
| 208 ori(at, at, src.offset() & kImm16Mask); // Load 32-bit offset. |
| 209 daddu(at, at, src.rm()); // Add base register. |
| 210 return; |
| 211 } |
| 212 |
| 213 hi += 1; |
| 214 } |
| 215 |
| 216 daui(at, src.rm(), hi); |
| 217 daddiu(at, at, src.offset() & kImm16Mask); |
| 218 } else { |
| 219 lui(at, (src.offset() >> kLuiShift) & kImm16Mask); |
| 220 ori(at, at, src.offset() & kImm16Mask); // Load 32-bit offset. |
| 221 daddu(at, at, src.rm()); // Add base register. |
| 222 } |
| 223 } |
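A standalone sketch of the 32-bit offset split this helper encodes (plain C++, not V8 emitter calls; the kLuiShift and kImm16Mask values are assumed to be 16 and 0xffff, matching their use above):

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int kLuiShift = 16;           // assumed value of V8's kLuiShift
  const int32_t kImm16Mask = 0xffff;  // assumed value of V8's kImm16Mask
  int32_t offset = 0x12345678;
  int32_t hi = (offset >> kLuiShift) & kImm16Mask;  // what lui places in at
  int32_t lo = offset & kImm16Mask;                 // what ori merges into at
  int32_t rebuilt = (hi << kLuiShift) | lo;         // at now holds the offset
  std::printf("hi=0x%x lo=0x%x rebuilt=0x%x\n", hi, lo, rebuilt);
  return rebuilt == offset ? 0 : 1;  // daddu then adds the base register
}
```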
| 224 |
| 225 // Helper for base-reg + upper part of offset, when the offset is larger than int16. |
| 226 // Loads the higher part of the offset into the AT register. |
| 227 // Returns the lower part of the offset to be used as the offset |
| 228 // in load/store instructions. |
| 229 int32_t MacroAssembler::LoadRegPlusUpperOffsetPartToAt(const MemOperand& src) { |
| 230 DCHECK(!src.rm().is(at)); |
| 231 DCHECK(is_int32(src.offset())); |
| 232 int32_t hi = (src.offset() >> kLuiShift) & kImm16Mask; |
| 233 // If the highest bit of the lower part of the offset is 1, this would make |
| 234 // the offset in the load/store instruction negative. We need to compensate |
| 235 // for this by adding 1 to the upper part of the offset. |
| 236 if (src.offset() & kNegOffset) { |
| 237 if ((hi & kNegOffset) != ((hi + 1) & kNegOffset)) { |
| 238 LoadRegPlusOffsetToAt(src); |
| 239 return 0; |
| 240 } |
| 241 |
| 242 hi += 1; |
| 243 } |
| 244 |
| 245 if (kArchVariant == kMips64r6) { |
| 246 daui(at, src.rm(), hi); |
| 247 } else { |
| 248 lui(at, hi); |
| 249 daddu(at, at, src.rm()); |
| 250 } |
| 251 return (src.offset() & kImm16Mask); |
| 252 } |
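The sign compensation above is easiest to check with concrete numbers. A minimal sketch, assuming kNegOffset is the bit-15 mask 0x8000: a low half with bit 15 set is sign-extended by the load/store instruction, so it contributes (lo - 0x10000), and bumping hi by one adds the 0x10000 back. The (hi & kNegOffset) != ((hi + 1) & kNegOffset) guard catches the one case where that bump would itself flip bit 15 of hi, which is why the helper falls back to LoadRegPlusOffsetToAt there.

```cpp
#include <cstdint>
#include <cassert>

int main() {
  const int32_t kNegOffset = 0x8000;          // assumed: mask for bit 15
  int32_t offset = 0x00018000;                // low half has bit 15 set
  int32_t hi = (offset >> 16) & 0xffff;       // 0x0001
  int16_t lo = static_cast<int16_t>(offset);  // sign-extends to -0x8000
  if (offset & kNegOffset) hi += 1;           // compensate: hi becomes 0x0002
  // Effective address math done by the hardware: (hi << 16) + sext(lo).
  assert((hi << 16) + lo == offset);          // 0x20000 - 0x8000 == 0x18000
  return 0;
}
```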
198 | 253 |
199 void MacroAssembler::InNewSpace(Register object, | 254 void MacroAssembler::InNewSpace(Register object, |
200 Register scratch, | 255 Register scratch, |
201 Condition cc, | 256 Condition cc, |
202 Label* branch) { | 257 Label* branch) { |
203 DCHECK(cc == eq || cc == ne); | 258 DCHECK(cc == eq || cc == ne); |
204 CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch); | 259 CheckPageFlag(object, scratch, MemoryChunk::kIsInNewSpaceMask, cc, branch); |
205 } | 260 } |
206 | 261 |
207 | 262 |
(...skipping 54 matching lines...) |
262 | 317 |
263 | 318 |
264 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved) | 319 // Clobbers object, dst, map, and ra, if (ra_status == kRAHasBeenSaved) |
265 void MacroAssembler::RecordWriteForMap(Register object, | 320 void MacroAssembler::RecordWriteForMap(Register object, |
266 Register map, | 321 Register map, |
267 Register dst, | 322 Register dst, |
268 RAStatus ra_status, | 323 RAStatus ra_status, |
269 SaveFPRegsMode fp_mode) { | 324 SaveFPRegsMode fp_mode) { |
270 if (emit_debug_code()) { | 325 if (emit_debug_code()) { |
271 DCHECK(!dst.is(at)); | 326 DCHECK(!dst.is(at)); |
272 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset)); | 327 Ld(dst, FieldMemOperand(map, HeapObject::kMapOffset)); |
273 Check(eq, | 328 Check(eq, |
274 kWrongAddressOrValuePassedToRecordWrite, | 329 kWrongAddressOrValuePassedToRecordWrite, |
275 dst, | 330 dst, |
276 Operand(isolate()->factory()->meta_map())); | 331 Operand(isolate()->factory()->meta_map())); |
277 } | 332 } |
278 | 333 |
279 if (!FLAG_incremental_marking) { | 334 if (!FLAG_incremental_marking) { |
280 return; | 335 return; |
281 } | 336 } |
282 | 337 |
283 if (emit_debug_code()) { | 338 if (emit_debug_code()) { |
284 ld(at, FieldMemOperand(object, HeapObject::kMapOffset)); | 339 Ld(at, FieldMemOperand(object, HeapObject::kMapOffset)); |
285 Check(eq, | 340 Check(eq, |
286 kWrongAddressOrValuePassedToRecordWrite, | 341 kWrongAddressOrValuePassedToRecordWrite, |
287 map, | 342 map, |
288 Operand(at)); | 343 Operand(at)); |
289 } | 344 } |
290 | 345 |
291 Label done; | 346 Label done; |
292 | 347 |
293 // A single check of the map's pages interesting flag suffices, since it is | 348 // A single check of the map's pages interesting flag suffices, since it is |
294 // only set during incremental collection, and then it's also guaranteed that | 349 // only set during incremental collection, and then it's also guaranteed that |
(...skipping 49 matching lines...) |
344 Register value, | 399 Register value, |
345 RAStatus ra_status, | 400 RAStatus ra_status, |
346 SaveFPRegsMode fp_mode, | 401 SaveFPRegsMode fp_mode, |
347 RememberedSetAction remembered_set_action, | 402 RememberedSetAction remembered_set_action, |
348 SmiCheck smi_check, | 403 SmiCheck smi_check, |
349 PointersToHereCheck pointers_to_here_check_for_value) { | 404 PointersToHereCheck pointers_to_here_check_for_value) { |
350 DCHECK(!AreAliased(object, address, value, t8)); | 405 DCHECK(!AreAliased(object, address, value, t8)); |
351 DCHECK(!AreAliased(object, address, value, t9)); | 406 DCHECK(!AreAliased(object, address, value, t9)); |
352 | 407 |
353 if (emit_debug_code()) { | 408 if (emit_debug_code()) { |
354 ld(at, MemOperand(address)); | 409 Ld(at, MemOperand(address)); |
355 Assert( | 410 Assert( |
356 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value)); | 411 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value)); |
357 } | 412 } |
358 | 413 |
359 if (remembered_set_action == OMIT_REMEMBERED_SET && | 414 if (remembered_set_action == OMIT_REMEMBERED_SET && |
360 !FLAG_incremental_marking) { | 415 !FLAG_incremental_marking) { |
361 return; | 416 return; |
362 } | 417 } |
363 | 418 |
364 // First, check if a write barrier is even needed. The tests below | 419 // First, check if a write barrier is even needed. The tests below |
(...skipping 54 matching lines...) |
419 // do. | 474 // do. |
420 if (!FLAG_incremental_marking) return; | 475 if (!FLAG_incremental_marking) return; |
421 | 476 |
422 DCHECK(js_function.is(a1)); | 477 DCHECK(js_function.is(a1)); |
423 DCHECK(code_entry.is(a4)); | 478 DCHECK(code_entry.is(a4)); |
424 DCHECK(scratch.is(a5)); | 479 DCHECK(scratch.is(a5)); |
425 AssertNotSmi(js_function); | 480 AssertNotSmi(js_function); |
426 | 481 |
427 if (emit_debug_code()) { | 482 if (emit_debug_code()) { |
428 Daddu(scratch, js_function, Operand(offset - kHeapObjectTag)); | 483 Daddu(scratch, js_function, Operand(offset - kHeapObjectTag)); |
429 ld(at, MemOperand(scratch)); | 484 Ld(at, MemOperand(scratch)); |
430 Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at, | 485 Assert(eq, kWrongAddressOrValuePassedToRecordWrite, at, |
431 Operand(code_entry)); | 486 Operand(code_entry)); |
432 } | 487 } |
433 | 488 |
434 // First, check if a write barrier is even needed. The tests below | 489 // First, check if a write barrier is even needed. The tests below |
435 // catch stores of Smis and stores into young gen. | 490 // catch stores of Smis and stores into young gen. |
436 Label done; | 491 Label done; |
437 | 492 |
438 CheckPageFlag(code_entry, scratch, | 493 CheckPageFlag(code_entry, scratch, |
439 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); | 494 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done); |
(...skipping 40 matching lines...) |
480 if (emit_debug_code()) { | 535 if (emit_debug_code()) { |
481 Label ok; | 536 Label ok; |
482 JumpIfNotInNewSpace(object, scratch, &ok); | 537 JumpIfNotInNewSpace(object, scratch, &ok); |
483 stop("Remembered set pointer is in new space"); | 538 stop("Remembered set pointer is in new space"); |
484 bind(&ok); | 539 bind(&ok); |
485 } | 540 } |
486 // Load store buffer top. | 541 // Load store buffer top. |
487 ExternalReference store_buffer = | 542 ExternalReference store_buffer = |
488 ExternalReference::store_buffer_top(isolate()); | 543 ExternalReference::store_buffer_top(isolate()); |
489 li(t8, Operand(store_buffer)); | 544 li(t8, Operand(store_buffer)); |
490 ld(scratch, MemOperand(t8)); | 545 Ld(scratch, MemOperand(t8)); |
491 // Store pointer to buffer and increment buffer top. | 546 // Store pointer to buffer and increment buffer top. |
492 sd(address, MemOperand(scratch)); | 547 Sd(address, MemOperand(scratch)); |
493 Daddu(scratch, scratch, kPointerSize); | 548 Daddu(scratch, scratch, kPointerSize); |
494 // Write back new top of buffer. | 549 // Write back new top of buffer. |
495 sd(scratch, MemOperand(t8)); | 550 Sd(scratch, MemOperand(t8)); |
496 // Call stub on end of buffer. | 551 // Call stub on end of buffer. |
497 // Check for end of buffer. | 552 // Check for end of buffer. |
498 And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask)); | 553 And(t8, scratch, Operand(StoreBuffer::kStoreBufferMask)); |
499 DCHECK(!scratch.is(t8)); | 554 DCHECK(!scratch.is(t8)); |
500 if (and_then == kFallThroughAtEnd) { | 555 if (and_then == kFallThroughAtEnd) { |
501 Branch(&done, ne, t8, Operand(zero_reg)); | 556 Branch(&done, ne, t8, Operand(zero_reg)); |
502 } else { | 557 } else { |
503 DCHECK(and_then == kReturnAtEnd); | 558 DCHECK(and_then == kReturnAtEnd); |
504 Ret(ne, t8, Operand(zero_reg)); | 559 Ret(ne, t8, Operand(zero_reg)); |
505 } | 560 } |
(...skipping 736 matching lines...) |
1242 dsrl32(src, src, 0); | 1297 dsrl32(src, src, 0); |
1243 dsbh(dest, src); | 1298 dsbh(dest, src); |
1244 dshd(dest, dest); | 1299 dshd(dest, dest); |
1245 } | 1300 } |
1246 } | 1301 } |
1247 | 1302 |
1248 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { | 1303 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { |
1249 DCHECK(!rd.is(at)); | 1304 DCHECK(!rd.is(at)); |
1250 DCHECK(!rs.rm().is(at)); | 1305 DCHECK(!rs.rm().is(at)); |
1251 if (kArchVariant == kMips64r6) { | 1306 if (kArchVariant == kMips64r6) { |
1252 lw(rd, rs); | 1307 Lw(rd, rs); |
1253 } else { | 1308 } else { |
1254 DCHECK(kArchVariant == kMips64r2); | 1309 DCHECK(kArchVariant == kMips64r2); |
1255 if (is_int16(rs.offset() + kMipsLwrOffset) && | 1310 if (is_int16(rs.offset() + kMipsLwrOffset) && |
1256 is_int16(rs.offset() + kMipsLwlOffset)) { | 1311 is_int16(rs.offset() + kMipsLwlOffset)) { |
1257 if (!rd.is(rs.rm())) { | 1312 if (!rd.is(rs.rm())) { |
1258 lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); | 1313 lwr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); |
1259 lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); | 1314 lwl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); |
1260 } else { | 1315 } else { |
1261 lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); | 1316 lwr(at, MemOperand(rs.rm(), rs.offset() + kMipsLwrOffset)); |
1262 lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); | 1317 lwl(at, MemOperand(rs.rm(), rs.offset() + kMipsLwlOffset)); |
1263 mov(rd, at); | 1318 mov(rd, at); |
1264 } | 1319 } |
1265 } else { // Offset > 16 bits, use multiple instructions to load. | 1320 } else { // Offset > 16 bits, use multiple instructions to load. |
1266 LoadRegPlusOffsetToAt(rs); | 1321 LoadRegPlusOffsetToAt(rs); |
1267 lwr(rd, MemOperand(at, kMipsLwrOffset)); | 1322 lwr(rd, MemOperand(at, kMipsLwrOffset)); |
1268 lwl(rd, MemOperand(at, kMipsLwlOffset)); | 1323 lwl(rd, MemOperand(at, kMipsLwlOffset)); |
1269 } | 1324 } |
1270 } | 1325 } |
1271 } | 1326 } |
1272 | 1327 |
1273 void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) { | 1328 void MacroAssembler::Ulwu(Register rd, const MemOperand& rs) { |
1274 if (kArchVariant == kMips64r6) { | 1329 if (kArchVariant == kMips64r6) { |
1275 lwu(rd, rs); | 1330 Lwu(rd, rs); |
1276 } else { | 1331 } else { |
1277 DCHECK(kArchVariant == kMips64r2); | 1332 DCHECK(kArchVariant == kMips64r2); |
1278 Ulw(rd, rs); | 1333 Ulw(rd, rs); |
1279 Dext(rd, rd, 0, 32); | 1334 Dext(rd, rd, 0, 32); |
1280 } | 1335 } |
1281 } | 1336 } |
1282 | 1337 |
1283 | 1338 |
1284 void MacroAssembler::Usw(Register rd, const MemOperand& rs) { | 1339 void MacroAssembler::Usw(Register rd, const MemOperand& rs) { |
1285 DCHECK(!rd.is(at)); | 1340 DCHECK(!rd.is(at)); |
1286 DCHECK(!rs.rm().is(at)); | 1341 DCHECK(!rs.rm().is(at)); |
1287 if (kArchVariant == kMips64r6) { | 1342 if (kArchVariant == kMips64r6) { |
1288 sw(rd, rs); | 1343 Sw(rd, rs); |
1289 } else { | 1344 } else { |
1290 DCHECK(kArchVariant == kMips64r2); | 1345 DCHECK(kArchVariant == kMips64r2); |
1291 if (is_int16(rs.offset() + kMipsSwrOffset) && | 1346 if (is_int16(rs.offset() + kMipsSwrOffset) && |
1292 is_int16(rs.offset() + kMipsSwlOffset)) { | 1347 is_int16(rs.offset() + kMipsSwlOffset)) { |
1293 swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset)); | 1348 swr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwrOffset)); |
1294 swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset)); | 1349 swl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSwlOffset)); |
1295 } else { | 1350 } else { |
1296 LoadRegPlusOffsetToAt(rs); | 1351 LoadRegPlusOffsetToAt(rs); |
1297 swr(rd, MemOperand(at, kMipsSwrOffset)); | 1352 swr(rd, MemOperand(at, kMipsSwrOffset)); |
1298 swl(rd, MemOperand(at, kMipsSwlOffset)); | 1353 swl(rd, MemOperand(at, kMipsSwlOffset)); |
1299 } | 1354 } |
1300 } | 1355 } |
1301 } | 1356 } |
1302 | 1357 |
1303 void MacroAssembler::Ulh(Register rd, const MemOperand& rs) { | 1358 void MacroAssembler::Ulh(Register rd, const MemOperand& rs) { |
1304 DCHECK(!rd.is(at)); | 1359 DCHECK(!rd.is(at)); |
1305 DCHECK(!rs.rm().is(at)); | 1360 DCHECK(!rs.rm().is(at)); |
1306 if (kArchVariant == kMips64r6) { | 1361 if (kArchVariant == kMips64r6) { |
1307 lh(rd, rs); | 1362 Lh(rd, rs); |
1308 } else { | 1363 } else { |
1309 DCHECK(kArchVariant == kMips64r2); | 1364 DCHECK(kArchVariant == kMips64r2); |
1310 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) { | 1365 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) { |
1311 #if defined(V8_TARGET_LITTLE_ENDIAN) | 1366 #if defined(V8_TARGET_LITTLE_ENDIAN) |
1312 lbu(at, rs); | 1367 Lbu(at, rs); |
1313 lb(rd, MemOperand(rs.rm(), rs.offset() + 1)); | 1368 Lb(rd, MemOperand(rs.rm(), rs.offset() + 1)); |
1314 #elif defined(V8_TARGET_BIG_ENDIAN) | 1369 #elif defined(V8_TARGET_BIG_ENDIAN) |
1315 lbu(at, MemOperand(rs.rm(), rs.offset() + 1)); | 1370 Lbu(at, MemOperand(rs.rm(), rs.offset() + 1)); |
1316 lb(rd, rs); | 1371 Lb(rd, rs); |
1317 #endif | 1372 #endif |
1318 } else { // Offset > 16 bits, use multiple instructions to load. | 1373 } else { // Offset > 16 bits, use multiple instructions to load. |
1319 LoadRegPlusOffsetToAt(rs); | 1374 LoadRegPlusOffsetToAt(rs); |
1320 #if defined(V8_TARGET_LITTLE_ENDIAN) | 1375 #if defined(V8_TARGET_LITTLE_ENDIAN) |
1321 lb(rd, MemOperand(at, 1)); | 1376 Lb(rd, MemOperand(at, 1)); |
1322 lbu(at, MemOperand(at, 0)); | 1377 Lbu(at, MemOperand(at, 0)); |
1323 #elif defined(V8_TARGET_BIG_ENDIAN) | 1378 #elif defined(V8_TARGET_BIG_ENDIAN) |
1324 lb(rd, MemOperand(at, 0)); | 1379 Lb(rd, MemOperand(at, 0)); |
1325 lbu(at, MemOperand(at, 1)); | 1380 Lbu(at, MemOperand(at, 1)); |
1326 #endif | 1381 #endif |
1327 } | 1382 } |
1328 dsll(rd, rd, 8); | 1383 dsll(rd, rd, 8); |
1329 or_(rd, rd, at); | 1384 or_(rd, rd, at); |
1330 } | 1385 } |
1331 } | 1386 } |
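The trailing dsll/or_ pair in Ulh assembles a sign-extended halfword from the two byte loads. A plain-C++ sketch of that arithmetic for the little-endian branch (the big-endian branch only swaps which byte is read where):

```cpp
#include <cstdint>
#include <cassert>

int main() {
  const uint8_t mem[2] = {0x34, 0xfe};         // unaligned halfword 0xfe34 (LE)
  int64_t rd = static_cast<int8_t>(mem[1]);    // lb: sign-extended high byte
  int64_t at = mem[0];                         // lbu: zero-extended low byte
  rd = (rd << 8) | at;                         // dsll rd, rd, 8; or_ rd, rd, at
  assert(rd == static_cast<int16_t>(0xfe34));  // whole halfword, sign-extended
  return 0;
}
```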
1332 | 1387 |
1333 void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) { | 1388 void MacroAssembler::Ulhu(Register rd, const MemOperand& rs) { |
1334 DCHECK(!rd.is(at)); | 1389 DCHECK(!rd.is(at)); |
1335 DCHECK(!rs.rm().is(at)); | 1390 DCHECK(!rs.rm().is(at)); |
1336 if (kArchVariant == kMips64r6) { | 1391 if (kArchVariant == kMips64r6) { |
1337 lhu(rd, rs); | 1392 Lhu(rd, rs); |
1338 } else { | 1393 } else { |
1339 DCHECK(kArchVariant == kMips64r2); | 1394 DCHECK(kArchVariant == kMips64r2); |
1340 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) { | 1395 if (is_int16(rs.offset()) && is_int16(rs.offset() + 1)) { |
1341 #if defined(V8_TARGET_LITTLE_ENDIAN) | 1396 #if defined(V8_TARGET_LITTLE_ENDIAN) |
1342 lbu(at, rs); | 1397 Lbu(at, rs); |
1343 lbu(rd, MemOperand(rs.rm(), rs.offset() + 1)); | 1398 Lbu(rd, MemOperand(rs.rm(), rs.offset() + 1)); |
1344 #elif defined(V8_TARGET_BIG_ENDIAN) | 1399 #elif defined(V8_TARGET_BIG_ENDIAN) |
1345 lbu(at, MemOperand(rs.rm(), rs.offset() + 1)); | 1400 Lbu(at, MemOperand(rs.rm(), rs.offset() + 1)); |
1346 lbu(rd, rs); | 1401 Lbu(rd, rs); |
1347 #endif | 1402 #endif |
1348 } else { // Offset > 16 bits, use multiple instructions to load. | 1403 } else { // Offset > 16 bits, use multiple instructions to load. |
1349 LoadRegPlusOffsetToAt(rs); | 1404 LoadRegPlusOffsetToAt(rs); |
1350 #if defined(V8_TARGET_LITTLE_ENDIAN) | 1405 #if defined(V8_TARGET_LITTLE_ENDIAN) |
1351 lbu(rd, MemOperand(at, 1)); | 1406 Lbu(rd, MemOperand(at, 1)); |
1352 lbu(at, MemOperand(at, 0)); | 1407 Lbu(at, MemOperand(at, 0)); |
1353 #elif defined(V8_TARGET_BIG_ENDIAN) | 1408 #elif defined(V8_TARGET_BIG_ENDIAN) |
1354 lbu(rd, MemOperand(at, 0)); | 1409 Lbu(rd, MemOperand(at, 0)); |
1355 lbu(at, MemOperand(at, 1)); | 1410 Lbu(at, MemOperand(at, 1)); |
1356 #endif | 1411 #endif |
1357 } | 1412 } |
1358 dsll(rd, rd, 8); | 1413 dsll(rd, rd, 8); |
1359 or_(rd, rd, at); | 1414 or_(rd, rd, at); |
1360 } | 1415 } |
1361 } | 1416 } |
1362 | 1417 |
1363 void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { | 1418 void MacroAssembler::Ush(Register rd, const MemOperand& rs, Register scratch) { |
1364 DCHECK(!rd.is(at)); | 1419 DCHECK(!rd.is(at)); |
1365 DCHECK(!rs.rm().is(at)); | 1420 DCHECK(!rs.rm().is(at)); |
1366 DCHECK(!rs.rm().is(scratch)); | 1421 DCHECK(!rs.rm().is(scratch)); |
1367 DCHECK(!scratch.is(at)); | 1422 DCHECK(!scratch.is(at)); |
1368 if (kArchVariant == kMips64r6) { | 1423 if (kArchVariant == kMips64r6) { |
1369 sh(rd, rs); | 1424 Sh(rd, rs); |
1370 } else { | 1425 } else { |
1371 DCHECK(kArchVariant == kMips64r2); | 1426 DCHECK(kArchVariant == kMips64r2); |
1372 MemOperand source = rs; | 1427 MemOperand source = rs; |
1373 // If offset > 16 bits, load address to at with offset 0. | 1428 // If offset > 16 bits, load address to at with offset 0. |
1374 if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) { | 1429 if (!is_int16(rs.offset()) || !is_int16(rs.offset() + 1)) { |
1375 LoadRegPlusOffsetToAt(rs); | 1430 LoadRegPlusOffsetToAt(rs); |
1376 source = MemOperand(at, 0); | 1431 source = MemOperand(at, 0); |
1377 } | 1432 } |
1378 | 1433 |
1379 if (!scratch.is(rd)) { | 1434 if (!scratch.is(rd)) { |
1380 mov(scratch, rd); | 1435 mov(scratch, rd); |
1381 } | 1436 } |
1382 | 1437 |
1383 #if defined(V8_TARGET_LITTLE_ENDIAN) | 1438 #if defined(V8_TARGET_LITTLE_ENDIAN) |
1384 sb(scratch, source); | 1439 Sb(scratch, source); |
1385 srl(scratch, scratch, 8); | 1440 srl(scratch, scratch, 8); |
1386 sb(scratch, MemOperand(source.rm(), source.offset() + 1)); | 1441 Sb(scratch, MemOperand(source.rm(), source.offset() + 1)); |
1387 #elif defined(V8_TARGET_BIG_ENDIAN) | 1442 #elif defined(V8_TARGET_BIG_ENDIAN) |
1388 sb(scratch, MemOperand(source.rm(), source.offset() + 1)); | 1443 Sb(scratch, MemOperand(source.rm(), source.offset() + 1)); |
1389 srl(scratch, scratch, 8); | 1444 srl(scratch, scratch, 8); |
1390 sb(scratch, source); | 1445 Sb(scratch, source); |
1391 #endif | 1446 #endif |
1392 } | 1447 } |
1393 } | 1448 } |
1394 | 1449 |
1395 void MacroAssembler::Uld(Register rd, const MemOperand& rs) { | 1450 void MacroAssembler::Uld(Register rd, const MemOperand& rs) { |
1396 DCHECK(!rd.is(at)); | 1451 DCHECK(!rd.is(at)); |
1397 DCHECK(!rs.rm().is(at)); | 1452 DCHECK(!rs.rm().is(at)); |
1398 if (kArchVariant == kMips64r6) { | 1453 if (kArchVariant == kMips64r6) { |
1399 ld(rd, rs); | 1454 Ld(rd, rs); |
1400 } else { | 1455 } else { |
1401 DCHECK(kArchVariant == kMips64r2); | 1456 DCHECK(kArchVariant == kMips64r2); |
1402 if (is_int16(rs.offset() + kMipsLdrOffset) && | 1457 if (is_int16(rs.offset() + kMipsLdrOffset) && |
1403 is_int16(rs.offset() + kMipsLdlOffset)) { | 1458 is_int16(rs.offset() + kMipsLdlOffset)) { |
1404 if (!rd.is(rs.rm())) { | 1459 if (!rd.is(rs.rm())) { |
1405 ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset)); | 1460 ldr(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset)); |
1406 ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset)); | 1461 ldl(rd, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset)); |
1407 } else { | 1462 } else { |
1408 ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset)); | 1463 ldr(at, MemOperand(rs.rm(), rs.offset() + kMipsLdrOffset)); |
1409 ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset)); | 1464 ldl(at, MemOperand(rs.rm(), rs.offset() + kMipsLdlOffset)); |
1410 mov(rd, at); | 1465 mov(rd, at); |
1411 } | 1466 } |
1412 } else { // Offset > 16 bits, use multiple instructions to load. | 1467 } else { // Offset > 16 bits, use multiple instructions to load. |
1413 LoadRegPlusOffsetToAt(rs); | 1468 LoadRegPlusOffsetToAt(rs); |
1414 ldr(rd, MemOperand(at, kMipsLdrOffset)); | 1469 ldr(rd, MemOperand(at, kMipsLdrOffset)); |
1415 ldl(rd, MemOperand(at, kMipsLdlOffset)); | 1470 ldl(rd, MemOperand(at, kMipsLdlOffset)); |
1416 } | 1471 } |
1417 } | 1472 } |
1418 } | 1473 } |
1419 | 1474 |
1420 | 1475 |
1421 // Load consecutive 32-bit word pair in 64-bit reg. and put first word in low | 1476 // Load consecutive 32-bit word pair in 64-bit reg. and put first word in low |
1422 // bits, | 1477 // bits, |
1423 // second word in high bits. | 1478 // second word in high bits. |
1424 void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs, | 1479 void MacroAssembler::LoadWordPair(Register rd, const MemOperand& rs, |
1425 Register scratch) { | 1480 Register scratch) { |
1426 lwu(rd, rs); | 1481 Lwu(rd, rs); |
1427 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); | 1482 Lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); |
1428 dsll32(scratch, scratch, 0); | 1483 dsll32(scratch, scratch, 0); |
1429 Daddu(rd, rd, scratch); | 1484 Daddu(rd, rd, scratch); |
1430 } | 1485 } |
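A standalone sketch of the combination LoadWordPair performs, per the comment above (first word into the low bits, second into the high bits); lw sign-extends the second word, but dsll32 discards the upper half anyway:

```cpp
#include <cstdint>
#include <cassert>

int main() {
  uint32_t first = 0xdeadbeef, second = 0x01234567;  // two consecutive words
  uint64_t rd = first;                               // Lwu: zero-extended
  uint64_t scratch = uint64_t{second} << 32;         // Lw + dsll32
  rd += scratch;                                     // Daddu
  assert(rd == 0x01234567deadbeefULL);
  return 0;
}
```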
1431 | 1486 |
1432 void MacroAssembler::Usd(Register rd, const MemOperand& rs) { | 1487 void MacroAssembler::Usd(Register rd, const MemOperand& rs) { |
1433 DCHECK(!rd.is(at)); | 1488 DCHECK(!rd.is(at)); |
1434 DCHECK(!rs.rm().is(at)); | 1489 DCHECK(!rs.rm().is(at)); |
1435 if (kArchVariant == kMips64r6) { | 1490 if (kArchVariant == kMips64r6) { |
1436 sd(rd, rs); | 1491 Sd(rd, rs); |
1437 } else { | 1492 } else { |
1438 DCHECK(kArchVariant == kMips64r2); | 1493 DCHECK(kArchVariant == kMips64r2); |
1439 if (is_int16(rs.offset() + kMipsSdrOffset) && | 1494 if (is_int16(rs.offset() + kMipsSdrOffset) && |
1440 is_int16(rs.offset() + kMipsSdlOffset)) { | 1495 is_int16(rs.offset() + kMipsSdlOffset)) { |
1441 sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset)); | 1496 sdr(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdrOffset)); |
1442 sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset)); | 1497 sdl(rd, MemOperand(rs.rm(), rs.offset() + kMipsSdlOffset)); |
1443 } else { | 1498 } else { |
1444 LoadRegPlusOffsetToAt(rs); | 1499 LoadRegPlusOffsetToAt(rs); |
1445 sdr(rd, MemOperand(at, kMipsSdrOffset)); | 1500 sdr(rd, MemOperand(at, kMipsSdrOffset)); |
1446 sdl(rd, MemOperand(at, kMipsSdlOffset)); | 1501 sdl(rd, MemOperand(at, kMipsSdlOffset)); |
1447 } | 1502 } |
1448 } | 1503 } |
1449 } | 1504 } |
1450 | 1505 |
1451 | 1506 |
1452 // Do 64-bit store as two consecutive 32-bit stores to unaligned address. | 1507 // Do 64-bit store as two consecutive 32-bit stores to unaligned address. |
1453 void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs, | 1508 void MacroAssembler::StoreWordPair(Register rd, const MemOperand& rs, |
1454 Register scratch) { | 1509 Register scratch) { |
1455 sw(rd, rs); | 1510 Sw(rd, rs); |
1456 dsrl32(scratch, rd, 0); | 1511 dsrl32(scratch, rd, 0); |
1457 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); | 1512 Sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2)); |
1458 } | 1513 } |
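And the inverse split in StoreWordPair, where dsrl32 extracts the high word before the second store:

```cpp
#include <cstdint>
#include <cassert>

int main() {
  uint64_t rd = 0x01234567deadbeefULL;
  uint32_t low = static_cast<uint32_t>(rd);         // first Sw stores this
  uint32_t high = static_cast<uint32_t>(rd >> 32);  // dsrl32, then second Sw
  assert(low == 0xdeadbeefu && high == 0x01234567u);
  return 0;
}
```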
1459 | 1514 |
1460 void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, | 1515 void MacroAssembler::Ulwc1(FPURegister fd, const MemOperand& rs, |
1461 Register scratch) { | 1516 Register scratch) { |
1462 if (kArchVariant == kMips64r6) { | 1517 if (kArchVariant == kMips64r6) { |
1463 lwc1(fd, rs); | 1518 Lwc1(fd, rs); |
1464 } else { | 1519 } else { |
1465 DCHECK(kArchVariant == kMips64r2); | 1520 DCHECK(kArchVariant == kMips64r2); |
1466 Ulw(scratch, rs); | 1521 Ulw(scratch, rs); |
1467 mtc1(scratch, fd); | 1522 mtc1(scratch, fd); |
1468 } | 1523 } |
1469 } | 1524 } |
1470 | 1525 |
1471 void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs, | 1526 void MacroAssembler::Uswc1(FPURegister fd, const MemOperand& rs, |
1472 Register scratch) { | 1527 Register scratch) { |
1473 if (kArchVariant == kMips64r6) { | 1528 if (kArchVariant == kMips64r6) { |
1474 swc1(fd, rs); | 1529 Swc1(fd, rs); |
1475 } else { | 1530 } else { |
1476 DCHECK(kArchVariant == kMips64r2); | 1531 DCHECK(kArchVariant == kMips64r2); |
1477 mfc1(scratch, fd); | 1532 mfc1(scratch, fd); |
1478 Usw(scratch, rs); | 1533 Usw(scratch, rs); |
1479 } | 1534 } |
1480 } | 1535 } |
1481 | 1536 |
1482 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs, | 1537 void MacroAssembler::Uldc1(FPURegister fd, const MemOperand& rs, |
1483 Register scratch) { | 1538 Register scratch) { |
1484 DCHECK(!scratch.is(at)); | 1539 DCHECK(!scratch.is(at)); |
1485 if (kArchVariant == kMips64r6) { | 1540 if (kArchVariant == kMips64r6) { |
1486 ldc1(fd, rs); | 1541 Ldc1(fd, rs); |
1487 } else { | 1542 } else { |
1488 DCHECK(kArchVariant == kMips64r2); | 1543 DCHECK(kArchVariant == kMips64r2); |
1489 Uld(scratch, rs); | 1544 Uld(scratch, rs); |
1490 dmtc1(scratch, fd); | 1545 dmtc1(scratch, fd); |
1491 } | 1546 } |
1492 } | 1547 } |
1493 | 1548 |
1494 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs, | 1549 void MacroAssembler::Usdc1(FPURegister fd, const MemOperand& rs, |
1495 Register scratch) { | 1550 Register scratch) { |
1496 DCHECK(!scratch.is(at)); | 1551 DCHECK(!scratch.is(at)); |
1497 if (kArchVariant == kMips64r6) { | 1552 if (kArchVariant == kMips64r6) { |
1498 sdc1(fd, rs); | 1553 Sdc1(fd, rs); |
1499 } else { | 1554 } else { |
1500 DCHECK(kArchVariant == kMips64r2); | 1555 DCHECK(kArchVariant == kMips64r2); |
1501 dmfc1(scratch, fd); | 1556 dmfc1(scratch, fd); |
1502 Usd(scratch, rs); | 1557 Usd(scratch, rs); |
1503 } | 1558 } |
1504 } | 1559 } |
1505 | 1560 |
| 1561 void MacroAssembler::Lb(Register rd, const MemOperand& rs) { |
| 1562 if (is_int16(rs.offset())) { |
| 1563 lb(rd, rs); |
| 1564 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1565 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1566 lb(rd, MemOperand(at, off16)); |
| 1567 } |
| 1568 } |
| 1569 |
| 1570 void MacroAssembler::Lbu(Register rd, const MemOperand& rs) { |
| 1571 if (is_int16(rs.offset())) { |
| 1572 lbu(rd, rs); |
| 1573 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1574 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1575 lbu(rd, MemOperand(at, off16)); |
| 1576 } |
| 1577 } |
| 1578 |
| 1579 void MacroAssembler::Sb(Register rd, const MemOperand& rs) { |
| 1580 if (is_int16(rs.offset())) { |
| 1581 sb(rd, rs); |
| 1582 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1583 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1584 sb(rd, MemOperand(at, off16)); |
| 1585 } |
| 1586 } |
| 1587 |
| 1588 void MacroAssembler::Lh(Register rd, const MemOperand& rs) { |
| 1589 if (is_int16(rs.offset())) { |
| 1590 lh(rd, rs); |
| 1591 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1592 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1593 lh(rd, MemOperand(at, off16)); |
| 1594 } |
| 1595 } |
| 1596 |
| 1597 void MacroAssembler::Lhu(Register rd, const MemOperand& rs) { |
| 1598 if (is_int16(rs.offset())) { |
| 1599 lhu(rd, rs); |
| 1600 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1601 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1602 lhu(rd, MemOperand(at, off16)); |
| 1603 } |
| 1604 } |
| 1605 |
| 1606 void MacroAssembler::Sh(Register rd, const MemOperand& rs) { |
| 1607 if (is_int16(rs.offset())) { |
| 1608 sh(rd, rs); |
| 1609 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1610 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1611 sh(rd, MemOperand(at, off16)); |
| 1612 } |
| 1613 } |
| 1614 |
| 1615 void MacroAssembler::Lw(Register rd, const MemOperand& rs) { |
| 1616 if (is_int16(rs.offset())) { |
| 1617 lw(rd, rs); |
| 1618 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1619 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1620 lw(rd, MemOperand(at, off16)); |
| 1621 } |
| 1622 } |
| 1623 |
| 1624 void MacroAssembler::Lwu(Register rd, const MemOperand& rs) { |
| 1625 if (is_int16(rs.offset())) { |
| 1626 lwu(rd, rs); |
| 1627 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1628 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1629 lwu(rd, MemOperand(at, off16)); |
| 1630 } |
| 1631 } |
| 1632 |
| 1633 void MacroAssembler::Sw(Register rd, const MemOperand& rs) { |
| 1634 if (is_int16(rs.offset())) { |
| 1635 sw(rd, rs); |
| 1636 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1637 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1638 sw(rd, MemOperand(at, off16)); |
| 1639 } |
| 1640 } |
| 1641 |
| 1642 void MacroAssembler::Ld(Register rd, const MemOperand& rs) { |
| 1643 if (is_int16(rs.offset())) { |
| 1644 ld(rd, rs); |
| 1645 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1646 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1647 ld(rd, MemOperand(at, off16)); |
| 1648 } |
| 1649 } |
| 1650 |
| 1651 void MacroAssembler::Sd(Register rd, const MemOperand& rs) { |
| 1652 if (is_int16(rs.offset())) { |
| 1653 sd(rd, rs); |
| 1654 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1655 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(rs); |
| 1656 sd(rd, MemOperand(at, off16)); |
| 1657 } |
| 1658 } |
| 1659 |
| 1660 void MacroAssembler::Lwc1(FPURegister fd, const MemOperand& src) { |
| 1661 if (is_int16(src.offset())) { |
| 1662 lwc1(fd, src); |
| 1663 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1664 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
| 1665 lwc1(fd, MemOperand(at, off16)); |
| 1666 } |
| 1667 } |
| 1668 |
| 1669 void MacroAssembler::Swc1(FPURegister fs, const MemOperand& src) { |
| 1670 if (is_int16(src.offset())) { |
| 1671 swc1(fs, src); |
| 1672 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1673 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
| 1674 swc1(fs, MemOperand(at, off16)); |
| 1675 } |
| 1676 } |
| 1677 |
| 1678 void MacroAssembler::Ldc1(FPURegister fd, const MemOperand& src) { |
| 1679 if (is_int16(src.offset())) { |
| 1680 ldc1(fd, src); |
| 1681 } else { // Offset > 16 bits, use multiple instructions to load. |
| 1682 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
| 1683 ldc1(fd, MemOperand(at, off16)); |
| 1684 } |
| 1685 } |
| 1686 |
| 1687 void MacroAssembler::Sdc1(FPURegister fs, const MemOperand& src) { |
| 1688 DCHECK(!src.rm().is(at)); |
| 1689 if (is_int16(src.offset())) { |
| 1690 sdc1(fs, src); |
| 1691 } else { // Offset > 16 bits, use multiple instructions to store. |
| 1692 int32_t off16 = LoadRegPlusUpperOffsetPartToAt(src); |
| 1693 sdc1(fs, MemOperand(at, off16)); |
| 1694 } |
| 1695 } |
| 1696 |
1506 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { | 1697 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { |
1507 li(dst, Operand(value), mode); | 1698 li(dst, Operand(value), mode); |
1508 } | 1699 } |
1509 | 1700 |
1510 static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) { | 1701 static inline int64_t ShiftAndFixSignExtension(int64_t imm, int bitnum) { |
1511 if ((imm >> (bitnum - 1)) & 0x1) { | 1702 if ((imm >> (bitnum - 1)) & 0x1) { |
1512 imm = (imm >> bitnum) + 1; | 1703 imm = (imm >> bitnum) + 1; |
1513 } else { | 1704 } else { |
1514 imm = imm >> bitnum; | 1705 imm = imm >> bitnum; |
1515 } | 1706 } |
(...skipping 127 matching lines...) |
1643 | 1834 |
1644 | 1835 |
1645 void MacroAssembler::MultiPush(RegList regs) { | 1836 void MacroAssembler::MultiPush(RegList regs) { |
1646 int16_t num_to_push = NumberOfBitsSet(regs); | 1837 int16_t num_to_push = NumberOfBitsSet(regs); |
1647 int16_t stack_offset = num_to_push * kPointerSize; | 1838 int16_t stack_offset = num_to_push * kPointerSize; |
1648 | 1839 |
1649 Dsubu(sp, sp, Operand(stack_offset)); | 1840 Dsubu(sp, sp, Operand(stack_offset)); |
1650 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 1841 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
1651 if ((regs & (1 << i)) != 0) { | 1842 if ((regs & (1 << i)) != 0) { |
1652 stack_offset -= kPointerSize; | 1843 stack_offset -= kPointerSize; |
1653 sd(ToRegister(i), MemOperand(sp, stack_offset)); | 1844 Sd(ToRegister(i), MemOperand(sp, stack_offset)); |
1654 } | 1845 } |
1655 } | 1846 } |
1656 } | 1847 } |
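MultiPush walks the bitmask from the highest encoding down, which is what makes the SafepointRegisterStackIndex comment hold (lowest encodings end up closest to sp). A standalone sketch of the layout math, with RegList assumed to be a plain bitmask and the encodings chosen arbitrarily:

```cpp
#include <cstdint>
#include <cstdio>

int main() {
  const int kPointerSize = 8;
  const int kNumRegisters = 32;
  uint32_t regs = (1u << 2) | (1u << 5) | (1u << 7);  // hypothetical RegList
  int stack_offset = __builtin_popcount(regs) * kPointerSize;
  for (int i = kNumRegisters - 1; i >= 0; i--) {
    if (regs & (1u << i)) {
      stack_offset -= kPointerSize;
      std::printf("reg %d -> sp+%d\n", i, stack_offset);  // reg 2 lands at sp+0
    }
  }
  return 0;
}
```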
1657 | 1848 |
1658 | 1849 |
1659 void MacroAssembler::MultiPushReversed(RegList regs) { | 1850 void MacroAssembler::MultiPushReversed(RegList regs) { |
1660 int16_t num_to_push = NumberOfBitsSet(regs); | 1851 int16_t num_to_push = NumberOfBitsSet(regs); |
1661 int16_t stack_offset = num_to_push * kPointerSize; | 1852 int16_t stack_offset = num_to_push * kPointerSize; |
1662 | 1853 |
1663 Dsubu(sp, sp, Operand(stack_offset)); | 1854 Dsubu(sp, sp, Operand(stack_offset)); |
1664 for (int16_t i = 0; i < kNumRegisters; i++) { | 1855 for (int16_t i = 0; i < kNumRegisters; i++) { |
1665 if ((regs & (1 << i)) != 0) { | 1856 if ((regs & (1 << i)) != 0) { |
1666 stack_offset -= kPointerSize; | 1857 stack_offset -= kPointerSize; |
1667 sd(ToRegister(i), MemOperand(sp, stack_offset)); | 1858 Sd(ToRegister(i), MemOperand(sp, stack_offset)); |
1668 } | 1859 } |
1669 } | 1860 } |
1670 } | 1861 } |
1671 | 1862 |
1672 | 1863 |
1673 void MacroAssembler::MultiPop(RegList regs) { | 1864 void MacroAssembler::MultiPop(RegList regs) { |
1674 int16_t stack_offset = 0; | 1865 int16_t stack_offset = 0; |
1675 | 1866 |
1676 for (int16_t i = 0; i < kNumRegisters; i++) { | 1867 for (int16_t i = 0; i < kNumRegisters; i++) { |
1677 if ((regs & (1 << i)) != 0) { | 1868 if ((regs & (1 << i)) != 0) { |
1678 ld(ToRegister(i), MemOperand(sp, stack_offset)); | 1869 Ld(ToRegister(i), MemOperand(sp, stack_offset)); |
1679 stack_offset += kPointerSize; | 1870 stack_offset += kPointerSize; |
1680 } | 1871 } |
1681 } | 1872 } |
1682 daddiu(sp, sp, stack_offset); | 1873 daddiu(sp, sp, stack_offset); |
1683 } | 1874 } |
1684 | 1875 |
1685 | 1876 |
1686 void MacroAssembler::MultiPopReversed(RegList regs) { | 1877 void MacroAssembler::MultiPopReversed(RegList regs) { |
1687 int16_t stack_offset = 0; | 1878 int16_t stack_offset = 0; |
1688 | 1879 |
1689 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 1880 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
1690 if ((regs & (1 << i)) != 0) { | 1881 if ((regs & (1 << i)) != 0) { |
1691 ld(ToRegister(i), MemOperand(sp, stack_offset)); | 1882 Ld(ToRegister(i), MemOperand(sp, stack_offset)); |
1692 stack_offset += kPointerSize; | 1883 stack_offset += kPointerSize; |
1693 } | 1884 } |
1694 } | 1885 } |
1695 daddiu(sp, sp, stack_offset); | 1886 daddiu(sp, sp, stack_offset); |
1696 } | 1887 } |
1697 | 1888 |
1698 | 1889 |
1699 void MacroAssembler::MultiPushFPU(RegList regs) { | 1890 void MacroAssembler::MultiPushFPU(RegList regs) { |
1700 int16_t num_to_push = NumberOfBitsSet(regs); | 1891 int16_t num_to_push = NumberOfBitsSet(regs); |
1701 int16_t stack_offset = num_to_push * kDoubleSize; | 1892 int16_t stack_offset = num_to_push * kDoubleSize; |
1702 | 1893 |
1703 Dsubu(sp, sp, Operand(stack_offset)); | 1894 Dsubu(sp, sp, Operand(stack_offset)); |
1704 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 1895 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
1705 if ((regs & (1 << i)) != 0) { | 1896 if ((regs & (1 << i)) != 0) { |
1706 stack_offset -= kDoubleSize; | 1897 stack_offset -= kDoubleSize; |
1707 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 1898 Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
1708 } | 1899 } |
1709 } | 1900 } |
1710 } | 1901 } |
1711 | 1902 |
1712 | 1903 |
1713 void MacroAssembler::MultiPushReversedFPU(RegList regs) { | 1904 void MacroAssembler::MultiPushReversedFPU(RegList regs) { |
1714 int16_t num_to_push = NumberOfBitsSet(regs); | 1905 int16_t num_to_push = NumberOfBitsSet(regs); |
1715 int16_t stack_offset = num_to_push * kDoubleSize; | 1906 int16_t stack_offset = num_to_push * kDoubleSize; |
1716 | 1907 |
1717 Dsubu(sp, sp, Operand(stack_offset)); | 1908 Dsubu(sp, sp, Operand(stack_offset)); |
1718 for (int16_t i = 0; i < kNumRegisters; i++) { | 1909 for (int16_t i = 0; i < kNumRegisters; i++) { |
1719 if ((regs & (1 << i)) != 0) { | 1910 if ((regs & (1 << i)) != 0) { |
1720 stack_offset -= kDoubleSize; | 1911 stack_offset -= kDoubleSize; |
1721 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 1912 Sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
1722 } | 1913 } |
1723 } | 1914 } |
1724 } | 1915 } |
1725 | 1916 |
1726 | 1917 |
1727 void MacroAssembler::MultiPopFPU(RegList regs) { | 1918 void MacroAssembler::MultiPopFPU(RegList regs) { |
1728 int16_t stack_offset = 0; | 1919 int16_t stack_offset = 0; |
1729 | 1920 |
1730 for (int16_t i = 0; i < kNumRegisters; i++) { | 1921 for (int16_t i = 0; i < kNumRegisters; i++) { |
1731 if ((regs & (1 << i)) != 0) { | 1922 if ((regs & (1 << i)) != 0) { |
1732 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 1923 Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
1733 stack_offset += kDoubleSize; | 1924 stack_offset += kDoubleSize; |
1734 } | 1925 } |
1735 } | 1926 } |
1736 daddiu(sp, sp, stack_offset); | 1927 daddiu(sp, sp, stack_offset); |
1737 } | 1928 } |
1738 | 1929 |
1739 | 1930 |
1740 void MacroAssembler::MultiPopReversedFPU(RegList regs) { | 1931 void MacroAssembler::MultiPopReversedFPU(RegList regs) { |
1741 int16_t stack_offset = 0; | 1932 int16_t stack_offset = 0; |
1742 | 1933 |
1743 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { | 1934 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { |
1744 if ((regs & (1 << i)) != 0) { | 1935 if ((regs & (1 << i)) != 0) { |
1745 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); | 1936 Ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); |
1746 stack_offset += kDoubleSize; | 1937 stack_offset += kDoubleSize; |
1747 } | 1938 } |
1748 } | 1939 } |
1749 daddiu(sp, sp, stack_offset); | 1940 daddiu(sp, sp, stack_offset); |
1750 } | 1941 } |
1751 | 1942 |
1752 | 1943 |
1753 void MacroAssembler::Ext(Register rt, | 1944 void MacroAssembler::Ext(Register rt, |
1754 Register rs, | 1945 Register rs, |
1755 uint16_t pos, | 1946 uint16_t pos, |
(...skipping 896 matching lines...) |
2652 | 2843 |
2653 void MacroAssembler::TruncateDoubleToI(Register result, | 2844 void MacroAssembler::TruncateDoubleToI(Register result, |
2654 DoubleRegister double_input) { | 2845 DoubleRegister double_input) { |
2655 Label done; | 2846 Label done; |
2656 | 2847 |
2657 TryInlineTruncateDoubleToI(result, double_input, &done); | 2848 TryInlineTruncateDoubleToI(result, double_input, &done); |
2658 | 2849 |
2659 // If we fell through, the inline version didn't succeed; call the stub instead. | 2850 // If we fell through, the inline version didn't succeed; call the stub instead. |
2660 push(ra); | 2851 push(ra); |
2661 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack. | 2852 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack. |
2662 sdc1(double_input, MemOperand(sp, 0)); | 2853 Sdc1(double_input, MemOperand(sp, 0)); |
2663 | 2854 |
2664 DoubleToIStub stub(isolate(), sp, result, 0, true, true); | 2855 DoubleToIStub stub(isolate(), sp, result, 0, true, true); |
2665 CallStub(&stub); | 2856 CallStub(&stub); |
2666 | 2857 |
2667 Daddu(sp, sp, Operand(kDoubleSize)); | 2858 Daddu(sp, sp, Operand(kDoubleSize)); |
2668 pop(ra); | 2859 pop(ra); |
2669 | 2860 |
2670 bind(&done); | 2861 bind(&done); |
2671 } | 2862 } |
2672 | 2863 |
2673 | 2864 |
2674 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { | 2865 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { |
2675 Label done; | 2866 Label done; |
2676 DoubleRegister double_scratch = f12; | 2867 DoubleRegister double_scratch = f12; |
2677 DCHECK(!result.is(object)); | 2868 DCHECK(!result.is(object)); |
2678 | 2869 |
2679 ldc1(double_scratch, | 2870 Ldc1(double_scratch, |
2680 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); | 2871 MemOperand(object, HeapNumber::kValueOffset - kHeapObjectTag)); |
2681 TryInlineTruncateDoubleToI(result, double_scratch, &done); | 2872 TryInlineTruncateDoubleToI(result, double_scratch, &done); |
2682 | 2873 |
2683 // If we fell through, the inline version didn't succeed; call the stub instead. | 2874 // If we fell through, the inline version didn't succeed; call the stub instead. |
2684 push(ra); | 2875 push(ra); |
2685 DoubleToIStub stub(isolate(), | 2876 DoubleToIStub stub(isolate(), |
2686 object, | 2877 object, |
2687 result, | 2878 result, |
2688 HeapNumber::kValueOffset - kHeapObjectTag, | 2879 HeapNumber::kValueOffset - kHeapObjectTag, |
2689 true, | 2880 true, |
(...skipping 1349 matching lines...) |
4039 dsrl32(dst, dst, 0); | 4230 dsrl32(dst, dst, 0); |
4040 dsll32(dst, dst, 0); | 4231 dsll32(dst, dst, 0); |
4041 or_(dst, dst, scratch); | 4232 or_(dst, dst, scratch); |
4042 } | 4233 } |
4043 | 4234 |
4044 void MacroAssembler::MaybeDropFrames() { | 4235 void MacroAssembler::MaybeDropFrames() { |
4045 // Check whether we need to drop frames to restart a function on the stack. | 4236 // Check whether we need to drop frames to restart a function on the stack. |
4046 ExternalReference restart_fp = | 4237 ExternalReference restart_fp = |
4047 ExternalReference::debug_restart_fp_address(isolate()); | 4238 ExternalReference::debug_restart_fp_address(isolate()); |
4048 li(a1, Operand(restart_fp)); | 4239 li(a1, Operand(restart_fp)); |
4049 ld(a1, MemOperand(a1)); | 4240 Ld(a1, MemOperand(a1)); |
4050 Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET, | 4241 Jump(isolate()->builtins()->FrameDropperTrampoline(), RelocInfo::CODE_TARGET, |
4051 ne, a1, Operand(zero_reg)); | 4242 ne, a1, Operand(zero_reg)); |
4052 } | 4243 } |
4053 | 4244 |
4054 // --------------------------------------------------------------------------- | 4245 // --------------------------------------------------------------------------- |
4055 // Exception handling. | 4246 // Exception handling. |
4056 | 4247 |
4057 void MacroAssembler::PushStackHandler() { | 4248 void MacroAssembler::PushStackHandler() { |
4058 // Adjust this code if not the case. | 4249 // Adjust this code if not the case. |
4059 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); | 4250 STATIC_ASSERT(StackHandlerConstants::kSize == 1 * kPointerSize); |
4060 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); | 4251 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); |
4061 | 4252 |
4062 // Link the current handler as the next handler. | 4253 // Link the current handler as the next handler. |
4063 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 4254 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
4064 ld(a5, MemOperand(a6)); | 4255 Ld(a5, MemOperand(a6)); |
4065 push(a5); | 4256 push(a5); |
4066 | 4257 |
4067 // Set this new handler as the current one. | 4258 // Set this new handler as the current one. |
4068 sd(sp, MemOperand(a6)); | 4259 Sd(sp, MemOperand(a6)); |
4069 } | 4260 } |
4070 | 4261 |
4071 | 4262 |
4072 void MacroAssembler::PopStackHandler() { | 4263 void MacroAssembler::PopStackHandler() { |
4073 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); | 4264 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); |
4074 pop(a1); | 4265 pop(a1); |
4075 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize - | 4266 Daddu(sp, sp, Operand(static_cast<int64_t>(StackHandlerConstants::kSize - |
4076 kPointerSize))); | 4267 kPointerSize))); |
4077 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); | 4268 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); |
4078 sd(a1, MemOperand(at)); | 4269 Sd(a1, MemOperand(at)); |
4079 } | 4270 } |
4080 | 4271 |
4081 | 4272 |
4082 void MacroAssembler::Allocate(int object_size, | 4273 void MacroAssembler::Allocate(int object_size, |
4083 Register result, | 4274 Register result, |
4084 Register scratch1, | 4275 Register scratch1, |
4085 Register scratch2, | 4276 Register scratch2, |
4086 Label* gc_required, | 4277 Label* gc_required, |
4087 AllocationFlags flags) { | 4278 AllocationFlags flags) { |
4088 DCHECK(object_size <= kMaxRegularHeapObjectSize); | 4279 DCHECK(object_size <= kMaxRegularHeapObjectSize); |
(...skipping 30 matching lines...) |
4119 | 4310 |
4120 // Set up allocation top address and allocation limit registers. | 4311 // Set up allocation top address and allocation limit registers. |
4121 Register top_address = scratch1; | 4312 Register top_address = scratch1; |
4122 // This code stores a temporary value in t9. | 4313 // This code stores a temporary value in t9. |
4123 Register alloc_limit = t9; | 4314 Register alloc_limit = t9; |
4124 Register result_end = scratch2; | 4315 Register result_end = scratch2; |
4125 li(top_address, Operand(allocation_top)); | 4316 li(top_address, Operand(allocation_top)); |
4126 | 4317 |
4127 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 4318 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
4128 // Load allocation top into result and allocation limit into alloc_limit. | 4319 // Load allocation top into result and allocation limit into alloc_limit. |
4129 ld(result, MemOperand(top_address)); | 4320 Ld(result, MemOperand(top_address)); |
4130 ld(alloc_limit, MemOperand(top_address, kPointerSize)); | 4321 Ld(alloc_limit, MemOperand(top_address, kPointerSize)); |
4131 } else { | 4322 } else { |
4132 if (emit_debug_code()) { | 4323 if (emit_debug_code()) { |
4133 // Assert that result actually contains top on entry. | 4324 // Assert that result actually contains top on entry. |
4134 ld(alloc_limit, MemOperand(top_address)); | 4325 Ld(alloc_limit, MemOperand(top_address)); |
4135 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit)); | 4326 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit)); |
4136 } | 4327 } |
4137 // Load allocation limit. Result already contains allocation top. | 4328 // Load allocation limit. Result already contains allocation top. |
4138 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top))); | 4329 Ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top))); |
4139 } | 4330 } |
4140 | 4331 |
4141 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 4332 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
4142 // the same alignment on MIPS64. | 4333 // the same alignment on MIPS64. |
4143 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 4334 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
4144 | 4335 |
4145 if (emit_debug_code()) { | 4336 if (emit_debug_code()) { |
4146 And(at, result, Operand(kDoubleAlignmentMask)); | 4337 And(at, result, Operand(kDoubleAlignmentMask)); |
4147 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); | 4338 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); |
4148 } | 4339 } |
4149 | 4340 |
4150 // Calculate new top and bail out if new space is exhausted. Use result | 4341 // Calculate new top and bail out if new space is exhausted. Use result |
4151 // to calculate the new top. | 4342 // to calculate the new top. |
4152 Daddu(result_end, result, Operand(object_size)); | 4343 Daddu(result_end, result, Operand(object_size)); |
4153 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit)); | 4344 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit)); |
4154 | 4345 |
4155 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) { | 4346 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) { |
4156 // The top pointer is not updated for allocation folding dominators. | 4347 // The top pointer is not updated for allocation folding dominators. |
4157 sd(result_end, MemOperand(top_address)); | 4348 Sd(result_end, MemOperand(top_address)); |
4158 } | 4349 } |
4159 | 4350 |
4160 // Tag object. | 4351 // Tag object. |
4161 Daddu(result, result, Operand(kHeapObjectTag)); | 4352 Daddu(result, result, Operand(kHeapObjectTag)); |
4162 } | 4353 } |
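The body above is the classic bump-pointer fast path. As a C++ sketch (simplified; it leans on the invariant, DCHECKed in the register variant below, that the limit word sits one pointer after the top word):

    // Sketch: bump-pointer allocation with a limit check; nullptr stands in
    // for the branch to gc_required.
    inline uint8_t* AllocateRaw(uint8_t** top_address, size_t object_size,
                                bool folding_dominator) {
      uint8_t* result = top_address[0];             // allocation top
      uint8_t* limit = top_address[1];              // allocation limit
      uint8_t* result_end = result + object_size;
      if (result_end > limit) return nullptr;       // new space exhausted
      if (!folding_dominator) top_address[0] = result_end;  // publish top
      return result + 1;                            // kHeapObjectTag == 1
    }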
4163 | 4354 |
4164 | 4355 |
4165 void MacroAssembler::Allocate(Register object_size, Register result, | 4356 void MacroAssembler::Allocate(Register object_size, Register result, |
4166 Register result_end, Register scratch, | 4357 Register result_end, Register scratch, |
4167 Label* gc_required, AllocationFlags flags) { | 4358 Label* gc_required, AllocationFlags flags) { |
(...skipping 24 matching lines...) |
4192 DCHECK((limit - top) == kPointerSize); | 4383 DCHECK((limit - top) == kPointerSize); |
4193 | 4384 |
4194 // Set up allocation top address and object size registers. | 4385 // Set up allocation top address and object size registers. |
4195 Register top_address = scratch; | 4386 Register top_address = scratch; |
4196 // This code stores a temporary value in t9. | 4387 // This code stores a temporary value in t9. |
4197 Register alloc_limit = t9; | 4388 Register alloc_limit = t9; |
4198 li(top_address, Operand(allocation_top)); | 4389 li(top_address, Operand(allocation_top)); |
4199 | 4390 |
4200 if ((flags & RESULT_CONTAINS_TOP) == 0) { | 4391 if ((flags & RESULT_CONTAINS_TOP) == 0) { |
4201 // Load allocation top into result and allocation limit into alloc_limit. | 4392 // Load allocation top into result and allocation limit into alloc_limit. |
4202 ld(result, MemOperand(top_address)); | 4393 Ld(result, MemOperand(top_address)); |
4203 ld(alloc_limit, MemOperand(top_address, kPointerSize)); | 4394 Ld(alloc_limit, MemOperand(top_address, kPointerSize)); |
4204 } else { | 4395 } else { |
4205 if (emit_debug_code()) { | 4396 if (emit_debug_code()) { |
4206 // Assert that result actually contains top on entry. | 4397 // Assert that result actually contains top on entry. |
4207 ld(alloc_limit, MemOperand(top_address)); | 4398 Ld(alloc_limit, MemOperand(top_address)); |
4208 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit)); | 4399 Check(eq, kUnexpectedAllocationTop, result, Operand(alloc_limit)); |
4209 } | 4400 } |
4210 // Load allocation limit. Result already contains allocation top. | 4401 // Load allocation limit. Result already contains allocation top. |
4211 ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top))); | 4402 Ld(alloc_limit, MemOperand(top_address, static_cast<int32_t>(limit - top))); |
4212 } | 4403 } |
4213 | 4404 |
4214 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 4405 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
4215 // the same alignment on MIPS64. | 4406 // the same alignment on MIPS64. |
4216 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 4407 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
4217 | 4408 |
4218 if (emit_debug_code()) { | 4409 if (emit_debug_code()) { |
4219 And(at, result, Operand(kDoubleAlignmentMask)); | 4410 And(at, result, Operand(kDoubleAlignmentMask)); |
4220 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); | 4411 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); |
4221 } | 4412 } |
(...skipping 10 matching lines...) |
4232 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit)); | 4423 Branch(gc_required, Ugreater, result_end, Operand(alloc_limit)); |
4233 | 4424 |
4234 // Update allocation top. result temporarily holds the new top. | 4425 // Update allocation top. result temporarily holds the new top. |
4235 if (emit_debug_code()) { | 4426 if (emit_debug_code()) { |
4236 And(at, result_end, Operand(kObjectAlignmentMask)); | 4427 And(at, result_end, Operand(kObjectAlignmentMask)); |
4237 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg)); | 4428 Check(eq, kUnalignedAllocationInNewSpace, at, Operand(zero_reg)); |
4238 } | 4429 } |
4239 | 4430 |
4240 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) { | 4431 if ((flags & ALLOCATION_FOLDING_DOMINATOR) == 0) { |
4241 // The top pointer is not updated for allocation folding dominators. | 4432 // The top pointer is not updated for allocation folding dominators. |
4242 sd(result_end, MemOperand(top_address)); | 4433 Sd(result_end, MemOperand(top_address)); |
4243 } | 4434 } |
4244 | 4435 |
4245 // Tag object. | 4436 // Tag object. |
4246 Daddu(result, result, Operand(kHeapObjectTag)); | 4437 Daddu(result, result, Operand(kHeapObjectTag)); |
4247 } | 4438 } |
4248 | 4439 |
4249 void MacroAssembler::FastAllocate(int object_size, Register result, | 4440 void MacroAssembler::FastAllocate(int object_size, Register result, |
4250 Register scratch1, Register scratch2, | 4441 Register scratch1, Register scratch2, |
4251 AllocationFlags flags) { | 4442 AllocationFlags flags) { |
4252 DCHECK(object_size <= kMaxRegularHeapObjectSize); | 4443 DCHECK(object_size <= kMaxRegularHeapObjectSize); |
4253 DCHECK(!AreAliased(result, scratch1, scratch2, at)); | 4444 DCHECK(!AreAliased(result, scratch1, scratch2, at)); |
4254 | 4445 |
4255 // Make object size into bytes. | 4446 // Make object size into bytes. |
4256 if ((flags & SIZE_IN_WORDS) != 0) { | 4447 if ((flags & SIZE_IN_WORDS) != 0) { |
4257 object_size *= kPointerSize; | 4448 object_size *= kPointerSize; |
4258 } | 4449 } |
4259 DCHECK(0 == (object_size & kObjectAlignmentMask)); | 4450 DCHECK(0 == (object_size & kObjectAlignmentMask)); |
4260 | 4451 |
4261 ExternalReference allocation_top = | 4452 ExternalReference allocation_top = |
4262 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 4453 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
4263 | 4454 |
4264 Register top_address = scratch1; | 4455 Register top_address = scratch1; |
4265 Register result_end = scratch2; | 4456 Register result_end = scratch2; |
4266 li(top_address, Operand(allocation_top)); | 4457 li(top_address, Operand(allocation_top)); |
4267 ld(result, MemOperand(top_address)); | 4458 Ld(result, MemOperand(top_address)); |
4268 | 4459 |
4269 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 4460 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
4270 // the same alignment on MIPS64. | 4461 // the same alignment on MIPS64. |
4271 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 4462 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
4272 | 4463 |
4273 if (emit_debug_code()) { | 4464 if (emit_debug_code()) { |
4274 And(at, result, Operand(kDoubleAlignmentMask)); | 4465 And(at, result, Operand(kDoubleAlignmentMask)); |
4275 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); | 4466 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); |
4276 } | 4467 } |
4277 | 4468 |
4278 // Calculate new top and write it back. | 4469 // Calculate new top and write it back. |
4279 Daddu(result_end, result, Operand(object_size)); | 4470 Daddu(result_end, result, Operand(object_size)); |
4280 sd(result_end, MemOperand(top_address)); | 4471 Sd(result_end, MemOperand(top_address)); |
4281 | 4472 |
4282 Daddu(result, result, Operand(kHeapObjectTag)); | 4473 Daddu(result, result, Operand(kHeapObjectTag)); |
4283 } | 4474 } |
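FastAllocate is the limit-check-free counterpart: callers guarantee the space fits, so there is no alloc_limit load, no gc_required branch, and the new top is always published. The same sketch collapses to:

    // Sketch: unconditional bump, no slow path (space is pre-guaranteed).
    inline uint8_t* FastAllocateRaw(uint8_t** top_address, size_t object_size) {
      uint8_t* result = top_address[0];
      top_address[0] = result + object_size;  // publish unconditionally
      return result + 1;                      // kHeapObjectTag == 1
    }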
4284 | 4475 |
4285 void MacroAssembler::FastAllocate(Register object_size, Register result, | 4476 void MacroAssembler::FastAllocate(Register object_size, Register result, |
4286 Register result_end, Register scratch, | 4477 Register result_end, Register scratch, |
4287 AllocationFlags flags) { | 4478 AllocationFlags flags) { |
4288 // |object_size| and |result_end| may overlap, other registers must not. | 4479 // |object_size| and |result_end| may overlap, other registers must not. |
4289 DCHECK(!AreAliased(object_size, result, scratch, at)); | 4480 DCHECK(!AreAliased(object_size, result, scratch, at)); |
4290 DCHECK(!AreAliased(result_end, result, scratch, at)); | 4481 DCHECK(!AreAliased(result_end, result, scratch, at)); |
4291 | 4482 |
4292 ExternalReference allocation_top = | 4483 ExternalReference allocation_top = |
4293 AllocationUtils::GetAllocationTopReference(isolate(), flags); | 4484 AllocationUtils::GetAllocationTopReference(isolate(), flags); |
4294 | 4485 |
4295 // Set up allocation top address and object size registers. | 4486 // Set up allocation top address and object size registers. |
4296 Register top_address = scratch; | 4487 Register top_address = scratch; |
4297 li(top_address, Operand(allocation_top)); | 4488 li(top_address, Operand(allocation_top)); |
4298 ld(result, MemOperand(top_address)); | 4489 Ld(result, MemOperand(top_address)); |
4299 | 4490 |
4300 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have | 4491 // We can ignore DOUBLE_ALIGNMENT flags here because doubles and pointers have |
4301 // the same alignment on MIPS64. | 4492 // the same alignment on MIPS64. |
4302 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); | 4493 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment); |
4303 | 4494 |
4304 if (emit_debug_code()) { | 4495 if (emit_debug_code()) { |
4305 And(at, result, Operand(kDoubleAlignmentMask)); | 4496 And(at, result, Operand(kDoubleAlignmentMask)); |
4306 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); | 4497 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg)); |
4307 } | 4498 } |
4308 | 4499 |
(...skipping 37 matching lines...) |
4346 // object. | 4537 // object. |
4347 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc, | 4538 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc, |
4348 NO_ALLOCATION_FLAGS); | 4539 NO_ALLOCATION_FLAGS); |
4349 | 4540 |
4350 Heap::RootListIndex map_index = mode == MUTABLE | 4541 Heap::RootListIndex map_index = mode == MUTABLE |
4351 ? Heap::kMutableHeapNumberMapRootIndex | 4542 ? Heap::kMutableHeapNumberMapRootIndex |
4352 : Heap::kHeapNumberMapRootIndex; | 4543 : Heap::kHeapNumberMapRootIndex; |
4353 AssertIsRoot(heap_number_map, map_index); | 4544 AssertIsRoot(heap_number_map, map_index); |
4354 | 4545 |
4355 // Store heap number map in the allocated object. | 4546 // Store heap number map in the allocated object. |
4356 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); | 4547 Sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); |
4357 } | 4548 } |
4358 | 4549 |
4359 | 4550 |
4360 void MacroAssembler::AllocateHeapNumberWithValue(Register result, | 4551 void MacroAssembler::AllocateHeapNumberWithValue(Register result, |
4361 FPURegister value, | 4552 FPURegister value, |
4362 Register scratch1, | 4553 Register scratch1, |
4363 Register scratch2, | 4554 Register scratch2, |
4364 Label* gc_required) { | 4555 Label* gc_required) { |
4365 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); | 4556 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); |
4366 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required); | 4557 AllocateHeapNumber(result, scratch1, scratch2, t8, gc_required); |
4367 sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); | 4558 Sdc1(value, FieldMemOperand(result, HeapNumber::kValueOffset)); |
4368 } | 4559 } |
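A hypothetical call site, boxing the double currently in f4 into a fresh HeapNumber (the register picks are illustrative, not taken from this CL; t8 is clobbered internally for the map):

    Label gc_required;
    // v0 <- new HeapNumber holding f4; branches to gc_required when new
    // space is exhausted.
    masm->AllocateHeapNumberWithValue(v0, f4, a4, a5, &gc_required);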
4369 | 4560 |
4370 | 4561 |
4371 void MacroAssembler::AllocateJSValue(Register result, Register constructor, | 4562 void MacroAssembler::AllocateJSValue(Register result, Register constructor, |
4372 Register value, Register scratch1, | 4563 Register value, Register scratch1, |
4373 Register scratch2, Label* gc_required) { | 4564 Register scratch2, Label* gc_required) { |
4374 DCHECK(!result.is(constructor)); | 4565 DCHECK(!result.is(constructor)); |
4375 DCHECK(!result.is(scratch1)); | 4566 DCHECK(!result.is(scratch1)); |
4376 DCHECK(!result.is(scratch2)); | 4567 DCHECK(!result.is(scratch2)); |
4377 DCHECK(!result.is(value)); | 4568 DCHECK(!result.is(value)); |
4378 | 4569 |
4379 // Allocate JSValue in new space. | 4570 // Allocate JSValue in new space. |
4380 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, | 4571 Allocate(JSValue::kSize, result, scratch1, scratch2, gc_required, |
4381 NO_ALLOCATION_FLAGS); | 4572 NO_ALLOCATION_FLAGS); |
4382 | 4573 |
4383 // Initialize the JSValue. | 4574 // Initialize the JSValue. |
4384 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); | 4575 LoadGlobalFunctionInitialMap(constructor, scratch1, scratch2); |
4385 sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); | 4576 Sd(scratch1, FieldMemOperand(result, HeapObject::kMapOffset)); |
4386 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); | 4577 LoadRoot(scratch1, Heap::kEmptyFixedArrayRootIndex); |
4387 sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset)); | 4578 Sd(scratch1, FieldMemOperand(result, JSObject::kPropertiesOffset)); |
4388 sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset)); | 4579 Sd(scratch1, FieldMemOperand(result, JSObject::kElementsOffset)); |
4389 sd(value, FieldMemOperand(result, JSValue::kValueOffset)); | 4580 Sd(value, FieldMemOperand(result, JSValue::kValueOffset)); |
4390 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); | 4581 STATIC_ASSERT(JSValue::kSize == 4 * kPointerSize); |
4391 } | 4582 } |
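The stores above fully initialize the wrapper, matching the four-word STATIC_ASSERT. As a C++ layout sketch (field names are illustrative):

    // Shape of the finished JSValue; the real code addresses these fields
    // through the FieldMemOperand offsets used above.
    struct JSValueLayout {
      void* map;         // initial map of |constructor|
      void* properties;  // empty_fixed_array
      void* elements;    // empty_fixed_array
      void* value;       // the wrapped primitive
    };
    static_assert(sizeof(JSValueLayout) == 4 * sizeof(void*),
                  "matches JSValue::kSize");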
4392 | 4583 |
4393 void MacroAssembler::InitializeFieldsWithFiller(Register current_address, | 4584 void MacroAssembler::InitializeFieldsWithFiller(Register current_address, |
4394 Register end_address, | 4585 Register end_address, |
4395 Register filler) { | 4586 Register filler) { |
4396 Label loop, entry; | 4587 Label loop, entry; |
4397 Branch(&entry); | 4588 Branch(&entry); |
4398 bind(&loop); | 4589 bind(&loop); |
4399 sd(filler, MemOperand(current_address)); | 4590 Sd(filler, MemOperand(current_address)); |
4400 Daddu(current_address, current_address, kPointerSize); | 4591 Daddu(current_address, current_address, kPointerSize); |
4401 bind(&entry); | 4592 bind(&entry); |
4402 Branch(&loop, ult, current_address, Operand(end_address)); | 4593 Branch(&loop, ult, current_address, Operand(end_address)); |
4403 } | 4594 } |
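Equivalent C++ for the fill loop (the branch-to-entry structure makes it a test-first loop, so an empty range stores nothing):

    // Fill [current_address, end_address) with the filler word, one
    // pointer-sized store at a time; addresses are byte addresses.
    while (current_address < end_address) {
      *reinterpret_cast<uint64_t*>(current_address) = filler_word;
      current_address += kPointerSize;
    }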
4404 | 4595 |
4405 void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd, | 4596 void MacroAssembler::SubNanPreservePayloadAndSign_s(FPURegister fd, |
4406 FPURegister fs, | 4597 FPURegister fs, |
4407 FPURegister ft) { | 4598 FPURegister ft) { |
4408 FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd; | 4599 FloatRegister dest = fd.is(fs) || fd.is(ft) ? kLithiumScratchDouble : fd; |
4409 Label check_nan, save_payload, done; | 4600 Label check_nan, save_payload, done; |
(...skipping 58 matching lines...) |
4468 | 4659 |
4469 bind(&done); | 4660 bind(&done); |
4470 } | 4661 } |
4471 | 4662 |
4472 void MacroAssembler::CompareMapAndBranch(Register obj, | 4663 void MacroAssembler::CompareMapAndBranch(Register obj, |
4473 Register scratch, | 4664 Register scratch, |
4474 Handle<Map> map, | 4665 Handle<Map> map, |
4475 Label* early_success, | 4666 Label* early_success, |
4476 Condition cond, | 4667 Condition cond, |
4477 Label* branch_to) { | 4668 Label* branch_to) { |
4478 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 4669 Ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
4479 CompareMapAndBranch(scratch, map, early_success, cond, branch_to); | 4670 CompareMapAndBranch(scratch, map, early_success, cond, branch_to); |
4480 } | 4671 } |
4481 | 4672 |
4482 | 4673 |
4483 void MacroAssembler::CompareMapAndBranch(Register obj_map, | 4674 void MacroAssembler::CompareMapAndBranch(Register obj_map, |
4484 Handle<Map> map, | 4675 Handle<Map> map, |
4485 Label* early_success, | 4676 Label* early_success, |
4486 Condition cond, | 4677 Condition cond, |
4487 Label* branch_to) { | 4678 Label* branch_to) { |
4488 Branch(branch_to, cond, obj_map, Operand(map)); | 4679 Branch(branch_to, cond, obj_map, Operand(map)); |
(...skipping 15 matching lines...) |
4504 | 4695 |
4505 | 4696 |
4506 void MacroAssembler::CheckMap(Register obj, | 4697 void MacroAssembler::CheckMap(Register obj, |
4507 Register scratch, | 4698 Register scratch, |
4508 Heap::RootListIndex index, | 4699 Heap::RootListIndex index, |
4509 Label* fail, | 4700 Label* fail, |
4510 SmiCheckType smi_check_type) { | 4701 SmiCheckType smi_check_type) { |
4511 if (smi_check_type == DO_SMI_CHECK) { | 4702 if (smi_check_type == DO_SMI_CHECK) { |
4512 JumpIfSmi(obj, fail); | 4703 JumpIfSmi(obj, fail); |
4513 } | 4704 } |
4514 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); | 4705 Ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); |
4515 LoadRoot(at, index); | 4706 LoadRoot(at, index); |
4516 Branch(fail, ne, scratch, Operand(at)); | 4707 Branch(fail, ne, scratch, Operand(at)); |
4517 } | 4708 } |
4518 | 4709 |
4519 | 4710 |
4520 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) { | 4711 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) { |
4521 li(value, Operand(cell)); | 4712 li(value, Operand(cell)); |
4522 ld(value, FieldMemOperand(value, WeakCell::kValueOffset)); | 4713 Ld(value, FieldMemOperand(value, WeakCell::kValueOffset)); |
4523 } | 4714 } |
4524 | 4715 |
4525 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, | 4716 void MacroAssembler::FPUCanonicalizeNaN(const DoubleRegister dst, |
4526 const DoubleRegister src) { | 4717 const DoubleRegister src) { |
4527 sub_d(dst, src, kDoubleRegZero); | 4718 sub_d(dst, src, kDoubleRegZero); |
4528 } | 4719 } |
4529 | 4720 |
4530 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell, | 4721 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell, |
4531 Label* miss) { | 4722 Label* miss) { |
4532 GetWeakValue(value, cell); | 4723 GetWeakValue(value, cell); |
(...skipping 109 matching lines...) |
4642 Daddu(src_reg, sp, | 4833 Daddu(src_reg, sp, |
4643 Operand((callee_args_count.immediate() + 1) * kPointerSize)); | 4834 Operand((callee_args_count.immediate() + 1) * kPointerSize)); |
4644 } | 4835 } |
4645 | 4836 |
4646 if (FLAG_debug_code) { | 4837 if (FLAG_debug_code) { |
4647 Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg)); | 4838 Check(lo, kStackAccessBelowStackPointer, src_reg, Operand(dst_reg)); |
4648 } | 4839 } |
4649 | 4840 |
4650 // Restore caller's frame pointer and return address now as they will be | 4841 // Restore caller's frame pointer and return address now as they will be |
4651 // overwritten by the copying loop. | 4842 // overwritten by the copying loop. |
4652 ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); | 4843 Ld(ra, MemOperand(fp, StandardFrameConstants::kCallerPCOffset)); |
4653 ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); | 4844 Ld(fp, MemOperand(fp, StandardFrameConstants::kCallerFPOffset)); |
4654 | 4845 |
4655 // Now copy callee arguments to the caller frame going backwards to avoid | 4846 // Now copy callee arguments to the caller frame going backwards to avoid |
4656 // callee arguments corruption (source and destination areas could overlap). | 4847 // callee arguments corruption (source and destination areas could overlap). |
4657 | 4848 |
4658 // Both src_reg and dst_reg are pointing to the word after the one to copy, | 4849 // Both src_reg and dst_reg are pointing to the word after the one to copy, |
4659 // so they must be pre-decremented in the loop. | 4850 // so they must be pre-decremented in the loop. |
4660 Register tmp_reg = scratch1; | 4851 Register tmp_reg = scratch1; |
4661 Label loop, entry; | 4852 Label loop, entry; |
4662 Branch(&entry); | 4853 Branch(&entry); |
4663 bind(&loop); | 4854 bind(&loop); |
4664 Dsubu(src_reg, src_reg, Operand(kPointerSize)); | 4855 Dsubu(src_reg, src_reg, Operand(kPointerSize)); |
4665 Dsubu(dst_reg, dst_reg, Operand(kPointerSize)); | 4856 Dsubu(dst_reg, dst_reg, Operand(kPointerSize)); |
4666 ld(tmp_reg, MemOperand(src_reg)); | 4857 Ld(tmp_reg, MemOperand(src_reg)); |
4667 sd(tmp_reg, MemOperand(dst_reg)); | 4858 Sd(tmp_reg, MemOperand(dst_reg)); |
4668 bind(&entry); | 4859 bind(&entry); |
4669 Branch(&loop, ne, sp, Operand(src_reg)); | 4860 Branch(&loop, ne, sp, Operand(src_reg)); |
4670 | 4861 |
4671 // Leave current frame. | 4862 // Leave current frame. |
4672 mov(sp, dst_reg); | 4863 mov(sp, dst_reg); |
4673 } | 4864 } |
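The copy loop above in C++ form: both pointers point one word past the next word to copy, hence the pre-decrements, and the loop terminates once the source reaches the current stack pointer:

    // Backwards word copy that drops the current frame (test-first loop).
    while (src != sp) {
      src -= kPointerSize;
      dst -= kPointerSize;
      *reinterpret_cast<uint64_t*>(dst) = *reinterpret_cast<uint64_t*>(src);
    }
    sp = dst;  // leave the current frame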
4674 | 4865 |
4675 void MacroAssembler::InvokePrologue(const ParameterCount& expected, | 4866 void MacroAssembler::InvokePrologue(const ParameterCount& expected, |
4676 const ParameterCount& actual, | 4867 const ParameterCount& actual, |
4677 Label* done, | 4868 Label* done, |
(...skipping 58 matching lines...) |
4736 } | 4927 } |
4737 } | 4928 } |
4738 | 4929 |
4739 void MacroAssembler::CheckDebugHook(Register fun, Register new_target, | 4930 void MacroAssembler::CheckDebugHook(Register fun, Register new_target, |
4740 const ParameterCount& expected, | 4931 const ParameterCount& expected, |
4741 const ParameterCount& actual) { | 4932 const ParameterCount& actual) { |
4742 Label skip_hook; | 4933 Label skip_hook; |
4743 ExternalReference debug_hook_active = | 4934 ExternalReference debug_hook_active = |
4744 ExternalReference::debug_hook_on_function_call_address(isolate()); | 4935 ExternalReference::debug_hook_on_function_call_address(isolate()); |
4745 li(t0, Operand(debug_hook_active)); | 4936 li(t0, Operand(debug_hook_active)); |
4746 lb(t0, MemOperand(t0)); | 4937 Lb(t0, MemOperand(t0)); |
4747 Branch(&skip_hook, eq, t0, Operand(zero_reg)); | 4938 Branch(&skip_hook, eq, t0, Operand(zero_reg)); |
4748 { | 4939 { |
4749 FrameScope frame(this, | 4940 FrameScope frame(this, |
4750 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); | 4941 has_frame() ? StackFrame::NONE : StackFrame::INTERNAL); |
4751 if (expected.is_reg()) { | 4942 if (expected.is_reg()) { |
4752 SmiTag(expected.reg()); | 4943 SmiTag(expected.reg()); |
4753 Push(expected.reg()); | 4944 Push(expected.reg()); |
4754 } | 4945 } |
4755 if (actual.is_reg()) { | 4946 if (actual.is_reg()) { |
4756 SmiTag(actual.reg()); | 4947 SmiTag(actual.reg()); |
(...skipping 43 matching lines...) |
4800 | 4991 |
4801 Label done; | 4992 Label done; |
4802 bool definitely_mismatches = false; | 4993 bool definitely_mismatches = false; |
4803 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag, | 4994 InvokePrologue(expected, actual, &done, &definitely_mismatches, flag, |
4804 call_wrapper); | 4995 call_wrapper); |
4805 if (!definitely_mismatches) { | 4996 if (!definitely_mismatches) { |
4806 // We call indirectly through the code field in the function to | 4997 // We call indirectly through the code field in the function to |
4807 // allow recompilation to take effect without changing any of the | 4998 // allow recompilation to take effect without changing any of the |
4808 // call sites. | 4999 // call sites. |
4809 Register code = t0; | 5000 Register code = t0; |
4810 ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); | 5001 Ld(code, FieldMemOperand(function, JSFunction::kCodeEntryOffset)); |
4811 if (flag == CALL_FUNCTION) { | 5002 if (flag == CALL_FUNCTION) { |
4812 call_wrapper.BeforeCall(CallSize(code)); | 5003 call_wrapper.BeforeCall(CallSize(code)); |
4813 Call(code); | 5004 Call(code); |
4814 call_wrapper.AfterCall(); | 5005 call_wrapper.AfterCall(); |
4815 } else { | 5006 } else { |
4816 DCHECK(flag == JUMP_FUNCTION); | 5007 DCHECK(flag == JUMP_FUNCTION); |
4817 Jump(code); | 5008 Jump(code); |
4818 } | 5009 } |
4819 // Continue here if InvokePrologue handled the invocation itself, which | 5010 // Continue here if InvokePrologue handled the invocation itself, which |
4820 // happens when the parameter counts mismatch. | 5011 // happens when the parameter counts mismatch. |
4821 bind(&done); | 5012 bind(&done); |
4822 } | 5013 } |
4823 } | 5014 } |
4824 | 5015 |
4825 | 5016 |
4826 void MacroAssembler::InvokeFunction(Register function, | 5017 void MacroAssembler::InvokeFunction(Register function, |
4827 Register new_target, | 5018 Register new_target, |
4828 const ParameterCount& actual, | 5019 const ParameterCount& actual, |
4829 InvokeFlag flag, | 5020 InvokeFlag flag, |
4830 const CallWrapper& call_wrapper) { | 5021 const CallWrapper& call_wrapper) { |
4831 // You can't call a function without a valid frame. | 5022 // You can't call a function without a valid frame. |
4832 DCHECK(flag == JUMP_FUNCTION || has_frame()); | 5023 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
4833 | 5024 |
4834 // Contract with called JS functions requires that function is passed in a1. | 5025 // Contract with called JS functions requires that function is passed in a1. |
4835 DCHECK(function.is(a1)); | 5026 DCHECK(function.is(a1)); |
4836 Register expected_reg = a2; | 5027 Register expected_reg = a2; |
4837 Register temp_reg = t0; | 5028 Register temp_reg = t0; |
4838 ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); | 5029 Ld(temp_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); |
4839 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 5030 Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
4840 // The argument count is stored as int32_t on 64-bit platforms. | 5031 // The argument count is stored as int32_t on 64-bit platforms. |
4841 // TODO(plind): Smi on 32-bit platforms. | 5032 // TODO(plind): Smi on 32-bit platforms. |
4842 lw(expected_reg, | 5033 Lw(expected_reg, |
4843 FieldMemOperand(temp_reg, | 5034 FieldMemOperand(temp_reg, |
4844 SharedFunctionInfo::kFormalParameterCountOffset)); | 5035 SharedFunctionInfo::kFormalParameterCountOffset)); |
4845 ParameterCount expected(expected_reg); | 5036 ParameterCount expected(expected_reg); |
4846 InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper); | 5037 InvokeFunctionCode(a1, new_target, expected, actual, flag, call_wrapper); |
4847 } | 5038 } |
4848 | 5039 |
4849 | 5040 |
4850 void MacroAssembler::InvokeFunction(Register function, | 5041 void MacroAssembler::InvokeFunction(Register function, |
4851 const ParameterCount& expected, | 5042 const ParameterCount& expected, |
4852 const ParameterCount& actual, | 5043 const ParameterCount& actual, |
4853 InvokeFlag flag, | 5044 InvokeFlag flag, |
4854 const CallWrapper& call_wrapper) { | 5045 const CallWrapper& call_wrapper) { |
4855 // You can't call a function without a valid frame. | 5046 // You can't call a function without a valid frame. |
4856 DCHECK(flag == JUMP_FUNCTION || has_frame()); | 5047 DCHECK(flag == JUMP_FUNCTION || has_frame()); |
4857 | 5048 |
4858 // Contract with called JS functions requires that function is passed in a1. | 5049 // Contract with called JS functions requires that function is passed in a1. |
4859 DCHECK(function.is(a1)); | 5050 DCHECK(function.is(a1)); |
4860 | 5051 |
4861 // Get the function and setup the context. | 5052 // Get the function and setup the context. |
4862 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); | 5053 Ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); |
4863 | 5054 |
4864 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper); | 5055 InvokeFunctionCode(a1, no_reg, expected, actual, flag, call_wrapper); |
4865 } | 5056 } |
4866 | 5057 |
4867 | 5058 |
4868 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, | 5059 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, |
4869 const ParameterCount& expected, | 5060 const ParameterCount& expected, |
4870 const ParameterCount& actual, | 5061 const ParameterCount& actual, |
4871 InvokeFlag flag, | 5062 InvokeFlag flag, |
4872 const CallWrapper& call_wrapper) { | 5063 const CallWrapper& call_wrapper) { |
4873 li(a1, function); | 5064 li(a1, function); |
4874 InvokeFunction(a1, expected, actual, flag, call_wrapper); | 5065 InvokeFunction(a1, expected, actual, flag, call_wrapper); |
4875 } | 5066 } |
4876 | 5067 |
4877 | 5068 |
4878 void MacroAssembler::IsObjectJSStringType(Register object, | 5069 void MacroAssembler::IsObjectJSStringType(Register object, |
4879 Register scratch, | 5070 Register scratch, |
4880 Label* fail) { | 5071 Label* fail) { |
4881 DCHECK(kNotStringTag != 0); | 5072 DCHECK(kNotStringTag != 0); |
4882 | 5073 |
4883 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 5074 Ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
4884 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); | 5075 Lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); |
4885 And(scratch, scratch, Operand(kIsNotStringMask)); | 5076 And(scratch, scratch, Operand(kIsNotStringMask)); |
4886 Branch(fail, ne, scratch, Operand(zero_reg)); | 5077 Branch(fail, ne, scratch, Operand(zero_reg)); |
4887 } | 5078 } |
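As the DCHECK notes, kNotStringTag is non-zero, so a single mask test suffices. C++ equivalent, with hypothetical accessors standing in for the two loads:

    // Fail unless the instance type carries the string tag.
    uint8_t instance_type = InstanceTypeOf(MapOf(object));  // the two loads
    if ((instance_type & kIsNotStringMask) != 0) goto fail;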
4888 | 5079 |
4889 | 5080 |
4890 // --------------------------------------------------------------------------- | 5081 // --------------------------------------------------------------------------- |
4891 // Support functions. | 5082 // Support functions. |
4892 | 5083 |
4893 void MacroAssembler::GetMapConstructor(Register result, Register map, | 5084 void MacroAssembler::GetMapConstructor(Register result, Register map, |
4894 Register temp, Register temp2) { | 5085 Register temp, Register temp2) { |
4895 Label done, loop; | 5086 Label done, loop; |
4896 ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset)); | 5087 Ld(result, FieldMemOperand(map, Map::kConstructorOrBackPointerOffset)); |
4897 bind(&loop); | 5088 bind(&loop); |
4898 JumpIfSmi(result, &done); | 5089 JumpIfSmi(result, &done); |
4899 GetObjectType(result, temp, temp2); | 5090 GetObjectType(result, temp, temp2); |
4900 Branch(&done, ne, temp2, Operand(MAP_TYPE)); | 5091 Branch(&done, ne, temp2, Operand(MAP_TYPE)); |
4901 ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset)); | 5092 Ld(result, FieldMemOperand(result, Map::kConstructorOrBackPointerOffset)); |
4902 Branch(&loop); | 5093 Branch(&loop); |
4903 bind(&done); | 5094 bind(&done); |
4904 } | 5095 } |
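The loop chases back pointers: as long as the slot holds a Map, keep loading its constructor-or-back-pointer field; the first smi or non-map heap object is the answer. Sketch (accessor name paraphrased from the offset constant):

    // Walk from a map to its constructor through the back-pointer chain.
    Object* result = ConstructorOrBackPointer(map);
    while (!IsSmi(result) && IsMap(result)) {
      result = ConstructorOrBackPointer(result);
    }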
4905 | 5096 |
4906 void MacroAssembler::GetObjectType(Register object, | 5097 void MacroAssembler::GetObjectType(Register object, |
4907 Register map, | 5098 Register map, |
4908 Register type_reg) { | 5099 Register type_reg) { |
4909 ld(map, FieldMemOperand(object, HeapObject::kMapOffset)); | 5100 Ld(map, FieldMemOperand(object, HeapObject::kMapOffset)); |
4910 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 5101 Lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
4911 } | 5102 } |
4912 | 5103 |
4913 | 5104 |
4914 // ----------------------------------------------------------------------------- | 5105 // ----------------------------------------------------------------------------- |
4915 // Runtime calls. | 5106 // Runtime calls. |
4916 | 5107 |
4917 void MacroAssembler::CallStub(CodeStub* stub, | 5108 void MacroAssembler::CallStub(CodeStub* stub, |
4918 TypeFeedbackId ast_id, | 5109 TypeFeedbackId ast_id, |
4919 Condition cond, | 5110 Condition cond, |
4920 Register r1, | 5111 Register r1, |
(...skipping 31 matching lines...) |
4952 JumpIfNotSmi(object, ¬_smi); | 5143 JumpIfNotSmi(object, ¬_smi); |
4953 // Remove smi tag and convert to double. | 5144 // Remove smi tag and convert to double. |
4954 // dsra(scratch1, object, kSmiTagSize); | 5145 // dsra(scratch1, object, kSmiTagSize); |
4955 dsra32(scratch1, object, 0); | 5146 dsra32(scratch1, object, 0); |
4956 mtc1(scratch1, result); | 5147 mtc1(scratch1, result); |
4957 cvt_d_w(result, result); | 5148 cvt_d_w(result, result); |
4958 Branch(&done); | 5149 Branch(&done); |
4959 bind(¬_smi); | 5150 bind(¬_smi); |
4960 } | 5151 } |
4961 // Check for heap number and load double value from it. | 5152 // Check for heap number and load double value from it. |
4962 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); | 5153 Ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); |
4963 Branch(not_number, ne, scratch1, Operand(heap_number_map)); | 5154 Branch(not_number, ne, scratch1, Operand(heap_number_map)); |
4964 | 5155 |
4965 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { | 5156 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { |
4966 // If exponent is all ones the number is either a NaN or +/-Infinity. | 5157 // If exponent is all ones the number is either a NaN or +/-Infinity. |
4967 Register exponent = scratch1; | 5158 Register exponent = scratch1; |
4968 Register mask_reg = scratch2; | 5159 Register mask_reg = scratch2; |
4969 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); | 5160 Lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); |
4970 li(mask_reg, HeapNumber::kExponentMask); | 5161 li(mask_reg, HeapNumber::kExponentMask); |
4971 | 5162 |
4972 And(exponent, exponent, mask_reg); | 5163 And(exponent, exponent, mask_reg); |
4973 Branch(not_number, eq, exponent, Operand(mask_reg)); | 5164 Branch(not_number, eq, exponent, Operand(mask_reg)); |
4974 } | 5165 } |
4975 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); | 5166 Ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); |
4976 bind(&done); | 5167 bind(&done); |
4977 } | 5168 } |
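A rough C++ rendering of the conversion tail above: smis convert directly, anything else must carry the heap-number map, and with AVOID_NANS_AND_INFINITIES an all-ones exponent (NaN or +/-Infinity) also bails to |not_number|. Helper names are stand-ins for the loads shown:

    double result;
    if (IsSmi(object)) {
      result = static_cast<double>(SmiUntag(object));   // dsra32 + cvt_d_w
    } else {
      if (MapOf(object) != heap_number_map) goto not_number;
      uint32_t exponent = ExponentWordOf(object) & HeapNumber::kExponentMask;
      if ((flags & AVOID_NANS_AND_INFINITIES) != 0 &&
          exponent == HeapNumber::kExponentMask) {
        goto not_number;
      }
      result = ValueOf(object);                         // Ldc1
    }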
4978 | 5169 |
4979 | 5170 |
4980 void MacroAssembler::SmiToDoubleFPURegister(Register smi, | 5171 void MacroAssembler::SmiToDoubleFPURegister(Register smi, |
4981 FPURegister value, | 5172 FPURegister value, |
4982 Register scratch1) { | 5173 Register scratch1) { |
4983 dsra32(scratch1, smi, 0); | 5174 dsra32(scratch1, smi, 0); |
4984 mtc1(scratch1, value); | 5175 mtc1(scratch1, value); |
4985 cvt_d_w(value, value); | 5176 cvt_d_w(value, value); |
(...skipping 457 matching lines...) |
5443 zero_reg, | 5634 zero_reg, |
5444 Operand(zero_reg), | 5635 Operand(zero_reg), |
5445 bd); | 5636 bd); |
5446 } | 5637 } |
5447 | 5638 |
5448 void MacroAssembler::SetCounter(StatsCounter* counter, int value, | 5639 void MacroAssembler::SetCounter(StatsCounter* counter, int value, |
5449 Register scratch1, Register scratch2) { | 5640 Register scratch1, Register scratch2) { |
5450 if (FLAG_native_code_counters && counter->Enabled()) { | 5641 if (FLAG_native_code_counters && counter->Enabled()) { |
5451 li(scratch1, Operand(value)); | 5642 li(scratch1, Operand(value)); |
5452 li(scratch2, Operand(ExternalReference(counter))); | 5643 li(scratch2, Operand(ExternalReference(counter))); |
5453 sw(scratch1, MemOperand(scratch2)); | 5644 Sw(scratch1, MemOperand(scratch2)); |
5454 } | 5645 } |
5455 } | 5646 } |
5456 | 5647 |
5457 | 5648 |
5458 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, | 5649 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, |
5459 Register scratch1, Register scratch2) { | 5650 Register scratch1, Register scratch2) { |
5460 DCHECK(value > 0); | 5651 DCHECK(value > 0); |
5461 if (FLAG_native_code_counters && counter->Enabled()) { | 5652 if (FLAG_native_code_counters && counter->Enabled()) { |
5462 li(scratch2, Operand(ExternalReference(counter))); | 5653 li(scratch2, Operand(ExternalReference(counter))); |
5463 lw(scratch1, MemOperand(scratch2)); | 5654 Lw(scratch1, MemOperand(scratch2)); |
5464 Addu(scratch1, scratch1, Operand(value)); | 5655 Addu(scratch1, scratch1, Operand(value)); |
5465 sw(scratch1, MemOperand(scratch2)); | 5656 Sw(scratch1, MemOperand(scratch2)); |
5466 } | 5657 } |
5467 } | 5658 } |
5468 | 5659 |
5469 | 5660 |
5470 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, | 5661 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, |
5471 Register scratch1, Register scratch2) { | 5662 Register scratch1, Register scratch2) { |
5472 DCHECK(value > 0); | 5663 DCHECK(value > 0); |
5473 if (FLAG_native_code_counters && counter->Enabled()) { | 5664 if (FLAG_native_code_counters && counter->Enabled()) { |
5474 li(scratch2, Operand(ExternalReference(counter))); | 5665 li(scratch2, Operand(ExternalReference(counter))); |
5475 lw(scratch1, MemOperand(scratch2)); | 5666 Lw(scratch1, MemOperand(scratch2)); |
5476 Subu(scratch1, scratch1, Operand(value)); | 5667 Subu(scratch1, scratch1, Operand(value)); |
5477 sw(scratch1, MemOperand(scratch2)); | 5668 Sw(scratch1, MemOperand(scratch2)); |
5478 } | 5669 } |
5479 } | 5670 } |
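All three counter helpers emit the same 32-bit read-modify-write against the counter cell, effectively *cell += value (or -=, or =). A hypothetical call site; the counter name is invented for illustration:

    // Bump a stats counter by one, using t0/t1 as scratch registers.
    masm->IncrementCounter(isolate->counters()->some_counter(), 1, t0, t1);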
5480 | 5671 |
5481 | 5672 |
5482 // ----------------------------------------------------------------------------- | 5673 // ----------------------------------------------------------------------------- |
5483 // Debugging. | 5674 // Debugging. |
5484 | 5675 |
5485 void MacroAssembler::Assert(Condition cc, BailoutReason reason, | 5676 void MacroAssembler::Assert(Condition cc, BailoutReason reason, |
5486 Register rs, Operand rt) { | 5677 Register rs, Operand rt) { |
5487 if (emit_debug_code()) | 5678 if (emit_debug_code()) |
(...skipping 55 matching lines...) |
5543 while (abort_instructions++ < kExpectedAbortInstructions) { | 5734 while (abort_instructions++ < kExpectedAbortInstructions) { |
5544 nop(); | 5735 nop(); |
5545 } | 5736 } |
5546 } | 5737 } |
5547 } | 5738 } |
5548 | 5739 |
5549 | 5740 |
5550 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { | 5741 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { |
5551 if (context_chain_length > 0) { | 5742 if (context_chain_length > 0) { |
5552 // Move up the chain of contexts to the context containing the slot. | 5743 // Move up the chain of contexts to the context containing the slot. |
5553 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 5744 Ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
5554 for (int i = 1; i < context_chain_length; i++) { | 5745 for (int i = 1; i < context_chain_length; i++) { |
5555 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); | 5746 Ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); |
5556 } | 5747 } |
5557 } else { | 5748 } else { |
5558 // Slot is in the current function context. Move it into the | 5749 // Slot is in the current function context. Move it into the |
5559 // destination register in case we store into it (the write barrier | 5750 // destination register in case we store into it (the write barrier |
5560 // cannot be allowed to destroy the context in cp). | 5751 // cannot be allowed to destroy the context in cp). |
5561 Move(dst, cp); | 5752 Move(dst, cp); |
5562 } | 5753 } |
5563 } | 5754 } |
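Equivalent chain walk, assuming the usual previous-context accessor: follow PREVIOUS_INDEX context_chain_length times, where zero hops means the current context itself:

    Context* ctx = current_context;  // cp
    for (int i = 0; i < context_chain_length; i++) {
      ctx = ctx->previous();         // Context::SlotOffset(PREVIOUS_INDEX)
    }
    // ctx now holds the context containing the slot.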
5564 | 5755 |
5565 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) { | 5756 void MacroAssembler::LoadNativeContextSlot(int index, Register dst) { |
5566 ld(dst, NativeContextMemOperand()); | 5757 Ld(dst, NativeContextMemOperand()); |
5567 ld(dst, ContextMemOperand(dst, index)); | 5758 Ld(dst, ContextMemOperand(dst, index)); |
5568 } | 5759 } |
5569 | 5760 |
5570 | 5761 |
5571 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, | 5762 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, |
5572 Register map, | 5763 Register map, |
5573 Register scratch) { | 5764 Register scratch) { |
5574 // Load the initial map. The global functions all have initial maps. | 5765 // Load the initial map. The global functions all have initial maps. |
5575 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); | 5766 Ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); |
5576 if (emit_debug_code()) { | 5767 if (emit_debug_code()) { |
5577 Label ok, fail; | 5768 Label ok, fail; |
5578 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); | 5769 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); |
5579 Branch(&ok); | 5770 Branch(&ok); |
5580 bind(&fail); | 5771 bind(&fail); |
5581 Abort(kGlobalFunctionsMustHaveInitialMap); | 5772 Abort(kGlobalFunctionsMustHaveInitialMap); |
5582 bind(&ok); | 5773 bind(&ok); |
5583 } | 5774 } |
5584 } | 5775 } |
5585 | 5776 |
(...skipping 23 matching lines...) |
5609 nop(); // Pad the empty space. | 5800 nop(); // Pad the empty space. |
5610 } else { | 5801 } else { |
5611 PushStandardFrame(a1); | 5802 PushStandardFrame(a1); |
5612 nop(Assembler::CODE_AGE_SEQUENCE_NOP); | 5803 nop(Assembler::CODE_AGE_SEQUENCE_NOP); |
5613 nop(Assembler::CODE_AGE_SEQUENCE_NOP); | 5804 nop(Assembler::CODE_AGE_SEQUENCE_NOP); |
5614 nop(Assembler::CODE_AGE_SEQUENCE_NOP); | 5805 nop(Assembler::CODE_AGE_SEQUENCE_NOP); |
5615 } | 5806 } |
5616 } | 5807 } |
5617 | 5808 |
5618 void MacroAssembler::EmitLoadFeedbackVector(Register vector) { | 5809 void MacroAssembler::EmitLoadFeedbackVector(Register vector) { |
5619 ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); | 5810 Ld(vector, MemOperand(fp, JavaScriptFrameConstants::kFunctionOffset)); |
5620 ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset)); | 5811 Ld(vector, FieldMemOperand(vector, JSFunction::kFeedbackVectorOffset)); |
5621 ld(vector, FieldMemOperand(vector, Cell::kValueOffset)); | 5812 Ld(vector, FieldMemOperand(vector, Cell::kValueOffset)); |
5622 } | 5813 } |
5623 | 5814 |
5624 | 5815 |
5625 void MacroAssembler::EnterFrame(StackFrame::Type type, | 5816 void MacroAssembler::EnterFrame(StackFrame::Type type, |
5626 bool load_constant_pool_pointer_reg) { | 5817 bool load_constant_pool_pointer_reg) { |
5627 // Out-of-line constant pool not implemented on mips64. | 5818 // Out-of-line constant pool not implemented on mips64. |
5628 UNREACHABLE(); | 5819 UNREACHABLE(); |
5629 } | 5820 } |
5630 | 5821 |
5631 | 5822 |
5632 void MacroAssembler::EnterFrame(StackFrame::Type type) { | 5823 void MacroAssembler::EnterFrame(StackFrame::Type type) { |
5633 int stack_offset, fp_offset; | 5824 int stack_offset, fp_offset; |
5634 if (type == StackFrame::INTERNAL) { | 5825 if (type == StackFrame::INTERNAL) { |
5635 stack_offset = -4 * kPointerSize; | 5826 stack_offset = -4 * kPointerSize; |
5636 fp_offset = 2 * kPointerSize; | 5827 fp_offset = 2 * kPointerSize; |
5637 } else { | 5828 } else { |
5638 stack_offset = -3 * kPointerSize; | 5829 stack_offset = -3 * kPointerSize; |
5639 fp_offset = 1 * kPointerSize; | 5830 fp_offset = 1 * kPointerSize; |
5640 } | 5831 } |
5641 daddiu(sp, sp, stack_offset); | 5832 daddiu(sp, sp, stack_offset); |
5642 stack_offset = -stack_offset - kPointerSize; | 5833 stack_offset = -stack_offset - kPointerSize; |
5643 sd(ra, MemOperand(sp, stack_offset)); | 5834 Sd(ra, MemOperand(sp, stack_offset)); |
5644 stack_offset -= kPointerSize; | 5835 stack_offset -= kPointerSize; |
5645 sd(fp, MemOperand(sp, stack_offset)); | 5836 Sd(fp, MemOperand(sp, stack_offset)); |
5646 stack_offset -= kPointerSize; | 5837 stack_offset -= kPointerSize; |
5647 li(t9, Operand(StackFrame::TypeToMarker(type))); | 5838 li(t9, Operand(StackFrame::TypeToMarker(type))); |
5648 sd(t9, MemOperand(sp, stack_offset)); | 5839 Sd(t9, MemOperand(sp, stack_offset)); |
5649 if (type == StackFrame::INTERNAL) { | 5840 if (type == StackFrame::INTERNAL) { |
5650 DCHECK_EQ(stack_offset, kPointerSize); | 5841 DCHECK_EQ(stack_offset, kPointerSize); |
5651 li(t9, Operand(CodeObject())); | 5842 li(t9, Operand(CodeObject())); |
5652 sd(t9, MemOperand(sp, 0)); | 5843 Sd(t9, MemOperand(sp, 0)); |
5653 } else { | 5844 } else { |
5654 DCHECK_EQ(stack_offset, 0); | 5845 DCHECK_EQ(stack_offset, 0); |
5655 } | 5846 } |
5656 // Adjust FP to point to saved FP. | 5847 // Adjust FP to point to saved FP. |
5657 Daddu(fp, sp, Operand(fp_offset)); | 5848 Daddu(fp, sp, Operand(fp_offset)); |
5658 } | 5849 } |
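For StackFrame::INTERNAL the sequence above builds the following frame (fp lands at sp + 2 * kPointerSize; non-INTERNAL frames omit the code-object slot):

    // Layout relative to the new fp, one pointer-sized word per slot:
    //   [fp + 1 * kPointerSize]  saved ra
    //   [fp + 0]                 saved caller fp
    //   [fp - 1 * kPointerSize]  frame type marker
    //   [fp - 2 * kPointerSize]  code object (INTERNAL only)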
5659 | 5850 |
5660 | 5851 |
5661 void MacroAssembler::LeaveFrame(StackFrame::Type type) { | 5852 void MacroAssembler::LeaveFrame(StackFrame::Type type) { |
5662 daddiu(sp, fp, 2 * kPointerSize); | 5853 daddiu(sp, fp, 2 * kPointerSize); |
5663 ld(ra, MemOperand(fp, 1 * kPointerSize)); | 5854 Ld(ra, MemOperand(fp, 1 * kPointerSize)); |
5664 ld(fp, MemOperand(fp, 0 * kPointerSize)); | 5855 Ld(fp, MemOperand(fp, 0 * kPointerSize)); |
5665 } | 5856 } |
5666 | 5857 |
5667 void MacroAssembler::EnterBuiltinFrame(Register context, Register target, | 5858 void MacroAssembler::EnterBuiltinFrame(Register context, Register target, |
5668 Register argc) { | 5859 Register argc) { |
5669 Push(ra, fp); | 5860 Push(ra, fp); |
5670 Move(fp, sp); | 5861 Move(fp, sp); |
5671 Push(context, target, argc); | 5862 Push(context, target, argc); |
5672 } | 5863 } |
5673 | 5864 |
5674 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target, | 5865 void MacroAssembler::LeaveBuiltinFrame(Register context, Register target, |
(...skipping 17 matching lines...) |
5692 // [fp + 1 (==kCallerPCOffset)] - saved old ra | 5883 // [fp + 1 (==kCallerPCOffset)] - saved old ra |
5693 // [fp + 0 (==kCallerFPOffset)] - saved old fp | 5884 // [fp + 0 (==kCallerFPOffset)] - saved old fp |
5694 // [fp - 1] - StackFrame::EXIT Smi | 5885 // [fp - 1] - StackFrame::EXIT Smi |
5695 // [fp - 2 (==kSPOffset)] - sp of the called function | 5886 // [fp - 2 (==kSPOffset)] - sp of the called function |
5696 // [fp - 3 (==kCodeOffset)] - CodeObject | 5887 // [fp - 3 (==kCodeOffset)] - CodeObject |
5697 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the | 5888 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the |
5698 // new stack (will contain saved ra) | 5889 // new stack (will contain saved ra) |
5699 | 5890 |
5700 // Save registers and reserve room for saved entry sp and code object. | 5891 // Save registers and reserve room for saved entry sp and code object. |
5701 daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp); | 5892 daddiu(sp, sp, -2 * kPointerSize - ExitFrameConstants::kFixedFrameSizeFromFp); |
5702 sd(ra, MemOperand(sp, 4 * kPointerSize)); | 5893 Sd(ra, MemOperand(sp, 4 * kPointerSize)); |
5703 sd(fp, MemOperand(sp, 3 * kPointerSize)); | 5894 Sd(fp, MemOperand(sp, 3 * kPointerSize)); |
5704 li(at, Operand(StackFrame::TypeToMarker(frame_type))); | 5895 li(at, Operand(StackFrame::TypeToMarker(frame_type))); |
5705 sd(at, MemOperand(sp, 2 * kPointerSize)); | 5896 Sd(at, MemOperand(sp, 2 * kPointerSize)); |
5706 // Set up new frame pointer. | 5897 // Set up new frame pointer. |
5707 daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp); | 5898 daddiu(fp, sp, ExitFrameConstants::kFixedFrameSizeFromFp); |
5708 | 5899 |
5709 if (emit_debug_code()) { | 5900 if (emit_debug_code()) { |
5710 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 5901 Sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
5711 } | 5902 } |
5712 | 5903 |
5713 // Accessed from ExitFrame::code_slot. | 5904 // Accessed from ExitFrame::code_slot. |
5714 li(t8, Operand(CodeObject()), CONSTANT_SIZE); | 5905 li(t8, Operand(CodeObject()), CONSTANT_SIZE); |
5715 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); | 5906 Sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); |
5716 | 5907 |
5717 // Save the frame pointer and the context in top. | 5908 // Save the frame pointer and the context in top. |
5718 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 5909 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
5719 sd(fp, MemOperand(t8)); | 5910 Sd(fp, MemOperand(t8)); |
5720 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 5911 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
5721 sd(cp, MemOperand(t8)); | 5912 Sd(cp, MemOperand(t8)); |
5722 | 5913 |
5723 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); | 5914 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); |
5724 if (save_doubles) { | 5915 if (save_doubles) { |
5725 // The stack is already aligned to 0 modulo 8 for stores with sdc1. | 5916 // The stack is already aligned to 0 modulo 8 for stores with sdc1. |
5726 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; | 5917 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; |
5727 int space = kNumOfSavedRegisters * kDoubleSize; | 5918 int space = kNumOfSavedRegisters * kDoubleSize; |
5728 Dsubu(sp, sp, Operand(space)); | 5919 Dsubu(sp, sp, Operand(space)); |
5729 // Remember: we only need to save every 2nd double FPU value. | 5920 // Remember: we only need to save every 2nd double FPU value. |
5730 for (int i = 0; i < kNumOfSavedRegisters; i++) { | 5921 for (int i = 0; i < kNumOfSavedRegisters; i++) { |
5731 FPURegister reg = FPURegister::from_code(2 * i); | 5922 FPURegister reg = FPURegister::from_code(2 * i); |
5732 sdc1(reg, MemOperand(sp, i * kDoubleSize)); | 5923 Sdc1(reg, MemOperand(sp, i * kDoubleSize)); |
5733 } | 5924 } |
5734 } | 5925 } |
5735 | 5926 |
5736 // Reserve room for the return address, stack space and an optional slot | 5927 // Reserve room for the return address, stack space and an optional slot |
5737 // (used by the DirectCEntryStub to hold the return value if a struct is | 5928 // (used by the DirectCEntryStub to hold the return value if a struct is |
5738 // returned) and align the frame preparing for calling the runtime function. | 5929 // returned) and align the frame preparing for calling the runtime function. |
5739 DCHECK(stack_space >= 0); | 5930 DCHECK(stack_space >= 0); |
5740 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize)); | 5931 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize)); |
5741 if (frame_alignment > 0) { | 5932 if (frame_alignment > 0) { |
5742 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); | 5933 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); |
5743 And(sp, sp, Operand(-frame_alignment)); // Align stack. | 5934 And(sp, sp, Operand(-frame_alignment)); // Align stack. |
5744 } | 5935 } |
5745 | 5936 |
5746 // Set the exit frame sp value to point just before the return address | 5937 // Set the exit frame sp value to point just before the return address |
5747 // location. | 5938 // location. |
5748 daddiu(at, sp, kPointerSize); | 5939 daddiu(at, sp, kPointerSize); |
5749 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); | 5940 Sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); |
5750 } | 5941 } |
5751 | 5942 |
5752 | 5943 |
5753 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, | 5944 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, |
5754 bool restore_context, bool do_return, | 5945 bool restore_context, bool do_return, |
5755 bool argument_count_is_length) { | 5946 bool argument_count_is_length) { |
5756 // Optionally restore all double registers. | 5947 // Optionally restore all double registers. |
5757 if (save_doubles) { | 5948 if (save_doubles) { |
5758 // Remember: we only need to restore every 2nd double FPU value. | 5949 // Remember: we only need to restore every 2nd double FPU value. |
5759 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; | 5950 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2; |
5760 Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp + | 5951 Dsubu(t8, fp, Operand(ExitFrameConstants::kFixedFrameSizeFromFp + |
5761 kNumOfSavedRegisters * kDoubleSize)); | 5952 kNumOfSavedRegisters * kDoubleSize)); |
5762 for (int i = 0; i < kNumOfSavedRegisters; i++) { | 5953 for (int i = 0; i < kNumOfSavedRegisters; i++) { |
5763 FPURegister reg = FPURegister::from_code(2 * i); | 5954 FPURegister reg = FPURegister::from_code(2 * i); |
5764 ldc1(reg, MemOperand(t8, i * kDoubleSize)); | 5955 Ldc1(reg, MemOperand(t8, i * kDoubleSize)); |
5765 } | 5956 } |
5766 } | 5957 } |
5767 | 5958 |
5768 // Clear top frame. | 5959 // Clear top frame. |
5769 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); | 5960 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); |
5770 sd(zero_reg, MemOperand(t8)); | 5961 Sd(zero_reg, MemOperand(t8)); |
5771 | 5962 |
5772 // Restore current context from top and clear it in debug mode. | 5963 // Restore current context from top and clear it in debug mode. |
5773 if (restore_context) { | 5964 if (restore_context) { |
5774 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 5965 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
5775 ld(cp, MemOperand(t8)); | 5966 Ld(cp, MemOperand(t8)); |
5776 } | 5967 } |
5777 #ifdef DEBUG | 5968 #ifdef DEBUG |
5778 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); | 5969 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); |
5779 sd(a3, MemOperand(t8)); | 5970 Sd(a3, MemOperand(t8)); |
5780 #endif | 5971 #endif |
5781 | 5972 |
5782 // Pop the arguments, restore registers, and return. | 5973 // Pop the arguments, restore registers, and return. |
5783 mov(sp, fp); // Respect ABI stack constraint. | 5974 mov(sp, fp); // Respect ABI stack constraint. |
5784 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); | 5975 Ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); |
5785 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); | 5976 Ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); |
5786 | 5977 |
5787 if (argument_count.is_valid()) { | 5978 if (argument_count.is_valid()) { |
5788 if (argument_count_is_length) { | 5979 if (argument_count_is_length) { |
5789 daddu(sp, sp, argument_count); | 5980 daddu(sp, sp, argument_count); |
5790 } else { | 5981 } else { |
5791 Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8); | 5982 Dlsa(sp, sp, argument_count, kPointerSizeLog2, t8); |
5792 } | 5983 } |
5793 } | 5984 } |
5794 | 5985 |
5795 if (do_return) { | 5986 if (do_return) { |
(...skipping 69 matching lines...) |
5865 DCHECK(!dst.is(overflow)); | 6056 DCHECK(!dst.is(overflow)); |
5866 DCHECK(!src.is(overflow)); | 6057 DCHECK(!src.is(overflow)); |
5867 SmiTag(dst, src); | 6058 SmiTag(dst, src); |
5868 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0. | 6059 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0. |
5869 } | 6060 } |
5870 } | 6061 } |
5871 | 6062 |
5872 | 6063 |
5873 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) { | 6064 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) { |
5874 if (SmiValuesAre32Bits()) { | 6065 if (SmiValuesAre32Bits()) { |
5875 lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); | 6066 Lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); |
5876 } else { | 6067 } else { |
5877 lw(dst, src); | 6068 Lw(dst, src); |
5878 SmiUntag(dst); | 6069 SmiUntag(dst); |
5879 } | 6070 } |
5880 } | 6071 } |
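With 32-bit smis (SmiValuesAre32Bits), the payload lives in the upper half of the 64-bit word, so UntagSmiMemOperand biases the offset and a plain 32-bit load yields the untagged value with no shift. Sketch for little-endian mips64, where that bias is 4 bytes:

    // Direct untagged read of a 32-bit smi payload (offset is illustrative).
    int32_t untagged = *reinterpret_cast<const int32_t*>(addr + 4);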
5881 | 6072 |
5882 | 6073 |
5883 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) { | 6074 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) { |
5884 if (SmiValuesAre32Bits()) { | 6075 if (SmiValuesAre32Bits()) { |
5885 // TODO(plind): not clear if lw or ld faster here, need micro-benchmark. | 6076 // TODO(plind): not clear if lw or ld faster here, need micro-benchmark. |
5886 lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); | 6077 Lw(dst, UntagSmiMemOperand(src.rm(), src.offset())); |
5887 dsll(dst, dst, scale); | 6078 dsll(dst, dst, scale); |
5888 } else { | 6079 } else { |
5889 lw(dst, src); | 6080 Lw(dst, src); |
5890 DCHECK(scale >= kSmiTagSize); | 6081 DCHECK(scale >= kSmiTagSize); |
5891 sll(dst, dst, scale - kSmiTagSize); | 6082 sll(dst, dst, scale - kSmiTagSize); |
5892 } | 6083 } |
5893 } | 6084 } |
5894 | 6085 |
5895 | 6086 |
5896 // Returns 2 values: the Smi and a scaled version of the int within the Smi. | 6087 // Returns 2 values: the Smi and a scaled version of the int within the Smi. |
5897 void MacroAssembler::SmiLoadWithScale(Register d_smi, | 6088 void MacroAssembler::SmiLoadWithScale(Register d_smi, |
5898 Register d_scaled, | 6089 Register d_scaled, |
5899 MemOperand src, | 6090 MemOperand src, |
5900 int scale) { | 6091 int scale) { |
5901 if (SmiValuesAre32Bits()) { | 6092 if (SmiValuesAre32Bits()) { |
5902 ld(d_smi, src); | 6093 Ld(d_smi, src); |
5903 dsra(d_scaled, d_smi, kSmiShift - scale); | 6094 dsra(d_scaled, d_smi, kSmiShift - scale); |
5904 } else { | 6095 } else { |
5905 lw(d_smi, src); | 6096 Lw(d_smi, src); |
5906 DCHECK(scale >= kSmiTagSize); | 6097 DCHECK(scale >= kSmiTagSize); |
5907 sll(d_scaled, d_smi, scale - kSmiTagSize); | 6098 sll(d_scaled, d_smi, scale - kSmiTagSize); |
5908 } | 6099 } |
5909 } | 6100 } |
5910 | 6101 |
5911 | 6102 |
5912 // Returns 2 values: the untagged Smi (int32) and a scaled version of that int. | 6103 // Returns 2 values: the untagged Smi (int32) and a scaled version of that int. |
5913 void MacroAssembler::SmiLoadUntagWithScale(Register d_int, | 6104 void MacroAssembler::SmiLoadUntagWithScale(Register d_int, |
5914 Register d_scaled, | 6105 Register d_scaled, |
5915 MemOperand src, | 6106 MemOperand src, |
5916 int scale) { | 6107 int scale) { |
5917 if (SmiValuesAre32Bits()) { | 6108 if (SmiValuesAre32Bits()) { |
5918 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset())); | 6109 Lw(d_int, UntagSmiMemOperand(src.rm(), src.offset())); |
5919 dsll(d_scaled, d_int, scale); | 6110 dsll(d_scaled, d_int, scale); |
5920 } else { | 6111 } else { |
5921 lw(d_int, src); | 6112 Lw(d_int, src); |
5922 // Need both the int and the scaled int, so use two instructions. | 6113 // Need both the int and the scaled int, so use two instructions. |
5923 SmiUntag(d_int); | 6114 SmiUntag(d_int); |
5924 sll(d_scaled, d_int, scale); | 6115 sll(d_scaled, d_int, scale); |
5925 } | 6116 } |
5926 } | 6117 } |
5927 | 6118 |
5928 | 6119 |
5929 void MacroAssembler::UntagAndJumpIfSmi(Register dst, | 6120 void MacroAssembler::UntagAndJumpIfSmi(Register dst, |
5930 Register src, | 6121 Register src, |
5931 Label* smi_case) { | 6122 Label* smi_case) { |
(...skipping 118 matching lines...)
6050 bind(&done); | 6241 bind(&done); |
6051 } | 6242 } |
6052 | 6243 |
6053 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, | 6244 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, |
6054 Register scratch) { | 6245 Register scratch) { |
6055 if (emit_debug_code()) { | 6246 if (emit_debug_code()) { |
6056 Label done_checking; | 6247 Label done_checking; |
6057 AssertNotSmi(object); | 6248 AssertNotSmi(object); |
6058 LoadRoot(scratch, Heap::kUndefinedValueRootIndex); | 6249 LoadRoot(scratch, Heap::kUndefinedValueRootIndex); |
6059 Branch(&done_checking, eq, object, Operand(scratch)); | 6250 Branch(&done_checking, eq, object, Operand(scratch)); |
6060 ld(t8, FieldMemOperand(object, HeapObject::kMapOffset)); | 6251 Ld(t8, FieldMemOperand(object, HeapObject::kMapOffset)); |
6061 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex); | 6252 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex); |
6062 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch)); | 6253 Assert(eq, kExpectedUndefinedOrCell, t8, Operand(scratch)); |
6063 bind(&done_checking); | 6254 bind(&done_checking); |
6064 } | 6255 } |
6065 } | 6256 } |
6066 | 6257 |
6067 | 6258 |
6068 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { | 6259 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { |
6069 if (emit_debug_code()) { | 6260 if (emit_debug_code()) { |
6070 DCHECK(!reg.is(at)); | 6261 DCHECK(!reg.is(at)); |
6071 LoadRoot(at, index); | 6262 LoadRoot(at, index); |
6072 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); | 6263 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); |
6073 } | 6264 } |
6074 } | 6265 } |
6075 | 6266 |
6076 | 6267 |
6077 void MacroAssembler::JumpIfNotHeapNumber(Register object, | 6268 void MacroAssembler::JumpIfNotHeapNumber(Register object, |
6078 Register heap_number_map, | 6269 Register heap_number_map, |
6079 Register scratch, | 6270 Register scratch, |
6080 Label* on_not_heap_number) { | 6271 Label* on_not_heap_number) { |
6081 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); | 6272 Ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); |
6082 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 6273 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
6083 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map)); | 6274 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map)); |
6084 } | 6275 } |
6085 | 6276 |
6086 | 6277 |
6087 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings( | 6278 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings( |
6088 Register first, Register second, Register scratch1, Register scratch2, | 6279 Register first, Register second, Register scratch1, Register scratch2, |
6089 Label* failure) { | 6280 Label* failure) { |
6090 // Test that both first and second are sequential one-byte strings. | 6281 // Test that both first and second are sequential one-byte strings. |
6091 // Assume that they are non-smis. | 6282 // Assume that they are non-smis. |
6092 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); | 6283 Ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); |
6093 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); | 6284 Ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); |
6094 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | 6285 Lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
6095 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); | 6286 Lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); |
6096 | 6287 |
6097 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1, | 6288 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1, |
6098 scratch2, failure); | 6289 scratch2, failure); |
6099 } | 6290 } |
6100 | 6291 |
6101 | 6292 |
6102 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first, | 6293 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first, |
6103 Register second, | 6294 Register second, |
6104 Register scratch1, | 6295 Register scratch1, |
6105 Register scratch2, | 6296 Register scratch2, |
(...skipping 213 matching lines...)
6319 | 6510 |
6320 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, | 6511 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, |
6321 Register index, | 6512 Register index, |
6322 Register value, | 6513 Register value, |
6323 Register scratch, | 6514 Register scratch, |
6324 uint32_t encoding_mask) { | 6515 uint32_t encoding_mask) { |
6325 Label is_object; | 6516 Label is_object; |
6326 SmiTst(string, at); | 6517 SmiTst(string, at); |
6327 Check(ne, kNonObject, at, Operand(zero_reg)); | 6518 Check(ne, kNonObject, at, Operand(zero_reg)); |
6328 | 6519 |
6329 ld(at, FieldMemOperand(string, HeapObject::kMapOffset)); | 6520 Ld(at, FieldMemOperand(string, HeapObject::kMapOffset)); |
6330 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset)); | 6521 Lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset)); |
6331 | 6522 |
6332 andi(at, at, kStringRepresentationMask | kStringEncodingMask); | 6523 andi(at, at, kStringRepresentationMask | kStringEncodingMask); |
6333 li(scratch, Operand(encoding_mask)); | 6524 li(scratch, Operand(encoding_mask)); |
6334 Check(eq, kUnexpectedStringType, at, Operand(scratch)); | 6525 Check(eq, kUnexpectedStringType, at, Operand(scratch)); |
6335 | 6526 |
6336 // TODO(plind): requires Smi size check code for mips32. | 6527 // TODO(plind): requires Smi size check code for mips32. |
6337 | 6528 |
6338 ld(at, FieldMemOperand(string, String::kLengthOffset)); | 6529 Ld(at, FieldMemOperand(string, String::kLengthOffset)); |
6339 Check(lt, kIndexIsTooLarge, index, Operand(at)); | 6530 Check(lt, kIndexIsTooLarge, index, Operand(at)); |
6340 | 6531 |
6341 DCHECK(Smi::kZero == 0); | 6532 DCHECK(Smi::kZero == 0); |
6342 Check(ge, kIndexIsNegative, index, Operand(zero_reg)); | 6533 Check(ge, kIndexIsNegative, index, Operand(zero_reg)); |
6343 } | 6534 } |
6344 | 6535 |
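Note: the encoding check masks the instance type down to its representation and encoding bits and compares the result against the caller's expected pattern; the index is then bounds-checked against the string length. A sketch of the bit test (plain C++; the constants are illustrative stand-ins, not V8's actual tag values):

    #include <cassert>

    const int kStringRepresentationMask = 0x07;  // Illustrative widths.
    const int kStringEncodingMask = 0x08;
    const int kSeqStringTag = 0x00;
    const int kOneByteStringTag = 0x08;

    // andi(at, type, repr | encoding) followed by an equality Check:
    bool MatchesEncoding(int instance_type, int encoding_mask) {
      int masked =
          instance_type & (kStringRepresentationMask | kStringEncodingMask);
      return masked == encoding_mask;
    }

    int main() {
      int seq_one_byte = kSeqStringTag | kOneByteStringTag;
      assert(MatchesEncoding(seq_one_byte, seq_one_byte));
      assert(!MatchesEncoding(seq_one_byte | 0x01, seq_one_byte));  // Cons-ish.
    }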
6345 | 6536 |
6346 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 6537 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
6347 int num_double_arguments, | 6538 int num_double_arguments, |
6348 Register scratch) { | 6539 Register scratch) { |
6349 int frame_alignment = ActivationFrameAlignment(); | 6540 int frame_alignment = ActivationFrameAlignment(); |
6350 | 6541 |
6351 // n64: Up to eight simple arguments in a0..a3, a4..a7; no argument slots. | 6542 // n64: Up to eight simple arguments in a0..a3, a4..a7; no argument slots. |
6352 // O32: Up to four simple arguments are passed in registers a0..a3. | 6543 // O32: Up to four simple arguments are passed in registers a0..a3. |
6353 // Those four arguments must have reserved argument slots on the stack for | 6544 // Those four arguments must have reserved argument slots on the stack for |
6354 // mips, even though those argument slots are not normally used. | 6545 // mips, even though those argument slots are not normally used. |
6355 // Both ABIs: Remaining arguments are pushed on the stack, above (higher | 6546 // Both ABIs: Remaining arguments are pushed on the stack, above (higher |
6356 // address than) the (O32) argument slots. (arg slot calculation handled by | 6547 // address than) the (O32) argument slots. (arg slot calculation handled by |
6357 // CalculateStackPassedWords()). | 6548 // CalculateStackPassedWords()). |
6358 int stack_passed_arguments = CalculateStackPassedWords( | 6549 int stack_passed_arguments = CalculateStackPassedWords( |
6359 num_reg_arguments, num_double_arguments); | 6550 num_reg_arguments, num_double_arguments); |
6360 if (frame_alignment > kPointerSize) { | 6551 if (frame_alignment > kPointerSize) { |
6361 // Make the stack end at alignment and make room for the stack-passed | 6552 // Make the stack end at alignment and make room for the stack-passed |
6362 // arguments and the original value of sp. | 6553 // arguments and the original value of sp. |
6363 mov(scratch, sp); | 6554 mov(scratch, sp); |
6364 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); | 6555 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); |
6365 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); | 6556 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment)); |
6366 And(sp, sp, Operand(-frame_alignment)); | 6557 And(sp, sp, Operand(-frame_alignment)); |
6367 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 6558 Sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
6368 } else { | 6559 } else { |
6369 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 6560 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); |
6370 } | 6561 } |
6371 } | 6562 } |
6372 | 6563 |
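Note: the alignment sequence above is the usual over-aligned call setup: reserve the stack-passed words plus one extra slot, round sp down to the ABI alignment, and store the old sp in the top slot so CallCFunction can restore it with a single load. In pointer arithmetic (a sketch assuming 8-byte words, not the V8 code itself):

    #include <cassert>
    #include <cstdint>

    const int kPointerSize = 8;

    // Returns the aligned sp; the caller then stores old_sp at
    // new_sp + stack_passed_words * kPointerSize, as Sd() does above.
    uintptr_t AlignForCall(uintptr_t sp, int stack_passed_words, int alignment) {
      sp -= (stack_passed_words + 1) * kPointerSize;  // Args + saved-sp slot.
      sp &= ~static_cast<uintptr_t>(alignment - 1);   // Round down to alignment.
      return sp;
    }

    int main() {
      uintptr_t sp = 0x7fff0008;  // Deliberately misaligned for a 16-byte ABI.
      uintptr_t new_sp = AlignForCall(sp, 3, 16);
      assert(new_sp % 16 == 0);
      assert(sp - new_sp >= 4 * kPointerSize);  // Everything fits below old sp.
    }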
6373 | 6564 |
6374 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, | 6565 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, |
6375 Register scratch) { | 6566 Register scratch) { |
6376 PrepareCallCFunction(num_reg_arguments, 0, scratch); | 6567 PrepareCallCFunction(num_reg_arguments, 0, scratch); |
6377 } | 6568 } |
(...skipping 61 matching lines...)
6439 mov(t9, function); | 6630 mov(t9, function); |
6440 function = t9; | 6631 function = t9; |
6441 } | 6632 } |
6442 | 6633 |
6443 Call(function); | 6634 Call(function); |
6444 | 6635 |
6445 int stack_passed_arguments = CalculateStackPassedWords( | 6636 int stack_passed_arguments = CalculateStackPassedWords( |
6446 num_reg_arguments, num_double_arguments); | 6637 num_reg_arguments, num_double_arguments); |
6447 | 6638 |
6448 if (base::OS::ActivationFrameAlignment() > kPointerSize) { | 6639 if (base::OS::ActivationFrameAlignment() > kPointerSize) { |
6449 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); | 6640 Ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); |
6450 } else { | 6641 } else { |
6451 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); | 6642 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); |
6452 } | 6643 } |
6453 } | 6644 } |
6454 | 6645 |
6455 | 6646 |
6456 #undef BRANCH_ARGS_CHECK | 6647 #undef BRANCH_ARGS_CHECK |
6457 | 6648 |
6458 | 6649 |
6459 void MacroAssembler::CheckPageFlag( | 6650 void MacroAssembler::CheckPageFlag( |
6460 Register object, | 6651 Register object, |
6461 Register scratch, | 6652 Register scratch, |
6462 int mask, | 6653 int mask, |
6463 Condition cc, | 6654 Condition cc, |
6464 Label* condition_met) { | 6655 Label* condition_met) { |
6465 And(scratch, object, Operand(~Page::kPageAlignmentMask)); | 6656 And(scratch, object, Operand(~Page::kPageAlignmentMask)); |
6466 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); | 6657 Ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); |
6467 And(scratch, scratch, Operand(mask)); | 6658 And(scratch, scratch, Operand(mask)); |
6468 Branch(condition_met, cc, scratch, Operand(zero_reg)); | 6659 Branch(condition_met, cc, scratch, Operand(zero_reg)); |
6469 } | 6660 } |
6470 | 6661 |
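Note: CheckPageFlag works because pages are power-of-two aligned: masking the low bits off any interior address yields the MemoryChunk header, and the flags word sits at a fixed offset from there. The address math (a sketch in plain C++ with an illustrative page size):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageSize = 512 * 1024;  // Illustrative.
    const uintptr_t kPageAlignmentMask = kPageSize - 1;

    // And(scratch, object, ~kPageAlignmentMask) recovers the page start.
    uintptr_t PageStart(uintptr_t object_address) {
      return object_address & ~kPageAlignmentMask;
    }

    int main() {
      uintptr_t page = 0x40000000;  // 512 KB aligned.
      assert(PageStart(page + 0x1234) == page);
      assert(PageStart(page + kPageSize - 1) == page);
    }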
6471 | 6662 |
6472 void MacroAssembler::JumpIfBlack(Register object, | 6663 void MacroAssembler::JumpIfBlack(Register object, |
6473 Register scratch0, | 6664 Register scratch0, |
6474 Register scratch1, | 6665 Register scratch1, |
6475 Label* on_black) { | 6666 Label* on_black) { |
6476 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern. | 6667 HasColor(object, scratch0, scratch1, on_black, 1, 1); // kBlackBitPattern. |
(...skipping 56 matching lines...)
6533 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0); | 6724 DCHECK(strcmp(Marking::kGreyBitPattern, "10") == 0); |
6534 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); | 6725 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0); |
6535 | 6726 |
6536 // Since both black and grey have a 1 in the first position and white does | 6727 // Since both black and grey have a 1 in the first position and white does |
6537 // not have a 1 there, we only need to check one bit. | 6728 // not have a 1 there, we only need to check one bit. |
6538 // Note that we are using a 4-byte aligned 8-byte load. | 6729 // Note that we are using a 4-byte aligned 8-byte load. |
6539 if (emit_debug_code()) { | 6730 if (emit_debug_code()) { |
6540 LoadWordPair(load_scratch, | 6731 LoadWordPair(load_scratch, |
6541 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 6732 MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
6542 } else { | 6733 } else { |
6543 lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); | 6734 Lwu(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); |
6544 } | 6735 } |
6545 And(t8, mask_scratch, load_scratch); | 6736 And(t8, mask_scratch, load_scratch); |
6546 Branch(value_is_white, eq, t8, Operand(zero_reg)); | 6737 Branch(value_is_white, eq, t8, Operand(zero_reg)); |
6547 } | 6738 } |
6548 | 6739 |
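Note: given those patterns — black "11", grey "10", white "00", "01" impossible — the first mark bit alone separates white from non-white, which is why a single And against the mask suffices. A truth-table sketch (plain C++; numbering the "first" bit of the pattern strings as bit 0 below is an illustrative choice):

    #include <cassert>

    enum Color { kWhite = 0b00, kGrey = 0b01, kBlack = 0b11 };  // 0b10 unused.

    // And(t8, mask_scratch, load_scratch): a zero result means the first
    // mark bit is clear, i.e. the object is white.
    bool IsWhite(int mark_bits) { return (mark_bits & 0b01) == 0; }

    int main() {
      assert(IsWhite(kWhite));
      assert(!IsWhite(kGrey));
      assert(!IsWhite(kBlack));
    }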
6549 | 6740 |
6550 void MacroAssembler::LoadInstanceDescriptors(Register map, | 6741 void MacroAssembler::LoadInstanceDescriptors(Register map, |
6551 Register descriptors) { | 6742 Register descriptors) { |
6552 ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); | 6743 Ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); |
6553 } | 6744 } |
6554 | 6745 |
6555 | 6746 |
6556 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { | 6747 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { |
6557 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 6748 Lwu(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
6558 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); | 6749 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); |
6559 } | 6750 } |
6560 | 6751 |
6561 | 6752 |
6562 void MacroAssembler::EnumLength(Register dst, Register map) { | 6753 void MacroAssembler::EnumLength(Register dst, Register map) { |
6563 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); | 6754 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); |
6564 lwu(dst, FieldMemOperand(map, Map::kBitField3Offset)); | 6755 Lwu(dst, FieldMemOperand(map, Map::kBitField3Offset)); |
6565 And(dst, dst, Operand(Map::EnumLengthBits::kMask)); | 6756 And(dst, dst, Operand(Map::EnumLengthBits::kMask)); |
6566 SmiTag(dst); | 6757 SmiTag(dst); |
6567 } | 6758 } |
6568 | 6759 |
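Note: EnumLength and NumberOfOwnDescriptors are the same bit-field decode pattern: mask (and, for non-zero shifts, shift) the packed bit_field3 word; EnumLengthBits sits at shift 0 (STATIC_ASSERT'd above), so a single And suffices before the result is re-tagged as a smi. A sketch of the decode (plain C++, illustrative field width):

    #include <cassert>

    const unsigned kEnumLengthShift = 0;
    const unsigned kEnumLengthMask = (1u << 10) - 1;  // Illustrative width.

    unsigned DecodeEnumLength(unsigned bit_field3) {
      return (bit_field3 >> kEnumLengthShift) & kEnumLengthMask;
    }

    int main() {
      unsigned bit_field3 = (0x5u << 10) | 7;  // Other fields above, length 7.
      assert(DecodeEnumLength(bit_field3) == 7);
    }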
6569 | 6760 |
6570 void MacroAssembler::LoadAccessor(Register dst, Register holder, | 6761 void MacroAssembler::LoadAccessor(Register dst, Register holder, |
6571 int accessor_index, | 6762 int accessor_index, |
6572 AccessorComponent accessor) { | 6763 AccessorComponent accessor) { |
6573 ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset)); | 6764 Ld(dst, FieldMemOperand(holder, HeapObject::kMapOffset)); |
6574 LoadInstanceDescriptors(dst, dst); | 6765 LoadInstanceDescriptors(dst, dst); |
6575 ld(dst, | 6766 Ld(dst, |
6576 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index))); | 6767 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index))); |
6577 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset | 6768 int offset = accessor == ACCESSOR_GETTER ? AccessorPair::kGetterOffset |
6578 : AccessorPair::kSetterOffset; | 6769 : AccessorPair::kSetterOffset; |
6579 ld(dst, FieldMemOperand(dst, offset)); | 6770 Ld(dst, FieldMemOperand(dst, offset)); |
6580 } | 6771 } |
6581 | 6772 |
6582 | 6773 |
6583 void MacroAssembler::CheckEnumCache(Label* call_runtime) { | 6774 void MacroAssembler::CheckEnumCache(Label* call_runtime) { |
6584 Register null_value = a5; | 6775 Register null_value = a5; |
6585 Register empty_fixed_array_value = a6; | 6776 Register empty_fixed_array_value = a6; |
6586 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); | 6777 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); |
6587 Label next, start; | 6778 Label next, start; |
6588 mov(a2, a0); | 6779 mov(a2, a0); |
6589 | 6780 |
6590 // Check if the enum length field is properly initialized, indicating that | 6781 // Check if the enum length field is properly initialized, indicating that |
6591 // there is an enum cache. | 6782 // there is an enum cache. |
6592 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); | 6783 Ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); |
6593 | 6784 |
6594 EnumLength(a3, a1); | 6785 EnumLength(a3, a1); |
6595 Branch( | 6786 Branch( |
6596 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel))); | 6787 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel))); |
6597 | 6788 |
6598 LoadRoot(null_value, Heap::kNullValueRootIndex); | 6789 LoadRoot(null_value, Heap::kNullValueRootIndex); |
6599 jmp(&start); | 6790 jmp(&start); |
6600 | 6791 |
6601 bind(&next); | 6792 bind(&next); |
6602 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); | 6793 Ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); |
6603 | 6794 |
6604 // For all objects but the receiver, check that the cache is empty. | 6795 // For all objects but the receiver, check that the cache is empty. |
6605 EnumLength(a3, a1); | 6796 EnumLength(a3, a1); |
6606 Branch(call_runtime, ne, a3, Operand(Smi::kZero)); | 6797 Branch(call_runtime, ne, a3, Operand(Smi::kZero)); |
6607 | 6798 |
6608 bind(&start); | 6799 bind(&start); |
6609 | 6800 |
6610 // Check that there are no elements. Register a2 contains the current JS | 6801 // Check that there are no elements. Register a2 contains the current JS |
6611 // object we've reached through the prototype chain. | 6802 // object we've reached through the prototype chain. |
6612 Label no_elements; | 6803 Label no_elements; |
6613 ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset)); | 6804 Ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset)); |
6614 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value)); | 6805 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value)); |
6615 | 6806 |
6616 // Second chance, the object may be using the empty slow element dictionary. | 6807 // Second chance, the object may be using the empty slow element dictionary. |
6617 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex); | 6808 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex); |
6618 Branch(call_runtime, ne, a2, Operand(at)); | 6809 Branch(call_runtime, ne, a2, Operand(at)); |
6619 | 6810 |
6620 bind(&no_elements); | 6811 bind(&no_elements); |
6621 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); | 6812 Ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); |
6622 Branch(&next, ne, a2, Operand(null_value)); | 6813 Branch(&next, ne, a2, Operand(null_value)); |
6623 } | 6814 } |
6624 | 6815 |
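Note: the walk above enforces the fast-for-in preconditions: the receiver's enum cache must be initialized, every object further up the chain must have an empty cache, each object's elements must be empty (or the shared empty slow dictionary), and the loop terminates at null. The control flow mirrored in plain C++ (hypothetical types, not V8's heap API):

    struct Obj {
      int enum_length;       // kInvalidEnumCacheSentinel when uninitialized.
      bool elements_empty;   // Empty fixed array or empty slow dictionary.
      const Obj* prototype;  // nullptr stands in for the null value.
    };

    const int kInvalidEnumCacheSentinel = -1;  // Illustrative value.

    bool CanUseEnumCache(const Obj* receiver) {
      if (receiver->enum_length == kInvalidEnumCacheSentinel) return false;
      for (const Obj* o = receiver; o != nullptr; o = o->prototype) {
        if (o != receiver && o->enum_length != 0) return false;  // Non-empty cache.
        if (!o->elements_empty) return false;                    // Has elements.
      }
      return true;
    }

    int main() {
      Obj proto{0, true, nullptr};
      Obj receiver{2, true, &proto};
      return CanUseEnumCache(&receiver) ? 0 : 1;
    }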
6625 | 6816 |
6626 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { | 6817 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { |
6627 DCHECK(!output_reg.is(input_reg)); | 6818 DCHECK(!output_reg.is(input_reg)); |
6628 Label done; | 6819 Label done; |
6629 li(output_reg, Operand(255)); | 6820 li(output_reg, Operand(255)); |
6630 // Normal branch: nop in delay slot. | 6821 // Normal branch: nop in delay slot. |
6631 Branch(&done, gt, input_reg, Operand(output_reg)); | 6822 Branch(&done, gt, input_reg, Operand(output_reg)); |
(...skipping 43 matching lines...)
6675 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag; | 6866 const int kMementoMapOffset = JSArray::kSize - kHeapObjectTag; |
6676 const int kMementoLastWordOffset = | 6867 const int kMementoLastWordOffset = |
6677 kMementoMapOffset + AllocationMemento::kSize - kPointerSize; | 6868 kMementoMapOffset + AllocationMemento::kSize - kPointerSize; |
6678 | 6869 |
6679 // Bail out if the object is not in new space. | 6870 // Bail out if the object is not in new space. |
6680 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found); | 6871 JumpIfNotInNewSpace(receiver_reg, scratch_reg, no_memento_found); |
6681 // If the object is in new space, we need to check whether it is on the same | 6872 // If the object is in new space, we need to check whether it is on the same |
6682 // page as the current top. | 6873 // page as the current top. |
6683 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); | 6874 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); |
6684 li(at, Operand(new_space_allocation_top_adr)); | 6875 li(at, Operand(new_space_allocation_top_adr)); |
6685 ld(at, MemOperand(at)); | 6876 Ld(at, MemOperand(at)); |
6686 Xor(scratch_reg, scratch_reg, Operand(at)); | 6877 Xor(scratch_reg, scratch_reg, Operand(at)); |
6687 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask)); | 6878 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask)); |
6688 Branch(&top_check, eq, scratch_reg, Operand(zero_reg)); | 6879 Branch(&top_check, eq, scratch_reg, Operand(zero_reg)); |
6689 // The object is on a different page than allocation top. Bail out if the | 6880 // The object is on a different page than allocation top. Bail out if the |
6690 // object sits on the page boundary as no memento can follow and we cannot | 6881 // object sits on the page boundary as no memento can follow and we cannot |
6691 // touch the memory following it. | 6882 // touch the memory following it. |
6692 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); | 6883 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); |
6693 Xor(scratch_reg, scratch_reg, Operand(receiver_reg)); | 6884 Xor(scratch_reg, scratch_reg, Operand(receiver_reg)); |
6694 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask)); | 6885 And(scratch_reg, scratch_reg, Operand(~Page::kPageAlignmentMask)); |
6695 Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg)); | 6886 Branch(no_memento_found, ne, scratch_reg, Operand(zero_reg)); |
6696 // Continue with the actual map check. | 6887 // Continue with the actual map check. |
6697 jmp(&map_check); | 6888 jmp(&map_check); |
6698 // If top is on the same page as the current object, we need to check whether | 6889 // If top is on the same page as the current object, we need to check whether |
6699 // we are below top. | 6890 // we are below top. |
6700 bind(&top_check); | 6891 bind(&top_check); |
6701 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); | 6892 Daddu(scratch_reg, receiver_reg, Operand(kMementoLastWordOffset)); |
6702 li(at, Operand(new_space_allocation_top_adr)); | 6893 li(at, Operand(new_space_allocation_top_adr)); |
6703 ld(at, MemOperand(at)); | 6894 Ld(at, MemOperand(at)); |
6704 Branch(no_memento_found, ge, scratch_reg, Operand(at)); | 6895 Branch(no_memento_found, ge, scratch_reg, Operand(at)); |
6705 // Memento map check. | 6896 // Memento map check. |
6706 bind(&map_check); | 6897 bind(&map_check); |
6707 ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset)); | 6898 Ld(scratch_reg, MemOperand(receiver_reg, kMementoMapOffset)); |
6708 Branch(no_memento_found, ne, scratch_reg, | 6899 Branch(no_memento_found, ne, scratch_reg, |
6709 Operand(isolate()->factory()->allocation_memento_map())); | 6900 Operand(isolate()->factory()->allocation_memento_map())); |
6710 } | 6901 } |
6711 | 6902 |
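Note: both bail-outs above use the same same-page idiom as CheckPageFlag: XOR the two addresses and mask off everything below the page alignment; a zero result means all high bits match, so the addresses share a page. A sketch (plain C++, illustrative page size):

    #include <cassert>
    #include <cstdint>

    const uintptr_t kPageAlignmentMask = 512 * 1024 - 1;  // Illustrative.

    // Xor + And as in the memento check: equal high bits <=> same page.
    bool OnSamePage(uintptr_t a, uintptr_t b) {
      return ((a ^ b) & ~kPageAlignmentMask) == 0;
    }

    int main() {
      uintptr_t page = 0x40000000;
      assert(OnSamePage(page + 16, page + 4096));
      assert(!OnSamePage(page + 16, page + kPageAlignmentMask + 17));
    }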
6712 | 6903 |
6713 Register GetRegisterThatIsNotOneOf(Register reg1, | 6904 Register GetRegisterThatIsNotOneOf(Register reg1, |
6714 Register reg2, | 6905 Register reg2, |
6715 Register reg3, | 6906 Register reg3, |
6716 Register reg4, | 6907 Register reg4, |
6717 Register reg5, | 6908 Register reg5, |
(...skipping 103 matching lines...)
6821 if (mag.shift > 0) sra(result, result, mag.shift); | 7012 if (mag.shift > 0) sra(result, result, mag.shift); |
6822 srl(at, dividend, 31); | 7013 srl(at, dividend, 31); |
6823 Addu(result, result, Operand(at)); | 7014 Addu(result, result, Operand(at)); |
6824 } | 7015 } |
6825 | 7016 |
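Note: the tail visible above is the standard signed magic-number division fixup: after the high-half multiply and the arithmetic shift, the intermediate quotient comes out one below the truncated quotient whenever the dividend is negative, and adding the dividend's sign bit (srl dividend, 31) rounds it back toward zero. A check of the fixup step only, not the full magic multiply (plain C++):

    #include <cassert>
    #include <cstdint>

    // Models the sign-bit correction applied after the sra above.
    int32_t TruncatingFixup(int32_t biased_quotient, int32_t dividend) {
      uint32_t sign_bit = static_cast<uint32_t>(dividend) >> 31;  // srl(at, dividend, 31)
      return biased_quotient + static_cast<int32_t>(sign_bit);    // Addu(result, result, at)
    }

    int main() {
      assert(TruncatingFixup(-4, -9) == -3);  // -9 / 3: intermediate -4 -> -3.
      assert(TruncatingFixup(3, 9) == 3);     // Nonnegative: no correction.
    }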
6826 | 7017 |
6827 } // namespace internal | 7018 } // namespace internal |
6828 } // namespace v8 | 7019 } // namespace v8 |
6829 | 7020 |
6830 #endif // V8_TARGET_ARCH_MIPS64 | 7021 #endif // V8_TARGET_ARCH_MIPS64 |