Chromium Code Reviews

Side by Side Diff: src/mips64/macro-assembler-mips64.cc

Issue 371923006: Add mips64 port. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Rebase Created 6 years, 5 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved. 1 // Copyright 2012 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <limits.h> // For LONG_MIN, LONG_MAX. 5 #include <limits.h> // For LONG_MIN, LONG_MAX.
6 6
7 #include "src/v8.h" 7 #include "src/v8.h"
8 8
9 #if V8_TARGET_ARCH_MIPS 9 #if V8_TARGET_ARCH_MIPS64
10 10
11 #include "src/bootstrapper.h" 11 #include "src/bootstrapper.h"
12 #include "src/codegen.h" 12 #include "src/codegen.h"
13 #include "src/cpu-profiler.h" 13 #include "src/cpu-profiler.h"
14 #include "src/debug.h" 14 #include "src/debug.h"
15 #include "src/isolate-inl.h" 15 #include "src/isolate-inl.h"
16 #include "src/runtime.h" 16 #include "src/runtime.h"
17 17
18 namespace v8 { 18 namespace v8 {
19 namespace internal { 19 namespace internal {
(...skipping 14 matching lines...)
34 Representation r) { 34 Representation r) {
35 ASSERT(!r.IsDouble()); 35 ASSERT(!r.IsDouble());
36 if (r.IsInteger8()) { 36 if (r.IsInteger8()) {
37 lb(dst, src); 37 lb(dst, src);
38 } else if (r.IsUInteger8()) { 38 } else if (r.IsUInteger8()) {
39 lbu(dst, src); 39 lbu(dst, src);
40 } else if (r.IsInteger16()) { 40 } else if (r.IsInteger16()) {
41 lh(dst, src); 41 lh(dst, src);
42 } else if (r.IsUInteger16()) { 42 } else if (r.IsUInteger16()) {
43 lhu(dst, src); 43 lhu(dst, src);
44 } else if (r.IsInteger32()) {
45 lw(dst, src);
44 } else { 46 } else {
45 lw(dst, src); 47 ld(dst, src);
46 } 48 }
47 } 49 }
48 50
49 51
50 void MacroAssembler::Store(Register src, 52 void MacroAssembler::Store(Register src,
51 const MemOperand& dst, 53 const MemOperand& dst,
52 Representation r) { 54 Representation r) {
53 ASSERT(!r.IsDouble()); 55 ASSERT(!r.IsDouble());
54 if (r.IsInteger8() || r.IsUInteger8()) { 56 if (r.IsInteger8() || r.IsUInteger8()) {
55 sb(src, dst); 57 sb(src, dst);
56 } else if (r.IsInteger16() || r.IsUInteger16()) { 58 } else if (r.IsInteger16() || r.IsUInteger16()) {
57 sh(src, dst); 59 sh(src, dst);
60 } else if (r.IsInteger32()) {
61 sw(src, dst);
58 } else { 62 } else {
59 if (r.IsHeapObject()) { 63 if (r.IsHeapObject()) {
60 AssertNotSmi(src); 64 AssertNotSmi(src);
61 } else if (r.IsSmi()) { 65 } else if (r.IsSmi()) {
62 AssertSmi(src); 66 AssertSmi(src);
63 } 67 }
64 sw(src, dst); 68 sd(src, dst);
65 } 69 }
66 } 70 }
67 71
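For reference, the Load/Store pair above picks the memory-access width from the Representation: 8- and 16-bit integers keep the byte and halfword instructions, the newly added Integer32 branch uses 32-bit lw/sw, and everything else (tagged values, smis, heap objects) falls through to 64-bit ld/sd. A minimal standalone C++ sketch of that width selection follows; the enum and helper are simplified stand-ins for illustration, not the V8 Representation API.

    // Simplified stand-in for v8::internal::Representation (assumption, not the real API).
    enum class Rep { kInteger8, kUInteger8, kInteger16, kUInteger16, kInteger32, kTagged };

    // Access width in bytes that the mips64 Load/Store above would use.
    int AccessWidthBytes(Rep r) {
      switch (r) {
        case Rep::kInteger8:
        case Rep::kUInteger8:  return 1;  // lb / lbu / sb
        case Rep::kInteger16:
        case Rep::kUInteger16: return 2;  // lh / lhu / sh
        case Rep::kInteger32:  return 4;  // lw / sw (new explicit case in this port)
        default:               return 8;  // ld / sd for tagged, smi and heap-object values
      }
    }
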
68 72
69 void MacroAssembler::LoadRoot(Register destination, 73 void MacroAssembler::LoadRoot(Register destination,
70 Heap::RootListIndex index) { 74 Heap::RootListIndex index) {
71 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); 75 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
72 } 76 }
73 77
74 78
75 void MacroAssembler::LoadRoot(Register destination, 79 void MacroAssembler::LoadRoot(Register destination,
76 Heap::RootListIndex index, 80 Heap::RootListIndex index,
77 Condition cond, 81 Condition cond,
78 Register src1, const Operand& src2) { 82 Register src1, const Operand& src2) {
79 Branch(2, NegateCondition(cond), src1, src2); 83 Branch(2, NegateCondition(cond), src1, src2);
80 lw(destination, MemOperand(s6, index << kPointerSizeLog2)); 84 ld(destination, MemOperand(s6, index << kPointerSizeLog2));
81 } 85 }
82 86
83 87
84 void MacroAssembler::StoreRoot(Register source, 88 void MacroAssembler::StoreRoot(Register source,
85 Heap::RootListIndex index) { 89 Heap::RootListIndex index) {
86 sw(source, MemOperand(s6, index << kPointerSizeLog2)); 90 sd(source, MemOperand(s6, index << kPointerSizeLog2));
87 } 91 }
88 92
89 93
90 void MacroAssembler::StoreRoot(Register source, 94 void MacroAssembler::StoreRoot(Register source,
91 Heap::RootListIndex index, 95 Heap::RootListIndex index,
92 Condition cond, 96 Condition cond,
93 Register src1, const Operand& src2) { 97 Register src1, const Operand& src2) {
94 Branch(2, NegateCondition(cond), src1, src2); 98 Branch(2, NegateCondition(cond), src1, src2);
95 sw(source, MemOperand(s6, index << kPointerSizeLog2)); 99 sd(source, MemOperand(s6, index << kPointerSizeLog2));
96 } 100 }
97 101
98 102
99 // Push and pop all registers that can hold pointers. 103 // Push and pop all registers that can hold pointers.
100 void MacroAssembler::PushSafepointRegisters() { 104 void MacroAssembler::PushSafepointRegisters() {
101 // Safepoints expect a block of kNumSafepointRegisters values on the 105 // Safepoints expect a block of kNumSafepointRegisters values on the
102 // stack, so adjust the stack for unsaved registers. 106 // stack, so adjust the stack for unsaved registers.
103 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 107 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
104 ASSERT(num_unsaved >= 0); 108 ASSERT(num_unsaved >= 0);
105 if (num_unsaved > 0) { 109 if (num_unsaved > 0) {
106 Subu(sp, sp, Operand(num_unsaved * kPointerSize)); 110 Dsubu(sp, sp, Operand(num_unsaved * kPointerSize));
107 } 111 }
108 MultiPush(kSafepointSavedRegisters); 112 MultiPush(kSafepointSavedRegisters);
109 } 113 }
110 114
111 115
112 void MacroAssembler::PopSafepointRegisters() { 116 void MacroAssembler::PopSafepointRegisters() {
113 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters; 117 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
114 MultiPop(kSafepointSavedRegisters); 118 MultiPop(kSafepointSavedRegisters);
115 if (num_unsaved > 0) { 119 if (num_unsaved > 0) {
116 Addu(sp, sp, Operand(num_unsaved * kPointerSize)); 120 Daddu(sp, sp, Operand(num_unsaved * kPointerSize));
117 } 121 }
118 } 122 }
119 123
120 124
121 void MacroAssembler::PushSafepointRegistersAndDoubles() { 125 void MacroAssembler::PushSafepointRegistersAndDoubles() {
122 PushSafepointRegisters(); 126 PushSafepointRegisters();
123 Subu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize)); 127 Dsubu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
124 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) { 128 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i++) {
125 FPURegister reg = FPURegister::FromAllocationIndex(i); 129 FPURegister reg = FPURegister::FromAllocationIndex(i);
126 sdc1(reg, MemOperand(sp, i * kDoubleSize)); 130 sdc1(reg, MemOperand(sp, i * kDoubleSize));
127 } 131 }
128 } 132 }
129 133
130 134
131 void MacroAssembler::PopSafepointRegistersAndDoubles() { 135 void MacroAssembler::PopSafepointRegistersAndDoubles() {
132 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i+=2) { 136 for (int i = 0; i < FPURegister::NumAllocatableRegisters(); i++) {
133 FPURegister reg = FPURegister::FromAllocationIndex(i); 137 FPURegister reg = FPURegister::FromAllocationIndex(i);
134 ldc1(reg, MemOperand(sp, i * kDoubleSize)); 138 ldc1(reg, MemOperand(sp, i * kDoubleSize));
135 } 139 }
136 Addu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize)); 140 Daddu(sp, sp, Operand(FPURegister::NumAllocatableRegisters() * kDoubleSize));
137 PopSafepointRegisters(); 141 PopSafepointRegisters();
138 } 142 }
139 143
140 144
141 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src, 145 void MacroAssembler::StoreToSafepointRegistersAndDoublesSlot(Register src,
142 Register dst) { 146 Register dst) {
143 sw(src, SafepointRegistersAndDoublesSlot(dst)); 147 sd(src, SafepointRegistersAndDoublesSlot(dst));
144 } 148 }
145 149
146 150
147 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) { 151 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
148 sw(src, SafepointRegisterSlot(dst)); 152 sd(src, SafepointRegisterSlot(dst));
149 } 153 }
150 154
151 155
152 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) { 156 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
153 lw(dst, SafepointRegisterSlot(src)); 157 ld(dst, SafepointRegisterSlot(src));
154 } 158 }
155 159
156 160
157 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) { 161 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
158 // The registers are pushed starting with the highest encoding, 162 // The registers are pushed starting with the highest encoding,
159 // which means that lowest encodings are closest to the stack pointer. 163 // which means that lowest encodings are closest to the stack pointer.
160 return kSafepointRegisterStackIndexMap[reg_code]; 164 return kSafepointRegisterStackIndexMap[reg_code];
161 } 165 }
162 166
163 167
(...skipping 39 matching lines...)
203 207
204 // Skip barrier if writing a smi. 208 // Skip barrier if writing a smi.
205 if (smi_check == INLINE_SMI_CHECK) { 209 if (smi_check == INLINE_SMI_CHECK) {
206 JumpIfSmi(value, &done); 210 JumpIfSmi(value, &done);
207 } 211 }
208 212
209 // Although the object register is tagged, the offset is relative to the start 213 // Although the object register is tagged, the offset is relative to the start
210 // of the object, so the offset must be a multiple of kPointerSize. 214 // of the object, so the offset must be a multiple of kPointerSize.
211 ASSERT(IsAligned(offset, kPointerSize)); 215 ASSERT(IsAligned(offset, kPointerSize));
212 216
213 Addu(dst, object, Operand(offset - kHeapObjectTag)); 217 Daddu(dst, object, Operand(offset - kHeapObjectTag));
214 if (emit_debug_code()) { 218 if (emit_debug_code()) {
215 Label ok; 219 Label ok;
216 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1)); 220 And(t8, dst, Operand((1 << kPointerSizeLog2) - 1));
217 Branch(&ok, eq, t8, Operand(zero_reg)); 221 Branch(&ok, eq, t8, Operand(zero_reg));
218 stop("Unaligned cell in write barrier"); 222 stop("Unaligned cell in write barrier");
219 bind(&ok); 223 bind(&ok);
220 } 224 }
221 225
222 RecordWrite(object, 226 RecordWrite(object,
223 dst, 227 dst,
224 value, 228 value,
225 ra_status, 229 ra_status,
226 save_fp, 230 save_fp,
227 remembered_set_action, 231 remembered_set_action,
228 OMIT_SMI_CHECK, 232 OMIT_SMI_CHECK,
229 pointers_to_here_check_for_value); 233 pointers_to_here_check_for_value);
230 234
231 bind(&done); 235 bind(&done);
232 236
233 // Clobber clobbered input registers when running with the debug-code flag 237 // Clobber clobbered input registers when running with the debug-code flag
234 // turned on to provoke errors. 238 // turned on to provoke errors.
235 if (emit_debug_code()) { 239 if (emit_debug_code()) {
236 li(value, Operand(BitCast<int32_t>(kZapValue + 4))); 240 li(value, Operand(BitCast<int64_t>(kZapValue + 4)));
237 li(dst, Operand(BitCast<int32_t>(kZapValue + 8))); 241 li(dst, Operand(BitCast<int64_t>(kZapValue + 8)));
238 } 242 }
239 } 243 }
240 244
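The debug-mode alignment check above relies on two facts: the object register holds a tagged pointer (the underlying address plus kHeapObjectTag), and the field offset is a multiple of kPointerSize, so object + offset - kHeapObjectTag lands back on a pointer-aligned slot. A standalone sketch of that arithmetic, assuming the mips64 values kPointerSize == 8 and kHeapObjectTag == 1:

    #include <cassert>
    #include <cstdint>

    // Mirrors the And(t8, dst, (1 << kPointerSizeLog2) - 1) check in RecordWriteField.
    void CheckFieldSlotAlignment(uintptr_t tagged_object, unsigned offset) {
      const uintptr_t kPointerSize = 8;    // 64-bit slots on mips64 (assumed here).
      const uintptr_t kHeapObjectTag = 1;  // Tag bit on heap object pointers (assumed here).
      assert(offset % kPointerSize == 0);  // Caller contract checked by ASSERT(IsAligned(...)).
      uintptr_t dst = tagged_object + offset - kHeapObjectTag;
      assert((dst & (kPointerSize - 1)) == 0);  // Slot address is pointer-aligned.
    }
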
241 245
242 // Will clobber 4 registers: object, map, dst, ip. The 246 // Will clobber 4 registers: object, map, dst, ip. The
243 // register 'object' contains a heap object pointer. 247 // register 'object' contains a heap object pointer.
244 void MacroAssembler::RecordWriteForMap(Register object, 248 void MacroAssembler::RecordWriteForMap(Register object,
245 Register map, 249 Register map,
246 Register dst, 250 Register dst,
247 RAStatus ra_status, 251 RAStatus ra_status,
248 SaveFPRegsMode fp_mode) { 252 SaveFPRegsMode fp_mode) {
249 if (emit_debug_code()) { 253 if (emit_debug_code()) {
250 ASSERT(!dst.is(at)); 254 ASSERT(!dst.is(at));
251 lw(dst, FieldMemOperand(map, HeapObject::kMapOffset)); 255 ld(dst, FieldMemOperand(map, HeapObject::kMapOffset));
252 Check(eq, 256 Check(eq,
253 kWrongAddressOrValuePassedToRecordWrite, 257 kWrongAddressOrValuePassedToRecordWrite,
254 dst, 258 dst,
255 Operand(isolate()->factory()->meta_map())); 259 Operand(isolate()->factory()->meta_map()));
256 } 260 }
257 261
258 if (!FLAG_incremental_marking) { 262 if (!FLAG_incremental_marking) {
259 return; 263 return;
260 } 264 }
261 265
262 // Count number of write barriers in generated code. 266 // Count number of write barriers in generated code.
263 isolate()->counters()->write_barriers_static()->Increment(); 267 isolate()->counters()->write_barriers_static()->Increment();
264 // TODO(mstarzinger): Dynamic counter missing. 268 // TODO(mstarzinger): Dynamic counter missing.
265 269
266 if (emit_debug_code()) { 270 if (emit_debug_code()) {
267 lw(at, FieldMemOperand(object, HeapObject::kMapOffset)); 271 ld(at, FieldMemOperand(object, HeapObject::kMapOffset));
268 Check(eq, 272 Check(eq,
269 kWrongAddressOrValuePassedToRecordWrite, 273 kWrongAddressOrValuePassedToRecordWrite,
270 map, 274 map,
271 Operand(at)); 275 Operand(at));
272 } 276 }
273 277
274 Label done; 278 Label done;
275 279
276 // A single check of the map's pages interesting flag suffices, since it is 280 // A single check of the map's pages interesting flag suffices, since it is
277 // only set during incremental collection, and then it's also guaranteed that 281 // only set during incremental collection, and then it's also guaranteed that
278 // the from object's page's interesting flag is also set. This optimization 282 // the from object's page's interesting flag is also set. This optimization
279 // relies on the fact that maps can never be in new space. 283 // relies on the fact that maps can never be in new space.
280 CheckPageFlag(map, 284 CheckPageFlag(map,
281 map, // Used as scratch. 285 map, // Used as scratch.
282 MemoryChunk::kPointersToHereAreInterestingMask, 286 MemoryChunk::kPointersToHereAreInterestingMask,
283 eq, 287 eq,
284 &done); 288 &done);
285 289
286 Addu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag)); 290 Daddu(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
287 if (emit_debug_code()) { 291 if (emit_debug_code()) {
288 Label ok; 292 Label ok;
289 And(at, dst, Operand((1 << kPointerSizeLog2) - 1)); 293 And(at, dst, Operand((1 << kPointerSizeLog2) - 1));
290 Branch(&ok, eq, at, Operand(zero_reg)); 294 Branch(&ok, eq, at, Operand(zero_reg));
291 stop("Unaligned cell in write barrier"); 295 stop("Unaligned cell in write barrier");
292 bind(&ok); 296 bind(&ok);
293 } 297 }
294 298
295 // Record the actual write. 299 // Record the actual write.
296 if (ra_status == kRAHasNotBeenSaved) { 300 if (ra_status == kRAHasNotBeenSaved) {
297 push(ra); 301 push(ra);
298 } 302 }
299 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET, 303 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
300 fp_mode); 304 fp_mode);
301 CallStub(&stub); 305 CallStub(&stub);
302 if (ra_status == kRAHasNotBeenSaved) { 306 if (ra_status == kRAHasNotBeenSaved) {
303 pop(ra); 307 pop(ra);
304 } 308 }
305 309
306 bind(&done); 310 bind(&done);
307 311
308 // Clobber clobbered registers when running with the debug-code flag 312 // Clobber clobbered registers when running with the debug-code flag
309 // turned on to provoke errors. 313 // turned on to provoke errors.
310 if (emit_debug_code()) { 314 if (emit_debug_code()) {
311 li(dst, Operand(BitCast<int32_t>(kZapValue + 12))); 315 li(dst, Operand(BitCast<int64_t>(kZapValue + 12)));
312 li(map, Operand(BitCast<int32_t>(kZapValue + 16))); 316 li(map, Operand(BitCast<int64_t>(kZapValue + 16)));
313 } 317 }
314 } 318 }
315 319
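CheckPageFlag, used above to test the map page's pointers-to-here-are-interesting bit, works by masking the pointer down to the start of its MemoryChunk (chunks sit at power-of-two-aligned addresses) and testing a bit in the chunk's flags word. A rough standalone sketch of that idea; the chunk size and flags offset below are illustrative assumptions, not V8's actual layout constants.

    #include <cstdint>

    // Illustrative constants only; the real values come from MemoryChunk.
    const uintptr_t kAssumedChunkSize = uintptr_t{1} << 20;   // Chunk alignment (assumption).
    const uintptr_t kAssumedFlagsOffset = 2 * sizeof(void*);  // Flags word offset (assumption).

    // True if the given page-flag bits are set for the chunk containing 'address'.
    bool PageFlagSet(uintptr_t address, uintptr_t mask) {
      uintptr_t chunk = address & ~(kAssumedChunkSize - 1);  // Mask down to the chunk start.
      uintptr_t flags = *reinterpret_cast<const uintptr_t*>(chunk + kAssumedFlagsOffset);
      return (flags & mask) != 0;  // e.g. MemoryChunk::kPointersToHereAreInterestingMask.
    }
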
316 320
317 // Will clobber 4 registers: object, address, scratch, ip. The 321 // Will clobber 4 registers: object, address, scratch, ip. The
318 // register 'object' contains a heap object pointer. The heap object 322 // register 'object' contains a heap object pointer. The heap object
319 // tag is shifted away. 323 // tag is shifted away.
320 void MacroAssembler::RecordWrite( 324 void MacroAssembler::RecordWrite(
321 Register object, 325 Register object,
322 Register address, 326 Register address,
323 Register value, 327 Register value,
324 RAStatus ra_status, 328 RAStatus ra_status,
325 SaveFPRegsMode fp_mode, 329 SaveFPRegsMode fp_mode,
326 RememberedSetAction remembered_set_action, 330 RememberedSetAction remembered_set_action,
327 SmiCheck smi_check, 331 SmiCheck smi_check,
328 PointersToHereCheck pointers_to_here_check_for_value) { 332 PointersToHereCheck pointers_to_here_check_for_value) {
329 ASSERT(!AreAliased(object, address, value, t8)); 333 ASSERT(!AreAliased(object, address, value, t8));
330 ASSERT(!AreAliased(object, address, value, t9)); 334 ASSERT(!AreAliased(object, address, value, t9));
331 335
332 if (emit_debug_code()) { 336 if (emit_debug_code()) {
333 lw(at, MemOperand(address)); 337 ld(at, MemOperand(address));
334 Assert( 338 Assert(
335 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value)); 339 eq, kWrongAddressOrValuePassedToRecordWrite, at, Operand(value));
336 } 340 }
337 341
338 if (remembered_set_action == OMIT_REMEMBERED_SET && 342 if (remembered_set_action == OMIT_REMEMBERED_SET &&
339 !FLAG_incremental_marking) { 343 !FLAG_incremental_marking) {
340 return; 344 return;
341 } 345 }
342 346
343 // Count number of write barriers in generated code. 347 // Count number of write barriers in generated code.
(...skipping 31 matching lines...)
375 CallStub(&stub); 379 CallStub(&stub);
376 if (ra_status == kRAHasNotBeenSaved) { 380 if (ra_status == kRAHasNotBeenSaved) {
377 pop(ra); 381 pop(ra);
378 } 382 }
379 383
380 bind(&done); 384 bind(&done);
381 385
382 // Clobber clobbered registers when running with the debug-code flag 386 // Clobber clobbered registers when running with the debug-code flag
383 // turned on to provoke errors. 387 // turned on to provoke errors.
384 if (emit_debug_code()) { 388 if (emit_debug_code()) {
385 li(address, Operand(BitCast<int32_t>(kZapValue + 12))); 389 li(address, Operand(BitCast<int64_t>(kZapValue + 12)));
386 li(value, Operand(BitCast<int32_t>(kZapValue + 16))); 390 li(value, Operand(BitCast<int64_t>(kZapValue + 16)));
387 } 391 }
388 } 392 }
389 393
390 394
391 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests. 395 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
392 Register address, 396 Register address,
393 Register scratch, 397 Register scratch,
394 SaveFPRegsMode fp_mode, 398 SaveFPRegsMode fp_mode,
395 RememberedSetFinalAction and_then) { 399 RememberedSetFinalAction and_then) {
396 Label done; 400 Label done;
397 if (emit_debug_code()) { 401 if (emit_debug_code()) {
398 Label ok; 402 Label ok;
399 JumpIfNotInNewSpace(object, scratch, &ok); 403 JumpIfNotInNewSpace(object, scratch, &ok);
400 stop("Remembered set pointer is in new space"); 404 stop("Remembered set pointer is in new space");
401 bind(&ok); 405 bind(&ok);
402 } 406 }
403 // Load store buffer top. 407 // Load store buffer top.
404 ExternalReference store_buffer = 408 ExternalReference store_buffer =
405 ExternalReference::store_buffer_top(isolate()); 409 ExternalReference::store_buffer_top(isolate());
406 li(t8, Operand(store_buffer)); 410 li(t8, Operand(store_buffer));
407 lw(scratch, MemOperand(t8)); 411 ld(scratch, MemOperand(t8));
408 // Store pointer to buffer and increment buffer top. 412 // Store pointer to buffer and increment buffer top.
409 sw(address, MemOperand(scratch)); 413 sd(address, MemOperand(scratch));
410 Addu(scratch, scratch, kPointerSize); 414 Daddu(scratch, scratch, kPointerSize);
411 // Write back new top of buffer. 415 // Write back new top of buffer.
412 sw(scratch, MemOperand(t8)); 416 sd(scratch, MemOperand(t8));
413 // Call stub on end of buffer. 417 // Call stub on end of buffer.
414 // Check for end of buffer. 418 // Check for end of buffer.
415 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit)); 419 And(t8, scratch, Operand(StoreBuffer::kStoreBufferOverflowBit));
420 ASSERT(!scratch.is(t8));
416 if (and_then == kFallThroughAtEnd) { 421 if (and_then == kFallThroughAtEnd) {
417 Branch(&done, eq, t8, Operand(zero_reg)); 422 Branch(&done, eq, t8, Operand(zero_reg));
418 } else { 423 } else {
419 ASSERT(and_then == kReturnAtEnd); 424 ASSERT(and_then == kReturnAtEnd);
420 Ret(eq, t8, Operand(zero_reg)); 425 Ret(eq, t8, Operand(zero_reg));
421 } 426 }
422 push(ra); 427 push(ra);
423 StoreBufferOverflowStub store_buffer_overflow = 428 StoreBufferOverflowStub store_buffer_overflow =
424 StoreBufferOverflowStub(isolate(), fp_mode); 429 StoreBufferOverflowStub(isolate(), fp_mode);
425 CallStub(&store_buffer_overflow); 430 CallStub(&store_buffer_overflow);
(...skipping 12 matching lines...)
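
RememberedSetHelper above appends the slot address to the store buffer: load the buffer top, store the address there, bump the top by one pointer, write the top back, and then test kStoreBufferOverflowBit in the new top to decide whether the overflow stub must run. A simplified standalone sketch of that append-and-check pattern; the overflow-bit value is an assumption for illustration.

    #include <cstdint>

    const uintptr_t kAssumedOverflowBit = uintptr_t{1} << 18;  // Stand-in for kStoreBufferOverflowBit.

    // Mirrors the ld/sd/Daddu/sd sequence plus the And(t8, ...) overflow test above.
    // Returns true when the caller must take the StoreBufferOverflowStub path.
    bool StoreBufferAppend(uintptr_t** top_cell, uintptr_t slot_address) {
      uintptr_t* top = *top_cell;  // Load store buffer top.
      *top++ = slot_address;       // Store pointer to buffer and increment buffer top.
      *top_cell = top;             // Write back new top of buffer.
      return (reinterpret_cast<uintptr_t>(top) & kAssumedOverflowBit) != 0;
    }
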
438 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg, 443 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
439 Register scratch, 444 Register scratch,
440 Label* miss) { 445 Label* miss) {
441 Label same_contexts; 446 Label same_contexts;
442 447
443 ASSERT(!holder_reg.is(scratch)); 448 ASSERT(!holder_reg.is(scratch));
444 ASSERT(!holder_reg.is(at)); 449 ASSERT(!holder_reg.is(at));
445 ASSERT(!scratch.is(at)); 450 ASSERT(!scratch.is(at));
446 451
447 // Load current lexical context from the stack frame. 452 // Load current lexical context from the stack frame.
448 lw(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset)); 453 ld(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
449 // In debug mode, make sure the lexical context is set. 454 // In debug mode, make sure the lexical context is set.
450 #ifdef DEBUG 455 #ifdef DEBUG
451 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext, 456 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext,
452 scratch, Operand(zero_reg)); 457 scratch, Operand(zero_reg));
453 #endif 458 #endif
454 459
455 // Load the native context of the current context. 460 // Load the native context of the current context.
456 int offset = 461 int offset =
457 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize; 462 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
458 lw(scratch, FieldMemOperand(scratch, offset)); 463 ld(scratch, FieldMemOperand(scratch, offset));
459 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); 464 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
460 465
461 // Check the context is a native context. 466 // Check the context is a native context.
462 if (emit_debug_code()) { 467 if (emit_debug_code()) {
463 push(holder_reg); // Temporarily save holder on the stack. 468 push(holder_reg); // Temporarily save holder on the stack.
464 // Read the first word and compare to the native_context_map. 469 // Read the first word and compare to the native_context_map.
465 lw(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset)); 470 ld(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
466 LoadRoot(at, Heap::kNativeContextMapRootIndex); 471 LoadRoot(at, Heap::kNativeContextMapRootIndex);
467 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext, 472 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
468 holder_reg, Operand(at)); 473 holder_reg, Operand(at));
469 pop(holder_reg); // Restore holder. 474 pop(holder_reg); // Restore holder.
470 } 475 }
471 476
472 // Check if both contexts are the same. 477 // Check if both contexts are the same.
473 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); 478 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
474 Branch(&same_contexts, eq, scratch, Operand(at)); 479 Branch(&same_contexts, eq, scratch, Operand(at));
475 480
476 // Check the context is a native context. 481 // Check the context is a native context.
477 if (emit_debug_code()) { 482 if (emit_debug_code()) {
478 push(holder_reg); // Temporarily save holder on the stack. 483 push(holder_reg); // Temporarily save holder on the stack.
479 mov(holder_reg, at); // Move at to its holding place. 484 mov(holder_reg, at); // Move at to its holding place.
480 LoadRoot(at, Heap::kNullValueRootIndex); 485 LoadRoot(at, Heap::kNullValueRootIndex);
481 Check(ne, kJSGlobalProxyContextShouldNotBeNull, 486 Check(ne, kJSGlobalProxyContextShouldNotBeNull,
482 holder_reg, Operand(at)); 487 holder_reg, Operand(at));
483 488
484 lw(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset)); 489 ld(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
485 LoadRoot(at, Heap::kNativeContextMapRootIndex); 490 LoadRoot(at, Heap::kNativeContextMapRootIndex);
486 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext, 491 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext,
487 holder_reg, Operand(at)); 492 holder_reg, Operand(at));
488 // Restore at is not needed. at is reloaded below. 493 // Restore at is not needed. at is reloaded below.
489 pop(holder_reg); // Restore holder. 494 pop(holder_reg); // Restore holder.
490 // Restore at to holder's context. 495 // Restore at to holder's context.
491 lw(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset)); 496 ld(at, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
492 } 497 }
493 498
494 // Check that the security token in the calling global object is 499 // Check that the security token in the calling global object is
495 // compatible with the security token in the receiving global 500 // compatible with the security token in the receiving global
496 // object. 501 // object.
497 int token_offset = Context::kHeaderSize + 502 int token_offset = Context::kHeaderSize +
498 Context::SECURITY_TOKEN_INDEX * kPointerSize; 503 Context::SECURITY_TOKEN_INDEX * kPointerSize;
499 504
500 lw(scratch, FieldMemOperand(scratch, token_offset)); 505 ld(scratch, FieldMemOperand(scratch, token_offset));
501 lw(at, FieldMemOperand(at, token_offset)); 506 ld(at, FieldMemOperand(at, token_offset));
502 Branch(miss, ne, scratch, Operand(at)); 507 Branch(miss, ne, scratch, Operand(at));
503 508
504 bind(&same_contexts); 509 bind(&same_contexts);
505 } 510 }
506 511
507 512
508 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) { 513 void MacroAssembler::GetNumberHash(Register reg0, Register scratch) {
509 // First of all we assign the hash seed to scratch. 514 // First of all we assign the hash seed to scratch.
510 LoadRoot(scratch, Heap::kHashSeedRootIndex); 515 LoadRoot(scratch, Heap::kHashSeedRootIndex);
511 SmiUntag(scratch); 516 SmiUntag(scratch);
512 517
513 // Xor original key with a seed. 518 // Xor original key with a seed.
514 xor_(reg0, reg0, scratch); 519 xor_(reg0, reg0, scratch);
515 520
516 // Compute the hash code from the untagged key. This must be kept in sync 521 // Compute the hash code from the untagged key. This must be kept in sync
517 // with ComputeIntegerHash in utils.h. 522 // with ComputeIntegerHash in utils.h.
518 // 523 //
519 // hash = ~hash + (hash << 15); 524 // hash = ~hash + (hash << 15);
525 // The algorithm uses 32-bit integer values.
520 nor(scratch, reg0, zero_reg); 526 nor(scratch, reg0, zero_reg);
521 sll(at, reg0, 15); 527 sll(at, reg0, 15);
522 addu(reg0, scratch, at); 528 addu(reg0, scratch, at);
523 529
524 // hash = hash ^ (hash >> 12); 530 // hash = hash ^ (hash >> 12);
525 srl(at, reg0, 12); 531 srl(at, reg0, 12);
526 xor_(reg0, reg0, at); 532 xor_(reg0, reg0, at);
527 533
528 // hash = hash + (hash << 2); 534 // hash = hash + (hash << 2);
529 sll(at, reg0, 2); 535 sll(at, reg0, 2);
(...skipping 42 matching lines...)
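
GetNumberHash above mirrors ComputeIntegerHash from utils.h and, as the new comment notes, stays in 32-bit arithmetic even on mips64 (plain sll/srl/addu rather than the d-variants). A standalone C++ version of the sequence follows; only the first three steps are visible above, and the remaining steps are reproduced from the usual ComputeIntegerHash recipe as an assumption to be checked against utils.h.

    #include <cstdint>

    // 32-bit integer hash matching the shift-and-add sequence GetNumberHash emits.
    uint32_t ComputeIntegerHashSketch(uint32_t key, uint32_t seed) {
      uint32_t hash = key ^ seed;    // Xor original key with a seed.
      hash = ~hash + (hash << 15);   // nor / sll / addu
      hash = hash ^ (hash >> 12);    // srl / xor
      hash = hash + (hash << 2);     // sll / addu
      hash = hash ^ (hash >> 4);     // remaining steps are in the elided lines above (assumed)
      hash = hash * 2057;            // hash + (hash << 3) + (hash << 11)
      hash = hash ^ (hash >> 16);
      return hash;
    }
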
572 // 578 //
573 // reg1 - Used to hold the capacity mask of the dictionary. 579 // reg1 - Used to hold the capacity mask of the dictionary.
574 // 580 //
575 // reg2 - Used for the index into the dictionary. 581 // reg2 - Used for the index into the dictionary.
576 // at - Temporary (avoid MacroAssembler instructions also using 'at'). 582 // at - Temporary (avoid MacroAssembler instructions also using 'at').
577 Label done; 583 Label done;
578 584
579 GetNumberHash(reg0, reg1); 585 GetNumberHash(reg0, reg1);
580 586
581 // Compute the capacity mask. 587 // Compute the capacity mask.
582 lw(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset)); 588 ld(reg1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
583 sra(reg1, reg1, kSmiTagSize); 589 SmiUntag(reg1, reg1);
584 Subu(reg1, reg1, Operand(1)); 590 Dsubu(reg1, reg1, Operand(1));
585 591
586 // Generate an unrolled loop that performs a few probes before giving up. 592 // Generate an unrolled loop that performs a few probes before giving up.
587 for (int i = 0; i < kNumberDictionaryProbes; i++) { 593 for (int i = 0; i < kNumberDictionaryProbes; i++) {
588 // Use reg2 for index calculations and keep the hash intact in reg0. 594 // Use reg2 for index calculations and keep the hash intact in reg0.
589 mov(reg2, reg0); 595 mov(reg2, reg0);
590 // Compute the masked index: (hash + i + i * i) & mask. 596 // Compute the masked index: (hash + i + i * i) & mask.
591 if (i > 0) { 597 if (i > 0) {
592 Addu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i))); 598 Daddu(reg2, reg2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
593 } 599 }
594 and_(reg2, reg2, reg1); 600 and_(reg2, reg2, reg1);
595 601
596 // Scale the index by multiplying by the element size. 602 // Scale the index by multiplying by the element size.
597 ASSERT(SeededNumberDictionary::kEntrySize == 3); 603 ASSERT(SeededNumberDictionary::kEntrySize == 3);
598 sll(at, reg2, 1); // 2x. 604 dsll(at, reg2, 1); // 2x.
599 addu(reg2, reg2, at); // reg2 = reg2 * 3. 605 daddu(reg2, reg2, at); // reg2 = reg2 * 3.
600 606
601 // Check if the key is identical to the name. 607 // Check if the key is identical to the name.
602 sll(at, reg2, kPointerSizeLog2); 608 dsll(at, reg2, kPointerSizeLog2);
603 addu(reg2, elements, at); 609 daddu(reg2, elements, at);
604 610
605 lw(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset)); 611 ld(at, FieldMemOperand(reg2, SeededNumberDictionary::kElementsStartOffset));
606 if (i != kNumberDictionaryProbes - 1) { 612 if (i != kNumberDictionaryProbes - 1) {
607 Branch(&done, eq, key, Operand(at)); 613 Branch(&done, eq, key, Operand(at));
608 } else { 614 } else {
609 Branch(miss, ne, key, Operand(at)); 615 Branch(miss, ne, key, Operand(at));
610 } 616 }
611 } 617 }
612 618
613 bind(&done); 619 bind(&done);
614 // Check that the value is a normal property. 620 // Check that the value is a normal property.
615 // reg2: elements + (index * kPointerSize). 621 // reg2: elements + (index * kPointerSize).
616 const int kDetailsOffset = 622 const int kDetailsOffset =
617 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize; 623 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
618 lw(reg1, FieldMemOperand(reg2, kDetailsOffset)); 624 ld(reg1, FieldMemOperand(reg2, kDetailsOffset));
619 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask))); 625 And(at, reg1, Operand(Smi::FromInt(PropertyDetails::TypeField::kMask)));
620 Branch(miss, ne, at, Operand(zero_reg)); 626 Branch(miss, ne, at, Operand(zero_reg));
621 627
622 // Get the value at the masked, scaled index and return. 628 // Get the value at the masked, scaled index and return.
623 const int kValueOffset = 629 const int kValueOffset =
624 SeededNumberDictionary::kElementsStartOffset + kPointerSize; 630 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
625 lw(result, FieldMemOperand(reg2, kValueOffset)); 631 ld(result, FieldMemOperand(reg2, kValueOffset));
626 } 632 }
627 633
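The probe loop above implements quadratic probing over a power-of-two-sized dictionary: on probe i the masked index is (hash + i + i*i) & mask, and because SeededNumberDictionary::kEntrySize == 3 the element index of the entry is index * 3, computed with a shift and an add (2x + x). A standalone sketch of that index arithmetic:

    #include <cstdint>

    // Element index inspected on the i-th probe, mirroring the unrolled loop above.
    // 'capacity' must be a power of two; each entry occupies kEntrySize == 3 elements.
    uint32_t ProbeEntryElementIndex(uint32_t hash, uint32_t capacity, uint32_t i) {
      uint32_t mask = capacity - 1;                // Capacity mask kept in reg1.
      uint32_t index = (hash + i + i * i) & mask;  // Masked index kept in reg2.
      return index * 3;                            // dsll by 1 then daddu: 2*index + index.
    }
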
628 634
629 // --------------------------------------------------------------------------- 635 // ---------------------------------------------------------------------------
630 // Instruction macros. 636 // Instruction macros.
631 637
632 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) { 638 void MacroAssembler::Addu(Register rd, Register rs, const Operand& rt) {
633 if (rt.is_reg()) { 639 if (rt.is_reg()) {
634 addu(rd, rs, rt.rm()); 640 addu(rd, rs, rt.rm());
635 } else { 641 } else {
636 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 642 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
637 addiu(rd, rs, rt.imm32_); 643 addiu(rd, rs, rt.imm64_);
638 } else { 644 } else {
639 // li handles the relocation. 645 // li handles the relocation.
640 ASSERT(!rs.is(at)); 646 ASSERT(!rs.is(at));
641 li(at, rt); 647 li(at, rt);
642 addu(rd, rs, at); 648 addu(rd, rs, at);
643 } 649 }
644 } 650 }
645 } 651 }
646 652
647 653
654 void MacroAssembler::Daddu(Register rd, Register rs, const Operand& rt) {
655 if (rt.is_reg()) {
656 daddu(rd, rs, rt.rm());
657 } else {
658 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
659 daddiu(rd, rs, rt.imm64_);
660 } else {
661 // li handles the relocation.
662 ASSERT(!rs.is(at));
663 li(at, rt);
664 daddu(rd, rs, at);
665 }
666 }
667 }
668
669
648 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) { 670 void MacroAssembler::Subu(Register rd, Register rs, const Operand& rt) {
649 if (rt.is_reg()) { 671 if (rt.is_reg()) {
650 subu(rd, rs, rt.rm()); 672 subu(rd, rs, rt.rm());
651 } else { 673 } else {
652 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 674 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
653 addiu(rd, rs, -rt.imm32_); // No subiu instr, use addiu(x, y, -imm). 675 addiu(rd, rs, -rt.imm64_); // No subiu instr, use addiu(x, y, -imm).
654 } else { 676 } else {
655 // li handles the relocation. 677 // li handles the relocation.
656 ASSERT(!rs.is(at)); 678 ASSERT(!rs.is(at));
657 li(at, rt); 679 li(at, rt);
658 subu(rd, rs, at); 680 subu(rd, rs, at);
659 } 681 }
660 } 682 }
661 } 683 }
662 684
663 685
686 void MacroAssembler::Dsubu(Register rd, Register rs, const Operand& rt) {
687 if (rt.is_reg()) {
688 dsubu(rd, rs, rt.rm());
689 } else {
690 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
691 daddiu(rd, rs, -rt.imm64_); // No dsubiu instr, use daddiu(x, y, -imm).
692 } else {
693 // li handles the relocation.
694 ASSERT(!rs.is(at));
695 li(at, rt);
696 dsubu(rd, rs, at);
697 }
698 }
699 }
700
701
664 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) { 702 void MacroAssembler::Mul(Register rd, Register rs, const Operand& rt) {
665 if (rt.is_reg()) { 703 if (rt.is_reg()) {
666 if (kArchVariant == kLoongson) { 704 if (kArchVariant == kLoongson) {
667 mult(rs, rt.rm()); 705 mult(rs, rt.rm());
668 mflo(rd); 706 mflo(rd);
669 } else { 707 } else {
670 mul(rd, rs, rt.rm()); 708 mul(rd, rs, rt.rm());
671 } 709 }
672 } else { 710 } else {
673 // li handles the relocation. 711 // li handles the relocation.
674 ASSERT(!rs.is(at)); 712 ASSERT(!rs.is(at));
675 li(at, rt); 713 li(at, rt);
676 if (kArchVariant == kLoongson) { 714 if (kArchVariant == kLoongson) {
677 mult(rs, at); 715 mult(rs, at);
678 mflo(rd); 716 mflo(rd);
679 } else { 717 } else {
680 mul(rd, rs, at); 718 mul(rd, rs, at);
681 } 719 }
682 } 720 }
683 } 721 }
684 722
685 723
724 void MacroAssembler::Dmul(Register rd, Register rs, const Operand& rt) {
725 if (rt.is_reg()) {
726 if (kArchVariant == kLoongson) {
727 dmult(rs, rt.rm());
728 mflo(rd);
729 } else {
730 // TODO(yuyin):
731 // dmul(rd, rs, rt.rm());
732 dmult(rs, rt.rm());
733 mflo(rd);
734 }
735 } else {
736 // li handles the relocation.
737 ASSERT(!rs.is(at));
738 li(at, rt);
739 if (kArchVariant == kLoongson) {
740 dmult(rs, at);
741 mflo(rd);
742 } else {
743 // TODO(yuyin):
744 // dmul(rd, rs, at);
745 dmult(rs, at);
746 mflo(rd);
747 }
748 }
749 }
750
751
686 void MacroAssembler::Mult(Register rs, const Operand& rt) { 752 void MacroAssembler::Mult(Register rs, const Operand& rt) {
687 if (rt.is_reg()) { 753 if (rt.is_reg()) {
688 mult(rs, rt.rm()); 754 mult(rs, rt.rm());
689 } else { 755 } else {
690 // li handles the relocation. 756 // li handles the relocation.
691 ASSERT(!rs.is(at)); 757 ASSERT(!rs.is(at));
692 li(at, rt); 758 li(at, rt);
693 mult(rs, at); 759 mult(rs, at);
694 } 760 }
695 } 761 }
696 762
697 763
764 void MacroAssembler::Dmult(Register rs, const Operand& rt) {
765 if (rt.is_reg()) {
766 dmult(rs, rt.rm());
767 } else {
768 // li handles the relocation.
769 ASSERT(!rs.is(at));
770 li(at, rt);
771 dmult(rs, at);
772 }
773 }
774
775
698 void MacroAssembler::Multu(Register rs, const Operand& rt) { 776 void MacroAssembler::Multu(Register rs, const Operand& rt) {
699 if (rt.is_reg()) { 777 if (rt.is_reg()) {
700 multu(rs, rt.rm()); 778 multu(rs, rt.rm());
701 } else { 779 } else {
702 // li handles the relocation. 780 // li handles the relocation.
703 ASSERT(!rs.is(at)); 781 ASSERT(!rs.is(at));
704 li(at, rt); 782 li(at, rt);
705 multu(rs, at); 783 multu(rs, at);
706 } 784 }
707 } 785 }
708 786
709 787
788 void MacroAssembler::Dmultu(Register rs, const Operand& rt) {
789 if (rt.is_reg()) {
790 dmultu(rs, rt.rm());
791 } else {
792 // li handles the relocation.
793 ASSERT(!rs.is(at));
794 li(at, rt);
795 dmultu(rs, at);
796 }
797 }
798
799
710 void MacroAssembler::Div(Register rs, const Operand& rt) { 800 void MacroAssembler::Div(Register rs, const Operand& rt) {
711 if (rt.is_reg()) { 801 if (rt.is_reg()) {
712 div(rs, rt.rm()); 802 div(rs, rt.rm());
713 } else { 803 } else {
714 // li handles the relocation. 804 // li handles the relocation.
715 ASSERT(!rs.is(at)); 805 ASSERT(!rs.is(at));
716 li(at, rt); 806 li(at, rt);
717 div(rs, at); 807 div(rs, at);
718 } 808 }
719 } 809 }
720 810
721 811
812 void MacroAssembler::Ddiv(Register rs, const Operand& rt) {
813 if (rt.is_reg()) {
814 ddiv(rs, rt.rm());
815 } else {
816 // li handles the relocation.
817 ASSERT(!rs.is(at));
818 li(at, rt);
819 ddiv(rs, at);
820 }
821 }
822
823
722 void MacroAssembler::Divu(Register rs, const Operand& rt) { 824 void MacroAssembler::Divu(Register rs, const Operand& rt) {
723 if (rt.is_reg()) { 825 if (rt.is_reg()) {
724 divu(rs, rt.rm()); 826 divu(rs, rt.rm());
725 } else { 827 } else {
726 // li handles the relocation. 828 // li handles the relocation.
727 ASSERT(!rs.is(at)); 829 ASSERT(!rs.is(at));
728 li(at, rt); 830 li(at, rt);
729 divu(rs, at); 831 divu(rs, at);
730 } 832 }
731 } 833 }
732 834
733 835
836 void MacroAssembler::Ddivu(Register rs, const Operand& rt) {
837 if (rt.is_reg()) {
838 ddivu(rs, rt.rm());
839 } else {
840 // li handles the relocation.
841 ASSERT(!rs.is(at));
842 li(at, rt);
843 ddivu(rs, at);
844 }
845 }
846
847
734 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) { 848 void MacroAssembler::And(Register rd, Register rs, const Operand& rt) {
735 if (rt.is_reg()) { 849 if (rt.is_reg()) {
736 and_(rd, rs, rt.rm()); 850 and_(rd, rs, rt.rm());
737 } else { 851 } else {
738 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 852 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
739 andi(rd, rs, rt.imm32_); 853 andi(rd, rs, rt.imm64_);
740 } else { 854 } else {
741 // li handles the relocation. 855 // li handles the relocation.
742 ASSERT(!rs.is(at)); 856 ASSERT(!rs.is(at));
743 li(at, rt); 857 li(at, rt);
744 and_(rd, rs, at); 858 and_(rd, rs, at);
745 } 859 }
746 } 860 }
747 } 861 }
748 862
749 863
750 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) { 864 void MacroAssembler::Or(Register rd, Register rs, const Operand& rt) {
751 if (rt.is_reg()) { 865 if (rt.is_reg()) {
752 or_(rd, rs, rt.rm()); 866 or_(rd, rs, rt.rm());
753 } else { 867 } else {
754 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 868 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
755 ori(rd, rs, rt.imm32_); 869 ori(rd, rs, rt.imm64_);
756 } else { 870 } else {
757 // li handles the relocation. 871 // li handles the relocation.
758 ASSERT(!rs.is(at)); 872 ASSERT(!rs.is(at));
759 li(at, rt); 873 li(at, rt);
760 or_(rd, rs, at); 874 or_(rd, rs, at);
761 } 875 }
762 } 876 }
763 } 877 }
764 878
765 879
766 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) { 880 void MacroAssembler::Xor(Register rd, Register rs, const Operand& rt) {
767 if (rt.is_reg()) { 881 if (rt.is_reg()) {
768 xor_(rd, rs, rt.rm()); 882 xor_(rd, rs, rt.rm());
769 } else { 883 } else {
770 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 884 if (is_uint16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
771 xori(rd, rs, rt.imm32_); 885 xori(rd, rs, rt.imm64_);
772 } else { 886 } else {
773 // li handles the relocation. 887 // li handles the relocation.
774 ASSERT(!rs.is(at)); 888 ASSERT(!rs.is(at));
775 li(at, rt); 889 li(at, rt);
776 xor_(rd, rs, at); 890 xor_(rd, rs, at);
777 } 891 }
778 } 892 }
779 } 893 }
780 894
781 895
(...skipping 15 matching lines...)
797 ASSERT(!at.is(rt.rm())); 911 ASSERT(!at.is(rt.rm()));
798 li(at, -1); 912 li(at, -1);
799 xor_(rs, rt.rm(), at); 913 xor_(rs, rt.rm(), at);
800 } 914 }
801 915
802 916
803 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) { 917 void MacroAssembler::Slt(Register rd, Register rs, const Operand& rt) {
804 if (rt.is_reg()) { 918 if (rt.is_reg()) {
805 slt(rd, rs, rt.rm()); 919 slt(rd, rs, rt.rm());
806 } else { 920 } else {
807 if (is_int16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 921 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
808 slti(rd, rs, rt.imm32_); 922 slti(rd, rs, rt.imm64_);
809 } else { 923 } else {
810 // li handles the relocation. 924 // li handles the relocation.
811 ASSERT(!rs.is(at)); 925 ASSERT(!rs.is(at));
812 li(at, rt); 926 li(at, rt);
813 slt(rd, rs, at); 927 slt(rd, rs, at);
814 } 928 }
815 } 929 }
816 } 930 }
817 931
818 932
819 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) { 933 void MacroAssembler::Sltu(Register rd, Register rs, const Operand& rt) {
820 if (rt.is_reg()) { 934 if (rt.is_reg()) {
821 sltu(rd, rs, rt.rm()); 935 sltu(rd, rs, rt.rm());
822 } else { 936 } else {
823 if (is_uint16(rt.imm32_) && !MustUseReg(rt.rmode_)) { 937 if (is_int16(rt.imm64_) && !MustUseReg(rt.rmode_)) {
824 sltiu(rd, rs, rt.imm32_); 938 sltiu(rd, rs, rt.imm64_);
825 } else { 939 } else {
826 // li handles the relocation. 940 // li handles the relocation.
827 ASSERT(!rs.is(at)); 941 ASSERT(!rs.is(at));
828 li(at, rt); 942 li(at, rt);
829 sltu(rd, rs, at); 943 sltu(rd, rs, at);
830 } 944 }
831 } 945 }
832 } 946 }
833 947
834 948
835 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) { 949 void MacroAssembler::Ror(Register rd, Register rs, const Operand& rt) {
836 if (kArchVariant == kMips32r2) { 950 if (kArchVariant == kMips64r2) {
837 if (rt.is_reg()) { 951 if (rt.is_reg()) {
838 rotrv(rd, rs, rt.rm()); 952 rotrv(rd, rs, rt.rm());
839 } else { 953 } else {
840 rotr(rd, rs, rt.imm32_); 954 rotr(rd, rs, rt.imm64_);
841 } 955 }
842 } else { 956 } else {
843 if (rt.is_reg()) { 957 if (rt.is_reg()) {
844 subu(at, zero_reg, rt.rm()); 958 subu(at, zero_reg, rt.rm());
845 sllv(at, rs, at); 959 sllv(at, rs, at);
846 srlv(rd, rs, rt.rm()); 960 srlv(rd, rs, rt.rm());
847 or_(rd, rd, at); 961 or_(rd, rd, at);
848 } else { 962 } else {
849 if (rt.imm32_ == 0) { 963 if (rt.imm64_ == 0) {
850 srl(rd, rs, 0); 964 srl(rd, rs, 0);
851 } else { 965 } else {
852 srl(at, rs, rt.imm32_); 966 srl(at, rs, rt.imm64_);
853 sll(rd, rs, (0x20 - rt.imm32_) & 0x1f); 967 sll(rd, rs, (0x20 - rt.imm64_) & 0x1f);
854 or_(rd, rd, at); 968 or_(rd, rd, at);
855 } 969 }
856 } 970 }
857 } 971 }
858 } 972 }
859 973
860 974
975 void MacroAssembler::Dror(Register rd, Register rs, const Operand& rt) {
976 if (rt.is_reg()) {
977 drotrv(rd, rs, rt.rm());
978 } else {
979 drotr(rd, rs, rt.imm64_);
980 }
981 }
982
983
861 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) { 984 void MacroAssembler::Pref(int32_t hint, const MemOperand& rs) {
862 if (kArchVariant == kLoongson) { 985 if (kArchVariant == kLoongson) {
863 lw(zero_reg, rs); 986 lw(zero_reg, rs);
864 } else { 987 } else {
865 pref(hint, rs); 988 pref(hint, rs);
866 } 989 }
867 } 990 }
868 991
869 992
870 // ------------Pseudo-instructions------------- 993 // ------------Pseudo-instructions-------------
871 994
872 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) { 995 void MacroAssembler::Ulw(Register rd, const MemOperand& rs) {
873 lwr(rd, rs); 996 lwr(rd, rs);
874 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3)); 997 lwl(rd, MemOperand(rs.rm(), rs.offset() + 3));
875 } 998 }
876 999
877 1000
878 void MacroAssembler::Usw(Register rd, const MemOperand& rs) { 1001 void MacroAssembler::Usw(Register rd, const MemOperand& rs) {
879 swr(rd, rs); 1002 swr(rd, rs);
880 swl(rd, MemOperand(rs.rm(), rs.offset() + 3)); 1003 swl(rd, MemOperand(rs.rm(), rs.offset() + 3));
881 } 1004 }
882 1005
883 1006
1007 // Do 64-bit load from unaligned address. Note this only handles
1008 // the specific case of 32-bit aligned, but not 64-bit aligned.
1009 void MacroAssembler::Uld(Register rd, const MemOperand& rs, Register scratch) {
1010 // Assert fail if the offset from start of object IS actually aligned.
1011 // ONLY use with known misalignment, since there is performance cost.
1012 ASSERT((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1013 // TODO(plind): endian dependency.
1014 lwu(rd, rs);
1015 lw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1016 dsll32(scratch, scratch, 0);
1017 Daddu(rd, rd, scratch);
1018 }
1019
1020
1021 // Do 64-bit store to unaligned address. Note this only handles
1022 // the specific case of 32-bit aligned, but not 64-bit aligned.
1023 void MacroAssembler::Usd(Register rd, const MemOperand& rs, Register scratch) {
1024 // Assert fail if the offset from start of object IS actually aligned.
1025 // ONLY use with known misalignment, since there is performance cost.
1026 ASSERT((rs.offset() + kHeapObjectTag) & (kPointerSize - 1));
1027 // TODO(plind): endian dependency.
1028 sw(rd, rs);
1029 dsrl32(scratch, rd, 0);
1030 sw(scratch, MemOperand(rs.rm(), rs.offset() + kPointerSize / 2));
1031 }
1032
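Uld and Usd above cover the one misalignment pattern that tagged heap offsets produce on mips64: an address that is 32-bit aligned but not 64-bit aligned. The 64-bit value is moved as two 32-bit halves, the low word with lwu (zero-extended) and the high word with lw, shifted left by 32 and added. A standalone little-endian sketch of the same recombination (the endian assumption matches the TODO above):

    #include <cstdint>
    #include <cstring>

    // Reassemble a 64-bit value from two 32-bit halves at a 4-byte-aligned address,
    // as Uld does with lwu + lw, dsll32 and Daddu. Little-endian layout assumed.
    uint64_t LoadUnaligned64(const void* addr) {
      uint32_t lo, hi;
      std::memcpy(&lo, addr, sizeof(lo));                                         // lwu rd, rs
      std::memcpy(&hi, static_cast<const char*>(addr) + sizeof(lo), sizeof(hi));  // lw scratch, rs + 4
      return static_cast<uint64_t>(lo) + (static_cast<uint64_t>(hi) << 32);       // dsll32 + Daddu
    }
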
1033
884 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) { 1034 void MacroAssembler::li(Register dst, Handle<Object> value, LiFlags mode) {
885 AllowDeferredHandleDereference smi_check; 1035 AllowDeferredHandleDereference smi_check;
886 if (value->IsSmi()) { 1036 if (value->IsSmi()) {
887 li(dst, Operand(value), mode); 1037 li(dst, Operand(value), mode);
888 } else { 1038 } else {
889 ASSERT(value->IsHeapObject()); 1039 ASSERT(value->IsHeapObject());
890 if (isolate()->heap()->InNewSpace(*value)) { 1040 if (isolate()->heap()->InNewSpace(*value)) {
891 Handle<Cell> cell = isolate()->factory()->NewCell(value); 1041 Handle<Cell> cell = isolate()->factory()->NewCell(value);
892 li(dst, Operand(cell)); 1042 li(dst, Operand(cell));
893 lw(dst, FieldMemOperand(dst, Cell::kValueOffset)); 1043 ld(dst, FieldMemOperand(dst, Cell::kValueOffset));
894 } else { 1044 } else {
895 li(dst, Operand(value)); 1045 li(dst, Operand(value));
896 } 1046 }
897 } 1047 }
898 } 1048 }
899 1049
900 1050
901 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) { 1051 void MacroAssembler::li(Register rd, Operand j, LiFlags mode) {
902 ASSERT(!j.is_reg()); 1052 ASSERT(!j.is_reg());
903 BlockTrampolinePoolScope block_trampoline_pool(this); 1053 BlockTrampolinePoolScope block_trampoline_pool(this);
904 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) { 1054 if (!MustUseReg(j.rmode_) && mode == OPTIMIZE_SIZE) {
905 // Normal load of an immediate value which does not need Relocation Info. 1055 // Normal load of an immediate value which does not need Relocation Info.
906 if (is_int16(j.imm32_)) { 1056 if (is_int32(j.imm64_)) {
907 addiu(rd, zero_reg, j.imm32_); 1057 if (is_int16(j.imm64_)) {
908 } else if (!(j.imm32_ & kHiMask)) { 1058 daddiu(rd, zero_reg, (j.imm64_ & kImm16Mask));
909 ori(rd, zero_reg, j.imm32_); 1059 } else if (!(j.imm64_ & kHiMask)) {
910 } else if (!(j.imm32_ & kImm16Mask)) { 1060 ori(rd, zero_reg, (j.imm64_ & kImm16Mask));
911 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask); 1061 } else if (!(j.imm64_ & kImm16Mask)) {
1062 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1063 } else {
1064 lui(rd, (j.imm64_ >> kLuiShift) & kImm16Mask);
1065 ori(rd, rd, (j.imm64_ & kImm16Mask));
1066 }
912 } else { 1067 } else {
913 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask); 1068 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
914 ori(rd, rd, (j.imm32_ & kImm16Mask)); 1069 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
1070 dsll(rd, rd, 16);
1071 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1072 dsll(rd, rd, 16);
1073 ori(rd, rd, j.imm64_ & kImm16Mask);
915 } 1074 }
1075 } else if (MustUseReg(j.rmode_)) {
1076 RecordRelocInfo(j.rmode_, j.imm64_);
1077 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1078 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1079 dsll(rd, rd, 16);
1080 ori(rd, rd, j.imm64_ & kImm16Mask);
1081 } else if (mode == ADDRESS_LOAD) {
1082 // We always need the same number of instructions as we may need to patch
1083 // this code to load another value which may need all 4 instructions.
1084 lui(rd, (j.imm64_ >> 32) & kImm16Mask);
1085 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
1086 dsll(rd, rd, 16);
1087 ori(rd, rd, j.imm64_ & kImm16Mask);
916 } else { 1088 } else {
917 if (MustUseReg(j.rmode_)) { 1089 lui(rd, (j.imm64_ >> 48) & kImm16Mask);
918 RecordRelocInfo(j.rmode_, j.imm32_); 1090 ori(rd, rd, (j.imm64_ >> 32) & kImm16Mask);
919 } 1091 dsll(rd, rd, 16);
920 // We always need the same number of instructions as we may need to patch 1092 ori(rd, rd, (j.imm64_ >> 16) & kImm16Mask);
921 // this code to load another value which may need 2 instructions to load. 1093 dsll(rd, rd, 16);
922 lui(rd, (j.imm32_ >> kLuiShift) & kImm16Mask); 1094 ori(rd, rd, j.imm64_ & kImm16Mask);
923 ori(rd, rd, (j.imm32_ & kImm16Mask));
924 } 1095 }
925 } 1096 }
926 1097
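For an arbitrary 64-bit constant, the new li above emits a six-instruction sequence: lui with bits 63..48, ori with bits 47..32, dsll by 16, ori with bits 31..16, dsll by 16, ori with bits 15..0. Relocated values and ADDRESS_LOAD use the fixed four-instruction form starting at bits 47..32 so the code can later be patched with any other address-sized value. A standalone sketch of the six-step recombination; lui also sign-extends, but those upper bits are shifted out by the two dsll's, so they are omitted here:

    #include <cstdint>

    // Models the general 6-instruction li sequence for a full 64-bit immediate.
    uint64_t BuildImmediate64(uint64_t imm) {
      uint64_t rd = ((imm >> 48) & 0xffff) << 16;  // lui  rd, imm[63:48]
      rd |= (imm >> 32) & 0xffff;                  // ori  rd, rd, imm[47:32]
      rd <<= 16;                                   // dsll rd, rd, 16
      rd |= (imm >> 16) & 0xffff;                  // ori  rd, rd, imm[31:16]
      rd <<= 16;                                   // dsll rd, rd, 16
      rd |= imm & 0xffff;                          // ori  rd, rd, imm[15:0]
      return rd;                                   // Equals the original imm.
    }
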
927 1098
928 void MacroAssembler::MultiPush(RegList regs) { 1099 void MacroAssembler::MultiPush(RegList regs) {
929 int16_t num_to_push = NumberOfBitsSet(regs); 1100 int16_t num_to_push = NumberOfBitsSet(regs);
930 int16_t stack_offset = num_to_push * kPointerSize; 1101 int16_t stack_offset = num_to_push * kPointerSize;
931 1102
932 Subu(sp, sp, Operand(stack_offset)); 1103 Dsubu(sp, sp, Operand(stack_offset));
933 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { 1104 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
934 if ((regs & (1 << i)) != 0) { 1105 if ((regs & (1 << i)) != 0) {
935 stack_offset -= kPointerSize; 1106 stack_offset -= kPointerSize;
936 sw(ToRegister(i), MemOperand(sp, stack_offset)); 1107 sd(ToRegister(i), MemOperand(sp, stack_offset));
937 } 1108 }
938 } 1109 }
939 } 1110 }
940 1111
941 1112
942 void MacroAssembler::MultiPushReversed(RegList regs) { 1113 void MacroAssembler::MultiPushReversed(RegList regs) {
943 int16_t num_to_push = NumberOfBitsSet(regs); 1114 int16_t num_to_push = NumberOfBitsSet(regs);
944 int16_t stack_offset = num_to_push * kPointerSize; 1115 int16_t stack_offset = num_to_push * kPointerSize;
945 1116
946 Subu(sp, sp, Operand(stack_offset)); 1117 Dsubu(sp, sp, Operand(stack_offset));
947 for (int16_t i = 0; i < kNumRegisters; i++) { 1118 for (int16_t i = 0; i < kNumRegisters; i++) {
948 if ((regs & (1 << i)) != 0) { 1119 if ((regs & (1 << i)) != 0) {
949 stack_offset -= kPointerSize; 1120 stack_offset -= kPointerSize;
950 sw(ToRegister(i), MemOperand(sp, stack_offset)); 1121 sd(ToRegister(i), MemOperand(sp, stack_offset));
951 } 1122 }
952 } 1123 }
953 } 1124 }
954 1125
955 1126
956 void MacroAssembler::MultiPop(RegList regs) { 1127 void MacroAssembler::MultiPop(RegList regs) {
957 int16_t stack_offset = 0; 1128 int16_t stack_offset = 0;
958 1129
959 for (int16_t i = 0; i < kNumRegisters; i++) { 1130 for (int16_t i = 0; i < kNumRegisters; i++) {
960 if ((regs & (1 << i)) != 0) { 1131 if ((regs & (1 << i)) != 0) {
961 lw(ToRegister(i), MemOperand(sp, stack_offset)); 1132 ld(ToRegister(i), MemOperand(sp, stack_offset));
962 stack_offset += kPointerSize; 1133 stack_offset += kPointerSize;
963 } 1134 }
964 } 1135 }
965 addiu(sp, sp, stack_offset); 1136 daddiu(sp, sp, stack_offset);
966 } 1137 }
967 1138
968 1139
969 void MacroAssembler::MultiPopReversed(RegList regs) { 1140 void MacroAssembler::MultiPopReversed(RegList regs) {
970 int16_t stack_offset = 0; 1141 int16_t stack_offset = 0;
971 1142
972 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { 1143 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
973 if ((regs & (1 << i)) != 0) { 1144 if ((regs & (1 << i)) != 0) {
974 lw(ToRegister(i), MemOperand(sp, stack_offset)); 1145 ld(ToRegister(i), MemOperand(sp, stack_offset));
975 stack_offset += kPointerSize; 1146 stack_offset += kPointerSize;
976 } 1147 }
977 } 1148 }
978 addiu(sp, sp, stack_offset); 1149 daddiu(sp, sp, stack_offset);
979 } 1150 }
980 1151
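MultiPush reserves the whole block first and then walks register indices from high to low, so the lowest-numbered register in the set ends up at offset 0 from the new sp and each higher-numbered register sits one pointer-size slot further up; MultiPop walks low to high with ascending offsets, which keeps the two in agreement. A small standalone sketch computing the slot a given register occupies after MultiPush, using the mips64 slot size of 8 bytes:

    #include <cstdint>

    // Byte offset from sp (after MultiPush) of register 'code' in the set 'regs',
    // or -1 if the register is not in the set. Lower codes land at lower offsets.
    int MultiPushSlotOffset(uint32_t regs, int code) {
      if ((regs & (1u << code)) == 0) return -1;
      int below = 0;
      for (int i = 0; i < code; i++) {
        if (regs & (1u << i)) below++;  // Registers with smaller codes sit closer to sp.
      }
      return below * 8;                 // kPointerSize == 8 on mips64.
    }
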
981 1152
982 void MacroAssembler::MultiPushFPU(RegList regs) { 1153 void MacroAssembler::MultiPushFPU(RegList regs) {
983 int16_t num_to_push = NumberOfBitsSet(regs); 1154 int16_t num_to_push = NumberOfBitsSet(regs);
984 int16_t stack_offset = num_to_push * kDoubleSize; 1155 int16_t stack_offset = num_to_push * kDoubleSize;
985 1156
986 Subu(sp, sp, Operand(stack_offset)); 1157 Dsubu(sp, sp, Operand(stack_offset));
987 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { 1158 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
988 if ((regs & (1 << i)) != 0) { 1159 if ((regs & (1 << i)) != 0) {
989 stack_offset -= kDoubleSize; 1160 stack_offset -= kDoubleSize;
990 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); 1161 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
991 } 1162 }
992 } 1163 }
993 } 1164 }
994 1165
995 1166
996 void MacroAssembler::MultiPushReversedFPU(RegList regs) { 1167 void MacroAssembler::MultiPushReversedFPU(RegList regs) {
997 int16_t num_to_push = NumberOfBitsSet(regs); 1168 int16_t num_to_push = NumberOfBitsSet(regs);
998 int16_t stack_offset = num_to_push * kDoubleSize; 1169 int16_t stack_offset = num_to_push * kDoubleSize;
999 1170
1000 Subu(sp, sp, Operand(stack_offset)); 1171 Dsubu(sp, sp, Operand(stack_offset));
1001 for (int16_t i = 0; i < kNumRegisters; i++) { 1172 for (int16_t i = 0; i < kNumRegisters; i++) {
1002 if ((regs & (1 << i)) != 0) { 1173 if ((regs & (1 << i)) != 0) {
1003 stack_offset -= kDoubleSize; 1174 stack_offset -= kDoubleSize;
1004 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); 1175 sdc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1005 } 1176 }
1006 } 1177 }
1007 } 1178 }
1008 1179
1009 1180
1010 void MacroAssembler::MultiPopFPU(RegList regs) { 1181 void MacroAssembler::MultiPopFPU(RegList regs) {
1011 int16_t stack_offset = 0; 1182 int16_t stack_offset = 0;
1012 1183
1013 for (int16_t i = 0; i < kNumRegisters; i++) { 1184 for (int16_t i = 0; i < kNumRegisters; i++) {
1014 if ((regs & (1 << i)) != 0) { 1185 if ((regs & (1 << i)) != 0) {
1015 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); 1186 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1016 stack_offset += kDoubleSize; 1187 stack_offset += kDoubleSize;
1017 } 1188 }
1018 } 1189 }
1019 addiu(sp, sp, stack_offset); 1190 daddiu(sp, sp, stack_offset);
1020 } 1191 }
1021 1192
1022 1193
1023 void MacroAssembler::MultiPopReversedFPU(RegList regs) { 1194 void MacroAssembler::MultiPopReversedFPU(RegList regs) {
1024 int16_t stack_offset = 0; 1195 int16_t stack_offset = 0;
1025 1196
1026 for (int16_t i = kNumRegisters - 1; i >= 0; i--) { 1197 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
1027 if ((regs & (1 << i)) != 0) { 1198 if ((regs & (1 << i)) != 0) {
1028 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset)); 1199 ldc1(FPURegister::from_code(i), MemOperand(sp, stack_offset));
1029 stack_offset += kDoubleSize; 1200 stack_offset += kDoubleSize;
1030 } 1201 }
1031 } 1202 }
1032 addiu(sp, sp, stack_offset); 1203 daddiu(sp, sp, stack_offset);
1033 } 1204 }
1034 1205
1035 1206
1036 void MacroAssembler::FlushICache(Register address, unsigned instructions) { 1207 void MacroAssembler::FlushICache(Register address, unsigned instructions) {
1037 RegList saved_regs = kJSCallerSaved | ra.bit(); 1208 RegList saved_regs = kJSCallerSaved | ra.bit();
1038 MultiPush(saved_regs); 1209 MultiPush(saved_regs);
1039 AllowExternalCallThatCantCauseGC scope(this); 1210 AllowExternalCallThatCantCauseGC scope(this);
1040 1211
1041 // Save to a0 in case address == t0. 1212 // Save to a0 in case address == a4.
1042 Move(a0, address); 1213 Move(a0, address);
1043 PrepareCallCFunction(2, t0); 1214 PrepareCallCFunction(2, a4);
1044 1215
1045 li(a1, instructions * kInstrSize); 1216 li(a1, instructions * kInstrSize);
1046 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2); 1217 CallCFunction(ExternalReference::flush_icache_function(isolate()), 2);
1047 MultiPop(saved_regs); 1218 MultiPop(saved_regs);
1048 } 1219 }
1049 1220
1050 1221
1051 void MacroAssembler::Ext(Register rt, 1222 void MacroAssembler::Ext(Register rt,
1052 Register rs, 1223 Register rs,
1053 uint16_t pos, 1224 uint16_t pos,
1054 uint16_t size) { 1225 uint16_t size) {
1055 ASSERT(pos < 32); 1226 ASSERT(pos < 32);
1056 ASSERT(pos + size < 33); 1227 ASSERT(pos + size < 33);
1057 1228 ext_(rt, rs, pos, size);
1058 if (kArchVariant == kMips32r2) {
1059 ext_(rt, rs, pos, size);
1060 } else {
1061 // Move rs to rt and shift it left then right to get the
1062 // desired bitfield on the right side and zeroes on the left.
1063 int shift_left = 32 - (pos + size);
1064 sll(rt, rs, shift_left); // Acts as a move if shift_left == 0.
1065
1066 int shift_right = 32 - size;
1067 if (shift_right > 0) {
1068 srl(rt, rt, shift_right);
1069 }
1070 }
1071 } 1229 }
1072 1230
1073 1231
1074 void MacroAssembler::Ins(Register rt, 1232 void MacroAssembler::Ins(Register rt,
1075 Register rs, 1233 Register rs,
1076 uint16_t pos, 1234 uint16_t pos,
1077 uint16_t size) { 1235 uint16_t size) {
1078 ASSERT(pos < 32); 1236 ASSERT(pos < 32);
1079 ASSERT(pos + size <= 32); 1237 ASSERT(pos + size <= 32);
1080 ASSERT(size != 0); 1238 ASSERT(size != 0);
1081 1239 ins_(rt, rs, pos, size);
1082 if (kArchVariant == kMips32r2) {
1083 ins_(rt, rs, pos, size);
1084 } else {
1085 ASSERT(!rt.is(t8) && !rs.is(t8));
1086 Subu(at, zero_reg, Operand(1));
1087 srl(at, at, 32 - size);
1088 and_(t8, rs, at);
1089 sll(t8, t8, pos);
1090 sll(at, at, pos);
1091 nor(at, at, zero_reg);
1092 and_(at, rt, at);
1093 or_(rt, t8, at);
1094 }
1095 } 1240 }
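
With mips64r2 as the only supported variant, Ext and Ins no longer need the shift-based fallback and collapse to the single ext/ins instructions. For reference, the bitfield semantics the two macros provide are equivalent to this C++ sketch (illustrative only):

#include <cstdint>

// Ext: zero-extended extract of 'size' bits of 'rs' starting at bit 'pos'.
uint32_t Ext(uint32_t rs, int pos, int size) {
  uint32_t mask = (size == 32) ? 0xffffffffu : ((1u << size) - 1);
  return (rs >> pos) & mask;
}

// Ins: replace the 'size'-bit field of 'rt' at bit 'pos' with the low bits of 'rs'.
uint32_t Ins(uint32_t rt, uint32_t rs, int pos, int size) {
  uint32_t field = (size == 32) ? 0xffffffffu : ((1u << size) - 1);
  uint32_t mask = field << pos;
  return (rt & ~mask) | ((rs << pos) & mask);
}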
1096 1241
1097 1242
1098 void MacroAssembler::Cvt_d_uw(FPURegister fd, 1243 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1099 FPURegister fs, 1244 FPURegister fs,
1100 FPURegister scratch) { 1245 FPURegister scratch) {
1101 // Move the data from fs to t8. 1246 // Move the data from fs to t8.
1102 mfc1(t8, fs); 1247 mfc1(t8, fs);
1103 Cvt_d_uw(fd, t8, scratch); 1248 Cvt_d_uw(fd, t8, scratch);
1104 } 1249 }
1105 1250
1106 1251
1107 void MacroAssembler::Cvt_d_uw(FPURegister fd, 1252 void MacroAssembler::Cvt_d_uw(FPURegister fd,
1108 Register rs, 1253 Register rs,
1109 FPURegister scratch) { 1254 FPURegister scratch) {
1110 // Convert rs to a FP value in fd (and fd + 1). 1255 // Convert rs to a FP value in fd (and fd + 1).
1111 // We do this by converting rs minus the MSB to avoid sign conversion, 1256 // We do this by converting rs minus the MSB to avoid sign conversion,
1112 // then adding 2^31 to the result (if needed). 1257 // then adding 2^31 to the result (if needed).
1113 1258
1114 ASSERT(!fd.is(scratch)); 1259 ASSERT(!fd.is(scratch));
1115 ASSERT(!rs.is(t9)); 1260 ASSERT(!rs.is(t9));
1116 ASSERT(!rs.is(at)); 1261 ASSERT(!rs.is(at));
1117 1262
1118 // Save rs's MSB to t9. 1263 // Save rs's MSB to t9.
1119 Ext(t9, rs, 31, 1); 1264 Ext(t9, rs, 31, 1);
1120 // Remove rs's MSB. 1265 // Remove rs's MSB.
1121 Ext(at, rs, 0, 31); 1266 Ext(at, rs, 0, 31);
1122 // Move the result to fd. 1267 // Move the result to fd.
1123 mtc1(at, fd); 1268 mtc1(at, fd);
1269 mthc1(zero_reg, fd);
1124 1270
1125 // Convert fd to a real FP value. 1271 // Convert fd to a real FP value.
1126 cvt_d_w(fd, fd); 1272 cvt_d_w(fd, fd);
1127 1273
1128 Label conversion_done; 1274 Label conversion_done;
1129 1275
1130 // If rs's MSB was 0, it's done. 1276 // If rs's MSB was 0, it's done.
1131 // Otherwise we need to add that to the FP register. 1277 // Otherwise we need to add that to the FP register.
1132 Branch(&conversion_done, eq, t9, Operand(zero_reg)); 1278 Branch(&conversion_done, eq, t9, Operand(zero_reg));
1133 1279
1134 // Load 2^31 into scratch as its float representation. 1280 // Load 2^31 into scratch as its float representation.
1135 li(at, 0x41E00000); 1281 li(at, 0x41E00000);
1136 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1137 mtc1(zero_reg, scratch); 1282 mtc1(zero_reg, scratch);
1283 mthc1(at, scratch);
1138 // Add it to fd. 1284 // Add it to fd.
1139 add_d(fd, fd, scratch); 1285 add_d(fd, fd, scratch);
1140 1286
1141 bind(&conversion_done); 1287 bind(&conversion_done);
1142 } 1288 }
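
The added mthc1(zero_reg, fd) zeroes the high half of fd so the full 64-bit register holds a defined value after the 32-bit mtc1. The overall uint32-to-double algorithm (save the MSB, convert the low 31 bits as a signed value, then add back 2^31 when the MSB was set) can be checked against this host-side C++ sketch (illustrative only):

#include <cstdint>

// Mirrors MacroAssembler::Cvt_d_uw: uint32 -> double without relying on a
// signed conversion of the full 32-bit value.
double CvtDoubleFromUint32(uint32_t u) {
  uint32_t msb = (u >> 31) & 1;        // Ext(t9, rs, 31, 1)
  uint32_t low31 = u & 0x7fffffffu;    // Ext(at, rs, 0, 31)
  double d = static_cast<double>(static_cast<int32_t>(low31));  // cvt_d_w
  if (msb != 0) {
    d += 2147483648.0;                 // add_d of 2^31 (high word 0x41E00000)
  }
  return d;
}

// Example: 0xFFFFFFFF converts to 4294967295.0 rather than -1.0.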
1143 1289
1144 1290
1291 void MacroAssembler::Round_l_d(FPURegister fd, FPURegister fs) {
1292 round_l_d(fd, fs);
1293 }
1294
1295
1296 void MacroAssembler::Floor_l_d(FPURegister fd, FPURegister fs) {
1297 floor_l_d(fd, fs);
1298 }
1299
1300
1301 void MacroAssembler::Ceil_l_d(FPURegister fd, FPURegister fs) {
1302 ceil_l_d(fd, fs);
1303 }
1304
1305
1306 void MacroAssembler::Trunc_l_d(FPURegister fd, FPURegister fs) {
1307 trunc_l_d(fd, fs);
1308 }
1309
1310
1311 void MacroAssembler::Trunc_l_ud(FPURegister fd,
1312 FPURegister fs,
1313 FPURegister scratch) {
1314 // Load to GPR.
1315 dmfc1(t8, fs);
1316 // Reset sign bit.
1317 li(at, 0x7fffffffffffffff);
1318 and_(t8, t8, at);
1319 dmtc1(t8, fs);
1320 trunc_l_d(fd, fs);
1321 }
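
Trunc_l_ud clears the sign bit in the raw IEEE-754 pattern, so the input is treated as its absolute value before being truncated toward zero to a 64-bit integer. Host-side equivalent for in-range inputs (illustrative only):

#include <cstdint>
#include <cstring>

int64_t TruncLUd(double fs) {
  uint64_t bits;
  std::memcpy(&bits, &fs, sizeof(bits));          // dmfc1(t8, fs)
  bits &= 0x7fffffffffffffffULL;                  // clear the sign bit
  double abs_fs;
  std::memcpy(&abs_fs, &bits, sizeof(abs_fs));    // dmtc1(t8, fs)
  return static_cast<int64_t>(abs_fs);            // trunc_l_d(fd, fs)
}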
1322
1323
1145 void MacroAssembler::Trunc_uw_d(FPURegister fd, 1324 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1146 FPURegister fs, 1325 FPURegister fs,
1147 FPURegister scratch) { 1326 FPURegister scratch) {
1148 Trunc_uw_d(fs, t8, scratch); 1327 Trunc_uw_d(fs, t8, scratch);
1149 mtc1(t8, fd); 1328 mtc1(t8, fd);
1150 } 1329 }
1151 1330
1152 1331
1153 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) { 1332 void MacroAssembler::Trunc_w_d(FPURegister fd, FPURegister fs) {
1154 if (kArchVariant == kLoongson && fd.is(fs)) { 1333 trunc_w_d(fd, fs);
1155 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1156 trunc_w_d(fd, fs);
1157 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1158 } else {
1159 trunc_w_d(fd, fs);
1160 }
1161 } 1334 }
1162 1335
1163 1336
1164 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) { 1337 void MacroAssembler::Round_w_d(FPURegister fd, FPURegister fs) {
1165 if (kArchVariant == kLoongson && fd.is(fs)) { 1338 round_w_d(fd, fs);
1166 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1167 round_w_d(fd, fs);
1168 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1169 } else {
1170 round_w_d(fd, fs);
1171 }
1172 } 1339 }
1173 1340
1174 1341
1175 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) { 1342 void MacroAssembler::Floor_w_d(FPURegister fd, FPURegister fs) {
1176 if (kArchVariant == kLoongson && fd.is(fs)) { 1343 floor_w_d(fd, fs);
1177 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1178 floor_w_d(fd, fs);
1179 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1180 } else {
1181 floor_w_d(fd, fs);
1182 }
1183 } 1344 }
1184 1345
1185 1346
1186 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) { 1347 void MacroAssembler::Ceil_w_d(FPURegister fd, FPURegister fs) {
1187 if (kArchVariant == kLoongson && fd.is(fs)) { 1348 ceil_w_d(fd, fs);
1188 mfc1(t8, FPURegister::from_code(fs.code() + 1));
1189 ceil_w_d(fd, fs);
1190 mtc1(t8, FPURegister::from_code(fs.code() + 1));
1191 } else {
1192 ceil_w_d(fd, fs);
1193 }
1194 } 1349 }
1195 1350
1196 1351
1197 void MacroAssembler::Trunc_uw_d(FPURegister fd, 1352 void MacroAssembler::Trunc_uw_d(FPURegister fd,
1198 Register rs, 1353 Register rs,
1199 FPURegister scratch) { 1354 FPURegister scratch) {
1200 ASSERT(!fd.is(scratch)); 1355 ASSERT(!fd.is(scratch));
1201 ASSERT(!rs.is(at)); 1356 ASSERT(!rs.is(at));
1202 1357
1203 // Load 2^31 into scratch as its float representation. 1358 // Load 2^31 into scratch as its float representation.
1204 li(at, 0x41E00000); 1359 li(at, 0x41E00000);
1205 mtc1(at, FPURegister::from_code(scratch.code() + 1));
1206 mtc1(zero_reg, scratch); 1360 mtc1(zero_reg, scratch);
1361 mthc1(at, scratch);
1207 // Test if scratch > fd. 1362 // Test if scratch > fd.
1208 // If fd < 2^31 we can convert it normally. 1363 // If fd < 2^31 we can convert it normally.
1209 Label simple_convert; 1364 Label simple_convert;
1210 BranchF(&simple_convert, NULL, lt, fd, scratch); 1365 BranchF(&simple_convert, NULL, lt, fd, scratch);
1211 1366
1212 // First we subtract 2^31 from fd, then trunc it to rs 1367 // First we subtract 2^31 from fd, then trunc it to rs
1213 // and add 2^31 to rs. 1368 // and add 2^31 to rs.
1214 sub_d(scratch, fd, scratch); 1369 sub_d(scratch, fd, scratch);
1215 trunc_w_d(scratch, scratch); 1370 trunc_w_d(scratch, scratch);
1216 mfc1(rs, scratch); 1371 mfc1(rs, scratch);
1217 Or(rs, rs, 1 << 31); 1372 Or(rs, rs, 1 << 31);
1218 1373
1219 Label done; 1374 Label done;
1220 Branch(&done); 1375 Branch(&done);
1221 // Simple conversion. 1376 // Simple conversion.
1222 bind(&simple_convert); 1377 bind(&simple_convert);
1223 trunc_w_d(scratch, fd); 1378 trunc_w_d(scratch, fd);
1224 mfc1(rs, scratch); 1379 mfc1(rs, scratch);
1225 1380
1226 bind(&done); 1381 bind(&done);
1227 } 1382 }
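
Trunc_uw_d only has a signed 32-bit truncation to work with, so values at or above 2^31 are handled by subtracting 2^31 first and OR-ing the top bit back into the integer result. Equivalent host-side sketch for inputs in [0, 2^32) (illustrative only):

#include <cstdint>

uint32_t TruncUint32FromDouble(double fd) {
  const double two_31 = 2147483648.0;  // the constant loaded via 0x41E00000
  if (fd < two_31) {
    return static_cast<uint32_t>(static_cast<int32_t>(fd));  // simple_convert
  }
  int32_t truncated = static_cast<int32_t>(fd - two_31);     // sub_d + trunc_w_d
  return static_cast<uint32_t>(truncated) | (1u << 31);      // Or(rs, rs, 1 << 31)
}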
1228 1383
1229 1384
1385 void MacroAssembler::Madd_d(FPURegister fd, FPURegister fr, FPURegister fs,
1386 FPURegister ft, FPURegister scratch) {
1387 if (0) { // TODO(plind): find reasonable arch-variant symbol names.
1388 madd_d(fd, fr, fs, ft);
1389 } else {
1390 // Must not change the source registers' values.
1391 ASSERT(!fr.is(scratch) && !fs.is(scratch) && !ft.is(scratch));
1392 mul_d(scratch, fs, ft);
1393 add_d(fd, fr, scratch);
1394 }
1395 }
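
With the fused madd_d path disabled by the TODO, Madd_d always expands into a separate multiply and add through the scratch register, i.e. two roundings rather than one. Sketch of what is computed (illustrative only):

// Emulated multiply-add: the intermediate product is rounded, so the result
// can differ from a true fused multiply-add (std::fma) in the last bit.
double MaddD(double fr, double fs, double ft) {
  double scratch = fs * ft;   // mul_d(scratch, fs, ft)
  return fr + scratch;        // add_d(fd, fr, scratch)
}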
1396
1397
1230 void MacroAssembler::BranchF(Label* target, 1398 void MacroAssembler::BranchF(Label* target,
1231 Label* nan, 1399 Label* nan,
1232 Condition cc, 1400 Condition cc,
1233 FPURegister cmp1, 1401 FPURegister cmp1,
1234 FPURegister cmp2, 1402 FPURegister cmp2,
1235 BranchDelaySlot bd) { 1403 BranchDelaySlot bd) {
1236 BlockTrampolinePoolScope block_trampoline_pool(this); 1404 BlockTrampolinePoolScope block_trampoline_pool(this);
1237 if (cc == al) { 1405 if (cc == al) {
1238 Branch(bd, target); 1406 Branch(bd, target);
1239 return; 1407 return;
(...skipping 60 matching lines...)
1300 DoubleRepresentation value_rep(imm); 1468 DoubleRepresentation value_rep(imm);
1301 // Handle special values first. 1469 // Handle special values first.
1302 bool force_load = dst.is(kDoubleRegZero); 1470 bool force_load = dst.is(kDoubleRegZero);
1303 if (value_rep == zero && !force_load) { 1471 if (value_rep == zero && !force_load) {
1304 mov_d(dst, kDoubleRegZero); 1472 mov_d(dst, kDoubleRegZero);
1305 } else if (value_rep == minus_zero && !force_load) { 1473 } else if (value_rep == minus_zero && !force_load) {
1306 neg_d(dst, kDoubleRegZero); 1474 neg_d(dst, kDoubleRegZero);
1307 } else { 1475 } else {
1308 uint32_t lo, hi; 1476 uint32_t lo, hi;
1309 DoubleAsTwoUInt32(imm, &lo, &hi); 1477 DoubleAsTwoUInt32(imm, &lo, &hi);
1310 // Move the low part of the double into the lower of the corresponding FPU 1478 // Move the low part of the double into the lower bits of the corresponding
1311 // register of FPU register pair. 1479 // FPU register.
1312 if (lo != 0) { 1480 if (lo != 0) {
1313 li(at, Operand(lo)); 1481 li(at, Operand(lo));
1314 mtc1(at, dst); 1482 mtc1(at, dst);
1315 } else { 1483 } else {
1316 mtc1(zero_reg, dst); 1484 mtc1(zero_reg, dst);
1317 } 1485 }
1318 // Move the high part of the double into the higher of the corresponding FPU 1486 // Move the high part of the double into the high bits of the corresponding
1319 // register of FPU register pair. 1487 // FPU register.
1320 if (hi != 0) { 1488 if (hi != 0) {
1321 li(at, Operand(hi)); 1489 li(at, Operand(hi));
1322 mtc1(at, dst.high()); 1490 mthc1(at, dst);
1323 } else { 1491 } else {
1324 mtc1(zero_reg, dst.high()); 1492 mthc1(zero_reg, dst);
1325 } 1493 }
1326 } 1494 }
1327 } 1495 }
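
For a non-trivial immediate the double is split into its two 32-bit IEEE-754 words and written with mtc1 (low word) plus the new mthc1 (high word), instead of targeting the high register of a pair. The split performed by DoubleAsTwoUInt32 is just the raw bit pattern, as in this stand-in sketch (the helper below is illustrative, not the V8 implementation):

#include <cstdint>
#include <cstring>

void DoubleAsTwoUInt32(double value, uint32_t* lo, uint32_t* hi) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  *lo = static_cast<uint32_t>(bits & 0xffffffffu);  // word written by mtc1
  *hi = static_cast<uint32_t>(bits >> 32);          // word written by mthc1
}

// Example: 1.0 splits into lo == 0x00000000 and hi == 0x3FF00000.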
1328 1496
1329 1497
1330 void MacroAssembler::Movz(Register rd, Register rs, Register rt) { 1498 void MacroAssembler::Movz(Register rd, Register rs, Register rt) {
1331 if (kArchVariant == kLoongson) { 1499 if (kArchVariant == kLoongson) {
1332 Label done; 1500 Label done;
1333 Branch(&done, ne, rt, Operand(zero_reg)); 1501 Branch(&done, ne, rt, Operand(zero_reg));
1334 mov(rd, rs); 1502 mov(rd, rs);
(...skipping 10 matching lines...)
1345 Branch(&done, eq, rt, Operand(zero_reg)); 1513 Branch(&done, eq, rt, Operand(zero_reg));
1346 mov(rd, rs); 1514 mov(rd, rs);
1347 bind(&done); 1515 bind(&done);
1348 } else { 1516 } else {
1349 movn(rd, rs, rt); 1517 movn(rd, rs, rt);
1350 } 1518 }
1351 } 1519 }
1352 1520
1353 1521
1354 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) { 1522 void MacroAssembler::Movt(Register rd, Register rs, uint16_t cc) {
1355 if (kArchVariant == kLoongson) { 1523 movt(rd, rs, cc);
1356 // Tests an FP condition code and then conditionally move rs to rd.
1357 // We do not currently use any FPU cc bit other than bit 0.
1358 ASSERT(cc == 0);
1359 ASSERT(!(rs.is(t8) || rd.is(t8)));
1360 Label done;
1361 Register scratch = t8;
1362 // For testing purposes we need to fetch content of the FCSR register and
1363 // than test its cc (floating point condition code) bit (for cc = 0, it is
1364 // 24. bit of the FCSR).
1365 cfc1(scratch, FCSR);
1366 // For the MIPS I, II and III architectures, the contents of scratch is
1367 // UNPREDICTABLE for the instruction immediately following CFC1.
1368 nop();
1369 srl(scratch, scratch, 16);
1370 andi(scratch, scratch, 0x0080);
1371 Branch(&done, eq, scratch, Operand(zero_reg));
1372 mov(rd, rs);
1373 bind(&done);
1374 } else {
1375 movt(rd, rs, cc);
1376 }
1377 } 1524 }
1378 1525
1379 1526
1380 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) { 1527 void MacroAssembler::Movf(Register rd, Register rs, uint16_t cc) {
1381 if (kArchVariant == kLoongson) { 1528 movf(rd, rs, cc);
1382 // Tests an FP condition code and then conditionally move rs to rd.
1383 // We do not currently use any FPU cc bit other than bit 0.
1384 ASSERT(cc == 0);
1385 ASSERT(!(rs.is(t8) || rd.is(t8)));
1386 Label done;
1387 Register scratch = t8;
1388 // For testing purposes we need to fetch content of the FCSR register and
1389 // than test its cc (floating point condition code) bit (for cc = 0, it is
1390 // 24. bit of the FCSR).
1391 cfc1(scratch, FCSR);
1392 // For the MIPS I, II and III architectures, the contents of scratch is
1393 // UNPREDICTABLE for the instruction immediately following CFC1.
1394 nop();
1395 srl(scratch, scratch, 16);
1396 andi(scratch, scratch, 0x0080);
1397 Branch(&done, ne, scratch, Operand(zero_reg));
1398 mov(rd, rs);
1399 bind(&done);
1400 } else {
1401 movf(rd, rs, cc);
1402 }
1403 } 1529 }
1404 1530
1405 1531
1406 void MacroAssembler::Clz(Register rd, Register rs) { 1532 void MacroAssembler::Clz(Register rd, Register rs) {
1407 if (kArchVariant == kLoongson) { 1533 clz(rd, rs);
1408 ASSERT(!(rd.is(t8) || rd.is(t9)) && !(rs.is(t8) || rs.is(t9)));
1409 Register mask = t8;
1410 Register scratch = t9;
1411 Label loop, end;
1412 mov(at, rs);
1413 mov(rd, zero_reg);
1414 lui(mask, 0x8000);
1415 bind(&loop);
1416 and_(scratch, at, mask);
1417 Branch(&end, ne, scratch, Operand(zero_reg));
1418 addiu(rd, rd, 1);
1419 Branch(&loop, ne, mask, Operand(zero_reg), USE_DELAY_SLOT);
1420 srl(mask, mask, 1);
1421 bind(&end);
1422 } else {
1423 clz(rd, rs);
1424 }
1425 } 1534 }
1426 1535
1427 1536
1428 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode, 1537 void MacroAssembler::EmitFPUTruncate(FPURoundingMode rounding_mode,
1429 Register result, 1538 Register result,
1430 DoubleRegister double_input, 1539 DoubleRegister double_input,
1431 Register scratch, 1540 Register scratch,
1432 DoubleRegister double_scratch, 1541 DoubleRegister double_scratch,
1433 Register except_flag, 1542 Register except_flag,
1434 CheckForInexactConversion check_inexact) { 1543 CheckForInexactConversion check_inexact) {
(...skipping 80 matching lines...)
1515 1624
1516 1625
1517 void MacroAssembler::TruncateDoubleToI(Register result, 1626 void MacroAssembler::TruncateDoubleToI(Register result,
1518 DoubleRegister double_input) { 1627 DoubleRegister double_input) {
1519 Label done; 1628 Label done;
1520 1629
1521 TryInlineTruncateDoubleToI(result, double_input, &done); 1630 TryInlineTruncateDoubleToI(result, double_input, &done);
1522 1631
1523 // If we fell through then inline version didn't succeed - call stub instead. 1632 // If we fell through then inline version didn't succeed - call stub instead.
1524 push(ra); 1633 push(ra);
1525 Subu(sp, sp, Operand(kDoubleSize)); // Put input on stack. 1634 Dsubu(sp, sp, Operand(kDoubleSize)); // Put input on stack.
1526 sdc1(double_input, MemOperand(sp, 0)); 1635 sdc1(double_input, MemOperand(sp, 0));
1527 1636
1528 DoubleToIStub stub(isolate(), sp, result, 0, true, true); 1637 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
1529 CallStub(&stub); 1638 CallStub(&stub);
1530 1639
1531 Addu(sp, sp, Operand(kDoubleSize)); 1640 Daddu(sp, sp, Operand(kDoubleSize));
1532 pop(ra); 1641 pop(ra);
1533 1642
1534 bind(&done); 1643 bind(&done);
1535 } 1644 }
1536 1645
1537 1646
1538 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) { 1647 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
1539 Label done; 1648 Label done;
1540 DoubleRegister double_scratch = f12; 1649 DoubleRegister double_scratch = f12;
1541 ASSERT(!result.is(object)); 1650 ASSERT(!result.is(object));
(...skipping 29 matching lines...)
1571 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number); 1680 JumpIfNotHeapNumber(object, heap_number_map, scratch, not_number);
1572 TruncateHeapNumberToI(result, object); 1681 TruncateHeapNumberToI(result, object);
1573 1682
1574 bind(&done); 1683 bind(&done);
1575 } 1684 }
1576 1685
1577 1686
1578 void MacroAssembler::GetLeastBitsFromSmi(Register dst, 1687 void MacroAssembler::GetLeastBitsFromSmi(Register dst,
1579 Register src, 1688 Register src,
1580 int num_least_bits) { 1689 int num_least_bits) {
1581 Ext(dst, src, kSmiTagSize, num_least_bits); 1690 // Ext(dst, src, kSmiTagSize, num_least_bits);
1691 SmiUntag(dst, src);
1692 And(dst, dst, Operand((1 << num_least_bits) - 1));
1582 } 1693 }
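
On mips64 a smi keeps its payload in the upper 32 bits of the word, so the old single Ext over the low bits no longer picks up the value; the patch untags first (arithmetic shift right by 32) and then masks. Host-side equivalent, assuming that smi encoding (illustrative only):

#include <cstdint>

uint32_t GetLeastBitsFromSmi(int64_t smi, int num_least_bits) {
  // num_least_bits is expected to be small (< 32).
  int64_t value = smi >> 32;                                           // SmiUntag (dsra32)
  return static_cast<uint32_t>(value) & ((1u << num_least_bits) - 1);  // And
}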
1583 1694
1584 1695
1585 void MacroAssembler::GetLeastBitsFromInt32(Register dst, 1696 void MacroAssembler::GetLeastBitsFromInt32(Register dst,
1586 Register src, 1697 Register src,
1587 int num_least_bits) { 1698 int num_least_bits) {
1699 ASSERT(!src.is(dst));
1588 And(dst, src, Operand((1 << num_least_bits) - 1)); 1700 And(dst, src, Operand((1 << num_least_bits) - 1));
1589 } 1701 }
1590 1702
1591 1703
1592 // Emulated conditional branches do not emit a nop in the branch delay slot. 1704 // Emulated conditional branches do not emit a nop in the branch delay slot.
1593 // 1705 //
1594 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct. 1706 // BRANCH_ARGS_CHECK checks that conditional jump arguments are correct.
1595 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \ 1707 #define BRANCH_ARGS_CHECK(cond, rs, rt) ASSERT( \
1596 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \ 1708 (cond == cc_always && rs.is(zero_reg) && rt.rm().is(zero_reg)) || \
1597 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg)))) 1709 (cond != cc_always && (!rs.is(zero_reg) || !rt.rm().is(zero_reg))))
(...skipping 193 matching lines...)
1791 break; 1903 break;
1792 case ne: 1904 case ne:
1793 // We don't want any other register but scratch clobbered. 1905 // We don't want any other register but scratch clobbered.
1794 ASSERT(!scratch.is(rs)); 1906 ASSERT(!scratch.is(rs));
1795 r2 = scratch; 1907 r2 = scratch;
1796 li(r2, rt); 1908 li(r2, rt);
1797 bne(rs, r2, offset); 1909 bne(rs, r2, offset);
1798 break; 1910 break;
1799 // Signed comparison. 1911 // Signed comparison.
1800 case greater: 1912 case greater:
1801 if (rt.imm32_ == 0) { 1913 if (rt.imm64_ == 0) {
1802 bgtz(rs, offset); 1914 bgtz(rs, offset);
1803 } else { 1915 } else {
1804 r2 = scratch; 1916 r2 = scratch;
1805 li(r2, rt); 1917 li(r2, rt);
1806 slt(scratch, r2, rs); 1918 slt(scratch, r2, rs);
1807 bne(scratch, zero_reg, offset); 1919 bne(scratch, zero_reg, offset);
1808 } 1920 }
1809 break; 1921 break;
1810 case greater_equal: 1922 case greater_equal:
1811 if (rt.imm32_ == 0) { 1923 if (rt.imm64_ == 0) {
1812 bgez(rs, offset); 1924 bgez(rs, offset);
1813 } else if (is_int16(rt.imm32_)) { 1925 } else if (is_int16(rt.imm64_)) {
1814 slti(scratch, rs, rt.imm32_); 1926 slti(scratch, rs, rt.imm64_);
1815 beq(scratch, zero_reg, offset); 1927 beq(scratch, zero_reg, offset);
1816 } else { 1928 } else {
1817 r2 = scratch; 1929 r2 = scratch;
1818 li(r2, rt); 1930 li(r2, rt);
1819 slt(scratch, rs, r2); 1931 slt(scratch, rs, r2);
1820 beq(scratch, zero_reg, offset); 1932 beq(scratch, zero_reg, offset);
1821 } 1933 }
1822 break; 1934 break;
1823 case less: 1935 case less:
1824 if (rt.imm32_ == 0) { 1936 if (rt.imm64_ == 0) {
1825 bltz(rs, offset); 1937 bltz(rs, offset);
1826 } else if (is_int16(rt.imm32_)) { 1938 } else if (is_int16(rt.imm64_)) {
1827 slti(scratch, rs, rt.imm32_); 1939 slti(scratch, rs, rt.imm64_);
1828 bne(scratch, zero_reg, offset); 1940 bne(scratch, zero_reg, offset);
1829 } else { 1941 } else {
1830 r2 = scratch; 1942 r2 = scratch;
1831 li(r2, rt); 1943 li(r2, rt);
1832 slt(scratch, rs, r2); 1944 slt(scratch, rs, r2);
1833 bne(scratch, zero_reg, offset); 1945 bne(scratch, zero_reg, offset);
1834 } 1946 }
1835 break; 1947 break;
1836 case less_equal: 1948 case less_equal:
1837 if (rt.imm32_ == 0) { 1949 if (rt.imm64_ == 0) {
1838 blez(rs, offset); 1950 blez(rs, offset);
1839 } else { 1951 } else {
1840 r2 = scratch; 1952 r2 = scratch;
1841 li(r2, rt); 1953 li(r2, rt);
1842 slt(scratch, r2, rs); 1954 slt(scratch, r2, rs);
1843 beq(scratch, zero_reg, offset); 1955 beq(scratch, zero_reg, offset);
1844 } 1956 }
1845 break; 1957 break;
1846 // Unsigned comparison. 1958 // Unsigned comparison.
1847 case Ugreater: 1959 case Ugreater:
1848 if (rt.imm32_ == 0) { 1960 if (rt.imm64_ == 0) {
1849 bgtz(rs, offset); 1961 bgtz(rs, offset);
1850 } else { 1962 } else {
1851 r2 = scratch; 1963 r2 = scratch;
1852 li(r2, rt); 1964 li(r2, rt);
1853 sltu(scratch, r2, rs); 1965 sltu(scratch, r2, rs);
1854 bne(scratch, zero_reg, offset); 1966 bne(scratch, zero_reg, offset);
1855 } 1967 }
1856 break; 1968 break;
1857 case Ugreater_equal: 1969 case Ugreater_equal:
1858 if (rt.imm32_ == 0) { 1970 if (rt.imm64_ == 0) {
1859 bgez(rs, offset); 1971 bgez(rs, offset);
1860 } else if (is_int16(rt.imm32_)) { 1972 } else if (is_int16(rt.imm64_)) {
1861 sltiu(scratch, rs, rt.imm32_); 1973 sltiu(scratch, rs, rt.imm64_);
1862 beq(scratch, zero_reg, offset); 1974 beq(scratch, zero_reg, offset);
1863 } else { 1975 } else {
1864 r2 = scratch; 1976 r2 = scratch;
1865 li(r2, rt); 1977 li(r2, rt);
1866 sltu(scratch, rs, r2); 1978 sltu(scratch, rs, r2);
1867 beq(scratch, zero_reg, offset); 1979 beq(scratch, zero_reg, offset);
1868 } 1980 }
1869 break; 1981 break;
1870 case Uless: 1982 case Uless:
1871 if (rt.imm32_ == 0) { 1983 if (rt.imm64_ == 0) {
1872 // No code needs to be emitted. 1984 // No code needs to be emitted.
1873 return; 1985 return;
1874 } else if (is_int16(rt.imm32_)) { 1986 } else if (is_int16(rt.imm64_)) {
1875 sltiu(scratch, rs, rt.imm32_); 1987 sltiu(scratch, rs, rt.imm64_);
1876 bne(scratch, zero_reg, offset); 1988 bne(scratch, zero_reg, offset);
1877 } else { 1989 } else {
1878 r2 = scratch; 1990 r2 = scratch;
1879 li(r2, rt); 1991 li(r2, rt);
1880 sltu(scratch, rs, r2); 1992 sltu(scratch, rs, r2);
1881 bne(scratch, zero_reg, offset); 1993 bne(scratch, zero_reg, offset);
1882 } 1994 }
1883 break; 1995 break;
1884 case Uless_equal: 1996 case Uless_equal:
1885 if (rt.imm32_ == 0) { 1997 if (rt.imm64_ == 0) {
1886 b(offset); 1998 b(offset);
1887 } else { 1999 } else {
1888 r2 = scratch; 2000 r2 = scratch;
1889 li(r2, rt); 2001 li(r2, rt);
1890 sltu(scratch, r2, rs); 2002 sltu(scratch, r2, rs);
1891 beq(scratch, zero_reg, offset); 2003 beq(scratch, zero_reg, offset);
1892 } 2004 }
1893 break; 2005 break;
1894 default: 2006 default:
1895 UNREACHABLE(); 2007 UNREACHABLE();
(...skipping 148 matching lines...)
2044 break; 2156 break;
2045 case ne: 2157 case ne:
2046 ASSERT(!scratch.is(rs)); 2158 ASSERT(!scratch.is(rs));
2047 r2 = scratch; 2159 r2 = scratch;
2048 li(r2, rt); 2160 li(r2, rt);
2049 offset = shifted_branch_offset(L, false); 2161 offset = shifted_branch_offset(L, false);
2050 bne(rs, r2, offset); 2162 bne(rs, r2, offset);
2051 break; 2163 break;
2052 // Signed comparison. 2164 // Signed comparison.
2053 case greater: 2165 case greater:
2054 if (rt.imm32_ == 0) { 2166 if (rt.imm64_ == 0) {
2055 offset = shifted_branch_offset(L, false); 2167 offset = shifted_branch_offset(L, false);
2056 bgtz(rs, offset); 2168 bgtz(rs, offset);
2057 } else { 2169 } else {
2058 ASSERT(!scratch.is(rs)); 2170 ASSERT(!scratch.is(rs));
2059 r2 = scratch; 2171 r2 = scratch;
2060 li(r2, rt); 2172 li(r2, rt);
2061 slt(scratch, r2, rs); 2173 slt(scratch, r2, rs);
2062 offset = shifted_branch_offset(L, false); 2174 offset = shifted_branch_offset(L, false);
2063 bne(scratch, zero_reg, offset); 2175 bne(scratch, zero_reg, offset);
2064 } 2176 }
2065 break; 2177 break;
2066 case greater_equal: 2178 case greater_equal:
2067 if (rt.imm32_ == 0) { 2179 if (rt.imm64_ == 0) {
2068 offset = shifted_branch_offset(L, false); 2180 offset = shifted_branch_offset(L, false);
2069 bgez(rs, offset); 2181 bgez(rs, offset);
2070 } else if (is_int16(rt.imm32_)) { 2182 } else if (is_int16(rt.imm64_)) {
2071 slti(scratch, rs, rt.imm32_); 2183 slti(scratch, rs, rt.imm64_);
2072 offset = shifted_branch_offset(L, false); 2184 offset = shifted_branch_offset(L, false);
2073 beq(scratch, zero_reg, offset); 2185 beq(scratch, zero_reg, offset);
2074 } else { 2186 } else {
2075 ASSERT(!scratch.is(rs)); 2187 ASSERT(!scratch.is(rs));
2076 r2 = scratch; 2188 r2 = scratch;
2077 li(r2, rt); 2189 li(r2, rt);
2078 slt(scratch, rs, r2); 2190 slt(scratch, rs, r2);
2079 offset = shifted_branch_offset(L, false); 2191 offset = shifted_branch_offset(L, false);
2080 beq(scratch, zero_reg, offset); 2192 beq(scratch, zero_reg, offset);
2081 } 2193 }
2082 break; 2194 break;
2083 case less: 2195 case less:
2084 if (rt.imm32_ == 0) { 2196 if (rt.imm64_ == 0) {
2085 offset = shifted_branch_offset(L, false); 2197 offset = shifted_branch_offset(L, false);
2086 bltz(rs, offset); 2198 bltz(rs, offset);
2087 } else if (is_int16(rt.imm32_)) { 2199 } else if (is_int16(rt.imm64_)) {
2088 slti(scratch, rs, rt.imm32_); 2200 slti(scratch, rs, rt.imm64_);
2089 offset = shifted_branch_offset(L, false); 2201 offset = shifted_branch_offset(L, false);
2090 bne(scratch, zero_reg, offset); 2202 bne(scratch, zero_reg, offset);
2091 } else { 2203 } else {
2092 ASSERT(!scratch.is(rs)); 2204 ASSERT(!scratch.is(rs));
2093 r2 = scratch; 2205 r2 = scratch;
2094 li(r2, rt); 2206 li(r2, rt);
2095 slt(scratch, rs, r2); 2207 slt(scratch, rs, r2);
2096 offset = shifted_branch_offset(L, false); 2208 offset = shifted_branch_offset(L, false);
2097 bne(scratch, zero_reg, offset); 2209 bne(scratch, zero_reg, offset);
2098 } 2210 }
2099 break; 2211 break;
2100 case less_equal: 2212 case less_equal:
2101 if (rt.imm32_ == 0) { 2213 if (rt.imm64_ == 0) {
2102 offset = shifted_branch_offset(L, false); 2214 offset = shifted_branch_offset(L, false);
2103 blez(rs, offset); 2215 blez(rs, offset);
2104 } else { 2216 } else {
2105 ASSERT(!scratch.is(rs)); 2217 ASSERT(!scratch.is(rs));
2106 r2 = scratch; 2218 r2 = scratch;
2107 li(r2, rt); 2219 li(r2, rt);
2108 slt(scratch, r2, rs); 2220 slt(scratch, r2, rs);
2109 offset = shifted_branch_offset(L, false); 2221 offset = shifted_branch_offset(L, false);
2110 beq(scratch, zero_reg, offset); 2222 beq(scratch, zero_reg, offset);
2111 } 2223 }
2112 break; 2224 break;
2113 // Unsigned comparison. 2225 // Unsigned comparison.
2114 case Ugreater: 2226 case Ugreater:
2115 if (rt.imm32_ == 0) { 2227 if (rt.imm64_ == 0) {
2116 offset = shifted_branch_offset(L, false); 2228 offset = shifted_branch_offset(L, false);
2117 bne(rs, zero_reg, offset); 2229 bne(rs, zero_reg, offset);
2118 } else { 2230 } else {
2119 ASSERT(!scratch.is(rs)); 2231 ASSERT(!scratch.is(rs));
2120 r2 = scratch; 2232 r2 = scratch;
2121 li(r2, rt); 2233 li(r2, rt);
2122 sltu(scratch, r2, rs); 2234 sltu(scratch, r2, rs);
2123 offset = shifted_branch_offset(L, false); 2235 offset = shifted_branch_offset(L, false);
2124 bne(scratch, zero_reg, offset); 2236 bne(scratch, zero_reg, offset);
2125 } 2237 }
2126 break; 2238 break;
2127 case Ugreater_equal: 2239 case Ugreater_equal:
2128 if (rt.imm32_ == 0) { 2240 if (rt.imm64_ == 0) {
2129 offset = shifted_branch_offset(L, false); 2241 offset = shifted_branch_offset(L, false);
2130 bgez(rs, offset); 2242 bgez(rs, offset);
2131 } else if (is_int16(rt.imm32_)) { 2243 } else if (is_int16(rt.imm64_)) {
2132 sltiu(scratch, rs, rt.imm32_); 2244 sltiu(scratch, rs, rt.imm64_);
2133 offset = shifted_branch_offset(L, false); 2245 offset = shifted_branch_offset(L, false);
2134 beq(scratch, zero_reg, offset); 2246 beq(scratch, zero_reg, offset);
2135 } else { 2247 } else {
2136 ASSERT(!scratch.is(rs)); 2248 ASSERT(!scratch.is(rs));
2137 r2 = scratch; 2249 r2 = scratch;
2138 li(r2, rt); 2250 li(r2, rt);
2139 sltu(scratch, rs, r2); 2251 sltu(scratch, rs, r2);
2140 offset = shifted_branch_offset(L, false); 2252 offset = shifted_branch_offset(L, false);
2141 beq(scratch, zero_reg, offset); 2253 beq(scratch, zero_reg, offset);
2142 } 2254 }
2143 break; 2255 break;
2144 case Uless: 2256 case Uless:
2145 if (rt.imm32_ == 0) { 2257 if (rt.imm64_ == 0) {
2146 // No code needs to be emitted. 2258 // No code needs to be emitted.
2147 return; 2259 return;
2148 } else if (is_int16(rt.imm32_)) { 2260 } else if (is_int16(rt.imm64_)) {
2149 sltiu(scratch, rs, rt.imm32_); 2261 sltiu(scratch, rs, rt.imm64_);
2150 offset = shifted_branch_offset(L, false); 2262 offset = shifted_branch_offset(L, false);
2151 bne(scratch, zero_reg, offset); 2263 bne(scratch, zero_reg, offset);
2152 } else { 2264 } else {
2153 ASSERT(!scratch.is(rs)); 2265 ASSERT(!scratch.is(rs));
2154 r2 = scratch; 2266 r2 = scratch;
2155 li(r2, rt); 2267 li(r2, rt);
2156 sltu(scratch, rs, r2); 2268 sltu(scratch, rs, r2);
2157 offset = shifted_branch_offset(L, false); 2269 offset = shifted_branch_offset(L, false);
2158 bne(scratch, zero_reg, offset); 2270 bne(scratch, zero_reg, offset);
2159 } 2271 }
2160 break; 2272 break;
2161 case Uless_equal: 2273 case Uless_equal:
2162 if (rt.imm32_ == 0) { 2274 if (rt.imm64_ == 0) {
2163 offset = shifted_branch_offset(L, false); 2275 offset = shifted_branch_offset(L, false);
2164 beq(rs, zero_reg, offset); 2276 beq(rs, zero_reg, offset);
2165 } else { 2277 } else {
2166 ASSERT(!scratch.is(rs)); 2278 ASSERT(!scratch.is(rs));
2167 r2 = scratch; 2279 r2 = scratch;
2168 li(r2, rt); 2280 li(r2, rt);
2169 sltu(scratch, r2, rs); 2281 sltu(scratch, r2, rs);
2170 offset = shifted_branch_offset(L, false); 2282 offset = shifted_branch_offset(L, false);
2171 beq(scratch, zero_reg, offset); 2283 beq(scratch, zero_reg, offset);
2172 } 2284 }
(...skipping 106 matching lines...)
2279 break; 2391 break;
2280 case ne: 2392 case ne:
2281 beq(rs, r2, 2); 2393 beq(rs, r2, 2);
2282 nop(); 2394 nop();
2283 bal(offset); 2395 bal(offset);
2284 break; 2396 break;
2285 2397
2286 // Signed comparison. 2398 // Signed comparison.
2287 case greater: 2399 case greater:
2288 slt(scratch, r2, rs); 2400 slt(scratch, r2, rs);
2289 addiu(scratch, scratch, -1); 2401 daddiu(scratch, scratch, -1);
2290 bgezal(scratch, offset); 2402 bgezal(scratch, offset);
2291 break; 2403 break;
2292 case greater_equal: 2404 case greater_equal:
2293 slt(scratch, rs, r2); 2405 slt(scratch, rs, r2);
2294 addiu(scratch, scratch, -1); 2406 daddiu(scratch, scratch, -1);
2295 bltzal(scratch, offset); 2407 bltzal(scratch, offset);
2296 break; 2408 break;
2297 case less: 2409 case less:
2298 slt(scratch, rs, r2); 2410 slt(scratch, rs, r2);
2299 addiu(scratch, scratch, -1); 2411 daddiu(scratch, scratch, -1);
2300 bgezal(scratch, offset); 2412 bgezal(scratch, offset);
2301 break; 2413 break;
2302 case less_equal: 2414 case less_equal:
2303 slt(scratch, r2, rs); 2415 slt(scratch, r2, rs);
2304 addiu(scratch, scratch, -1); 2416 daddiu(scratch, scratch, -1);
2305 bltzal(scratch, offset); 2417 bltzal(scratch, offset);
2306 break; 2418 break;
2307 2419
2308 // Unsigned comparison. 2420 // Unsigned comparison.
2309 case Ugreater: 2421 case Ugreater:
2310 sltu(scratch, r2, rs); 2422 sltu(scratch, r2, rs);
2311 addiu(scratch, scratch, -1); 2423 daddiu(scratch, scratch, -1);
2312 bgezal(scratch, offset); 2424 bgezal(scratch, offset);
2313 break; 2425 break;
2314 case Ugreater_equal: 2426 case Ugreater_equal:
2315 sltu(scratch, rs, r2); 2427 sltu(scratch, rs, r2);
2316 addiu(scratch, scratch, -1); 2428 daddiu(scratch, scratch, -1);
2317 bltzal(scratch, offset); 2429 bltzal(scratch, offset);
2318 break; 2430 break;
2319 case Uless: 2431 case Uless:
2320 sltu(scratch, rs, r2); 2432 sltu(scratch, rs, r2);
2321 addiu(scratch, scratch, -1); 2433 daddiu(scratch, scratch, -1);
2322 bgezal(scratch, offset); 2434 bgezal(scratch, offset);
2323 break; 2435 break;
2324 case Uless_equal: 2436 case Uless_equal:
2325 sltu(scratch, r2, rs); 2437 sltu(scratch, r2, rs);
2326 addiu(scratch, scratch, -1); 2438 daddiu(scratch, scratch, -1);
2327 bltzal(scratch, offset); 2439 bltzal(scratch, offset);
2328 break; 2440 break;
2329 2441
2330 default: 2442 default:
2331 UNREACHABLE(); 2443 UNREACHABLE();
2332 } 2444 }
2333 } 2445 }
2334 // Emit a nop in the branch delay slot if required. 2446 // Emit a nop in the branch delay slot if required.
2335 if (bdslot == PROTECT) 2447 if (bdslot == PROTECT)
2336 nop(); 2448 nop();
(...skipping 40 matching lines...)
2377 case ne: 2489 case ne:
2378 beq(rs, r2, 2); 2490 beq(rs, r2, 2);
2379 nop(); 2491 nop();
2380 offset = shifted_branch_offset(L, false); 2492 offset = shifted_branch_offset(L, false);
2381 bal(offset); 2493 bal(offset);
2382 break; 2494 break;
2383 2495
2384 // Signed comparison. 2496 // Signed comparison.
2385 case greater: 2497 case greater:
2386 slt(scratch, r2, rs); 2498 slt(scratch, r2, rs);
2387 addiu(scratch, scratch, -1); 2499 daddiu(scratch, scratch, -1);
2388 offset = shifted_branch_offset(L, false); 2500 offset = shifted_branch_offset(L, false);
2389 bgezal(scratch, offset); 2501 bgezal(scratch, offset);
2390 break; 2502 break;
2391 case greater_equal: 2503 case greater_equal:
2392 slt(scratch, rs, r2); 2504 slt(scratch, rs, r2);
2393 addiu(scratch, scratch, -1); 2505 daddiu(scratch, scratch, -1);
2394 offset = shifted_branch_offset(L, false); 2506 offset = shifted_branch_offset(L, false);
2395 bltzal(scratch, offset); 2507 bltzal(scratch, offset);
2396 break; 2508 break;
2397 case less: 2509 case less:
2398 slt(scratch, rs, r2); 2510 slt(scratch, rs, r2);
2399 addiu(scratch, scratch, -1); 2511 daddiu(scratch, scratch, -1);
2400 offset = shifted_branch_offset(L, false); 2512 offset = shifted_branch_offset(L, false);
2401 bgezal(scratch, offset); 2513 bgezal(scratch, offset);
2402 break; 2514 break;
2403 case less_equal: 2515 case less_equal:
2404 slt(scratch, r2, rs); 2516 slt(scratch, r2, rs);
2405 addiu(scratch, scratch, -1); 2517 daddiu(scratch, scratch, -1);
2406 offset = shifted_branch_offset(L, false); 2518 offset = shifted_branch_offset(L, false);
2407 bltzal(scratch, offset); 2519 bltzal(scratch, offset);
2408 break; 2520 break;
2409 2521
2410 // Unsigned comparison. 2522 // Unsigned comparison.
2411 case Ugreater: 2523 case Ugreater:
2412 sltu(scratch, r2, rs); 2524 sltu(scratch, r2, rs);
2413 addiu(scratch, scratch, -1); 2525 daddiu(scratch, scratch, -1);
2414 offset = shifted_branch_offset(L, false); 2526 offset = shifted_branch_offset(L, false);
2415 bgezal(scratch, offset); 2527 bgezal(scratch, offset);
2416 break; 2528 break;
2417 case Ugreater_equal: 2529 case Ugreater_equal:
2418 sltu(scratch, rs, r2); 2530 sltu(scratch, rs, r2);
2419 addiu(scratch, scratch, -1); 2531 daddiu(scratch, scratch, -1);
2420 offset = shifted_branch_offset(L, false); 2532 offset = shifted_branch_offset(L, false);
2421 bltzal(scratch, offset); 2533 bltzal(scratch, offset);
2422 break; 2534 break;
2423 case Uless: 2535 case Uless:
2424 sltu(scratch, rs, r2); 2536 sltu(scratch, rs, r2);
2425 addiu(scratch, scratch, -1); 2537 daddiu(scratch, scratch, -1);
2426 offset = shifted_branch_offset(L, false); 2538 offset = shifted_branch_offset(L, false);
2427 bgezal(scratch, offset); 2539 bgezal(scratch, offset);
2428 break; 2540 break;
2429 case Uless_equal: 2541 case Uless_equal:
2430 sltu(scratch, r2, rs); 2542 sltu(scratch, r2, rs);
2431 addiu(scratch, scratch, -1); 2543 daddiu(scratch, scratch, -1);
2432 offset = shifted_branch_offset(L, false); 2544 offset = shifted_branch_offset(L, false);
2433 bltzal(scratch, offset); 2545 bltzal(scratch, offset);
2434 break; 2546 break;
2435 2547
2436 default: 2548 default:
2437 UNREACHABLE(); 2549 UNREACHABLE();
2438 } 2550 }
2439 } 2551 }
2440 // Check that offset actually fits in an int16_t. 2552 // Check that offset actually fits in an int16_t.
2441 ASSERT(is_int16(offset)); 2553 ASSERT(is_int16(offset));
(...skipping 109 matching lines...)
2551 } 2663 }
2552 2664
2553 2665
2554 int MacroAssembler::CallSize(Address target, 2666 int MacroAssembler::CallSize(Address target,
2555 RelocInfo::Mode rmode, 2667 RelocInfo::Mode rmode,
2556 Condition cond, 2668 Condition cond,
2557 Register rs, 2669 Register rs,
2558 const Operand& rt, 2670 const Operand& rt,
2559 BranchDelaySlot bd) { 2671 BranchDelaySlot bd) {
2560 int size = CallSize(t9, cond, rs, rt, bd); 2672 int size = CallSize(t9, cond, rs, rt, bd);
2561 return size + 2 * kInstrSize; 2673 return size + 4 * kInstrSize;
2562 } 2674 }
2563 2675
2564 2676
2565 void MacroAssembler::Call(Address target, 2677 void MacroAssembler::Call(Address target,
2566 RelocInfo::Mode rmode, 2678 RelocInfo::Mode rmode,
2567 Condition cond, 2679 Condition cond,
2568 Register rs, 2680 Register rs,
2569 const Operand& rt, 2681 const Operand& rt,
2570 BranchDelaySlot bd) { 2682 BranchDelaySlot bd) {
2571 BlockTrampolinePoolScope block_trampoline_pool(this); 2683 BlockTrampolinePoolScope block_trampoline_pool(this);
2572 Label start; 2684 Label start;
2573 bind(&start); 2685 bind(&start);
2574 int32_t target_int = reinterpret_cast<int32_t>(target); 2686 int64_t target_int = reinterpret_cast<int64_t>(target);
2575 // Must record previous source positions before the 2687 // Must record previous source positions before the
2576 // li() generates a new code target. 2688 // li() generates a new code target.
2577 positions_recorder()->WriteRecordedPositions(); 2689 positions_recorder()->WriteRecordedPositions();
2578 li(t9, Operand(target_int, rmode), CONSTANT_SIZE); 2690 li(t9, Operand(target_int, rmode), ADDRESS_LOAD);
2579 Call(t9, cond, rs, rt, bd); 2691 Call(t9, cond, rs, rt, bd);
2580 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd), 2692 ASSERT_EQ(CallSize(target, rmode, cond, rs, rt, bd),
2581 SizeOfCodeGeneratedSince(&start)); 2693 SizeOfCodeGeneratedSince(&start));
2582 } 2694 }
2583 2695
2584 2696
2585 int MacroAssembler::CallSize(Handle<Code> code, 2697 int MacroAssembler::CallSize(Handle<Code> code,
2586 RelocInfo::Mode rmode, 2698 RelocInfo::Mode rmode,
2587 TypeFeedbackId ast_id, 2699 TypeFeedbackId ast_id,
2588 Condition cond, 2700 Condition cond,
(...skipping 32 matching lines...)
2621 Register rs, 2733 Register rs,
2622 const Operand& rt, 2734 const Operand& rt,
2623 BranchDelaySlot bd) { 2735 BranchDelaySlot bd) {
2624 Jump(ra, cond, rs, rt, bd); 2736 Jump(ra, cond, rs, rt, bd);
2625 } 2737 }
2626 2738
2627 2739
2628 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) { 2740 void MacroAssembler::J(Label* L, BranchDelaySlot bdslot) {
2629 BlockTrampolinePoolScope block_trampoline_pool(this); 2741 BlockTrampolinePoolScope block_trampoline_pool(this);
2630 2742
2631 uint32_t imm28; 2743 uint64_t imm28;
2632 imm28 = jump_address(L); 2744 imm28 = jump_address(L);
2633 imm28 &= kImm28Mask; 2745 imm28 &= kImm28Mask;
2634 { BlockGrowBufferScope block_buf_growth(this); 2746 { BlockGrowBufferScope block_buf_growth(this);
2635 // Buffer growth (and relocation) must be blocked for internal references 2747 // Buffer growth (and relocation) must be blocked for internal references
2636 // until associated instructions are emitted and available to be patched. 2748 // until associated instructions are emitted and available to be patched.
2637 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); 2749 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2638 j(imm28); 2750 j(imm28);
2639 } 2751 }
2640 // Emit a nop in the branch delay slot if required. 2752 // Emit a nop in the branch delay slot if required.
2641 if (bdslot == PROTECT) 2753 if (bdslot == PROTECT)
2642 nop(); 2754 nop();
2643 } 2755 }
2644 2756
2645 2757
2646 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) { 2758 void MacroAssembler::Jr(Label* L, BranchDelaySlot bdslot) {
2647 BlockTrampolinePoolScope block_trampoline_pool(this); 2759 BlockTrampolinePoolScope block_trampoline_pool(this);
2648 2760
2649 uint32_t imm32; 2761 uint64_t imm64;
2650 imm32 = jump_address(L); 2762 imm64 = jump_address(L);
2651 { BlockGrowBufferScope block_buf_growth(this); 2763 { BlockGrowBufferScope block_buf_growth(this);
2652 // Buffer growth (and relocation) must be blocked for internal references 2764 // Buffer growth (and relocation) must be blocked for internal references
2653 // until associated instructions are emitted and available to be patched. 2765 // until associated instructions are emitted and available to be patched.
2654 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); 2766 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2655 lui(at, (imm32 & kHiMask) >> kLuiShift); 2767 li(at, Operand(imm64), ADDRESS_LOAD);
2656 ori(at, at, (imm32 & kImm16Mask));
2657 } 2768 }
2658 jr(at); 2769 jr(at);
2659 2770
2660 // Emit a nop in the branch delay slot if required. 2771 // Emit a nop in the branch delay slot if required.
2661 if (bdslot == PROTECT) 2772 if (bdslot == PROTECT)
2662 nop(); 2773 nop();
2663 } 2774 }
2664 2775
2665 2776
2666 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) { 2777 void MacroAssembler::Jalr(Label* L, BranchDelaySlot bdslot) {
2667 BlockTrampolinePoolScope block_trampoline_pool(this); 2778 BlockTrampolinePoolScope block_trampoline_pool(this);
2668 2779
2669 uint32_t imm32; 2780 uint64_t imm64;
2670 imm32 = jump_address(L); 2781 imm64 = jump_address(L);
2671 { BlockGrowBufferScope block_buf_growth(this); 2782 { BlockGrowBufferScope block_buf_growth(this);
2672 // Buffer growth (and relocation) must be blocked for internal references 2783 // Buffer growth (and relocation) must be blocked for internal references
2673 // until associated instructions are emitted and available to be patched. 2784 // until associated instructions are emitted and available to be patched.
2674 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE); 2785 RecordRelocInfo(RelocInfo::INTERNAL_REFERENCE);
2675 lui(at, (imm32 & kHiMask) >> kLuiShift); 2786 li(at, Operand(imm64), ADDRESS_LOAD);
2676 ori(at, at, (imm32 & kImm16Mask));
2677 } 2787 }
2678 jalr(at); 2788 jalr(at);
2679 2789
2680 // Emit a nop in the branch delay slot if required. 2790 // Emit a nop in the branch delay slot if required.
2681 if (bdslot == PROTECT) 2791 if (bdslot == PROTECT)
2682 nop(); 2792 nop();
2683 } 2793 }
2684 2794
2685 2795
2686 void MacroAssembler::DropAndRet(int drop) { 2796 void MacroAssembler::DropAndRet(int drop) {
2687 Ret(USE_DELAY_SLOT); 2797 Ret(USE_DELAY_SLOT);
2688 addiu(sp, sp, drop * kPointerSize); 2798 daddiu(sp, sp, drop * kPointerSize);
2689 } 2799 }
2690 2800
2691 void MacroAssembler::DropAndRet(int drop, 2801 void MacroAssembler::DropAndRet(int drop,
2692 Condition cond, 2802 Condition cond,
2693 Register r1, 2803 Register r1,
2694 const Operand& r2) { 2804 const Operand& r2) {
2695 // Both Drop and Ret need to be conditional. 2805 // Both Drop and Ret need to be conditional.
2696 Label skip; 2806 Label skip;
2697 if (cond != cc_always) { 2807 if (cond != cc_always) {
2698 Branch(&skip, NegateCondition(cond), r1, r2); 2808 Branch(&skip, NegateCondition(cond), r1, r2);
(...skipping 15 matching lines...)
2714 if (count <= 0) { 2824 if (count <= 0) {
2715 return; 2825 return;
2716 } 2826 }
2717 2827
2718 Label skip; 2828 Label skip;
2719 2829
2720 if (cond != al) { 2830 if (cond != al) {
2721 Branch(&skip, NegateCondition(cond), reg, op); 2831 Branch(&skip, NegateCondition(cond), reg, op);
2722 } 2832 }
2723 2833
2724 addiu(sp, sp, count * kPointerSize); 2834 daddiu(sp, sp, count * kPointerSize);
2725 2835
2726 if (cond != al) { 2836 if (cond != al) {
2727 bind(&skip); 2837 bind(&skip);
2728 } 2838 }
2729 } 2839 }
2730 2840
2731 2841
2732 2842
2733 void MacroAssembler::Swap(Register reg1, 2843 void MacroAssembler::Swap(Register reg1,
2734 Register reg2, 2844 Register reg2,
(...skipping 14 matching lines...)
2749 BranchAndLink(target); 2859 BranchAndLink(target);
2750 } 2860 }
2751 2861
2752 2862
2753 void MacroAssembler::Push(Handle<Object> handle) { 2863 void MacroAssembler::Push(Handle<Object> handle) {
2754 li(at, Operand(handle)); 2864 li(at, Operand(handle));
2755 push(at); 2865 push(at);
2756 } 2866 }
2757 2867
2758 2868
2869 void MacroAssembler::PushRegisterAsTwoSmis(Register src, Register scratch) {
2870 ASSERT(!src.is(scratch));
2871 mov(scratch, src);
2872 dsrl32(src, src, 0);
2873 dsll32(src, src, 0);
2874 push(src);
2875 dsll32(scratch, scratch, 0);
2876 push(scratch);
2877 }
2878
2879
2880 void MacroAssembler::PopRegisterAsTwoSmis(Register dst, Register scratch) {
2881 ASSERT(!dst.is(scratch));
2882 pop(scratch);
2883 dsrl32(scratch, scratch, 0);
2884 pop(dst);
2885 dsrl32(dst, dst, 0);
2886 dsll32(dst, dst, 0);
2887 or_(dst, dst, scratch);
2888 }
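
PushRegisterAsTwoSmis splits one 64-bit value into two stack words that are each a valid smi (payload in the upper 32 bits, zeros below); PopRegisterAsTwoSmis reassembles the original value. Bit-level sketch of the round trip (illustrative only):

#include <cstdint>
#include <utility>

// First element of the pair is pushed first (ends up deeper on the stack).
std::pair<uint64_t, uint64_t> SplitAsTwoSmis(uint64_t src) {
  uint64_t high_as_smi = (src >> 32) << 32;  // dsrl32 + dsll32 on src
  uint64_t low_as_smi = src << 32;           // dsll32 on scratch
  return {high_as_smi, low_as_smi};
}

// Pop order: the low-word smi comes off the stack first.
uint64_t JoinFromTwoSmis(uint64_t low_as_smi, uint64_t high_as_smi) {
  uint64_t low = low_as_smi >> 32;            // dsrl32(scratch, scratch, 0)
  uint64_t high = (high_as_smi >> 32) << 32;  // dsrl32 + dsll32 on dst
  return high | low;                          // or_(dst, dst, scratch)
}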
2889
2890
2759 void MacroAssembler::DebugBreak() { 2891 void MacroAssembler::DebugBreak() {
2760 PrepareCEntryArgs(0); 2892 PrepareCEntryArgs(0);
2761 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate())); 2893 PrepareCEntryFunction(ExternalReference(Runtime::kDebugBreak, isolate()));
2762 CEntryStub ces(isolate(), 1); 2894 CEntryStub ces(isolate(), 1);
2763 ASSERT(AllowThisStubCall(&ces)); 2895 ASSERT(AllowThisStubCall(&ces));
2764 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK); 2896 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
2765 } 2897 }
2766 2898
2767 2899
2768 // --------------------------------------------------------------------------- 2900 // ---------------------------------------------------------------------------
2769 // Exception handling. 2901 // Exception handling.
2770 2902
2771 void MacroAssembler::PushTryHandler(StackHandler::Kind kind, 2903 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
2772 int handler_index) { 2904 int handler_index) {
2773 // Adjust this code if not the case. 2905 // Adjust this code if not the case.
2774 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); 2906 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2775 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); 2907 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2776 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); 2908 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2777 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); 2909 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2778 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); 2910 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2779 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); 2911 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2780 2912
2781 // For the JSEntry handler, we must preserve a0-a3 and s0. 2913 // For the JSEntry handler, we must preserve a0-a3 and s0.
2782 // t1-t3 are available. We will build up the handler from the bottom by 2914 // a5-a7 are available. We will build up the handler from the bottom by
2783 // pushing on the stack. 2915 // pushing on the stack.
2784 // Set up the code object (t1) and the state (t2) for pushing. 2916 // Set up the code object (a5) and the state (a6) for pushing.
2785 unsigned state = 2917 unsigned state =
2786 StackHandler::IndexField::encode(handler_index) | 2918 StackHandler::IndexField::encode(handler_index) |
2787 StackHandler::KindField::encode(kind); 2919 StackHandler::KindField::encode(kind);
2788 li(t1, Operand(CodeObject()), CONSTANT_SIZE); 2920 li(a5, Operand(CodeObject()), CONSTANT_SIZE);
2789 li(t2, Operand(state)); 2921 li(a6, Operand(state));
2790 2922
2791 // Push the frame pointer, context, state, and code object. 2923 // Push the frame pointer, context, state, and code object.
2792 if (kind == StackHandler::JS_ENTRY) { 2924 if (kind == StackHandler::JS_ENTRY) {
2793 ASSERT_EQ(Smi::FromInt(0), 0); 2925 ASSERT_EQ(Smi::FromInt(0), 0);
2794 // The second zero_reg indicates no context. 2926 // The second zero_reg indicates no context.
2795 // The first zero_reg is the NULL frame pointer. 2927 // The first zero_reg is the NULL frame pointer.
2796 // The operands are reversed to match the order of MultiPush/Pop. 2928 // The operands are reversed to match the order of MultiPush/Pop.
2797 Push(zero_reg, zero_reg, t2, t1); 2929 Push(zero_reg, zero_reg, a6, a5);
2798 } else { 2930 } else {
2799 MultiPush(t1.bit() | t2.bit() | cp.bit() | fp.bit()); 2931 MultiPush(a5.bit() | a6.bit() | cp.bit() | fp.bit());
2800 } 2932 }
2801 2933
2802 // Link the current handler as the next handler. 2934 // Link the current handler as the next handler.
2803 li(t2, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); 2935 li(a6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2804 lw(t1, MemOperand(t2)); 2936 ld(a5, MemOperand(a6));
2805 push(t1); 2937 push(a5);
2806 // Set this new handler as the current one. 2938 // Set this new handler as the current one.
2807 sw(sp, MemOperand(t2)); 2939 sd(sp, MemOperand(a6));
2808 } 2940 }
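
The STATIC_ASSERTs pin down the frame this code builds: five pointer-size slots with the link to the previous handler at the top of the stack. With 8-byte pointers on mips64 the layout can be pictured as this struct (illustrative only):

#include <cstdint>

// sp points at 'next' once PushTryHandler is done.
struct StackHandlerFrame {
  uint64_t next;     // kNextOffset:    previous handler, pushed last
  uint64_t code;     // kCodeOffset:    code object (a5)
  uint64_t state;    // kStateOffset:   handler index and kind (a6)
  uint64_t context;  // kContextOffset: cp, or 0 for JS_ENTRY
  uint64_t fp;       // kFPOffset:      frame pointer, or 0 for JS_ENTRY
};
static_assert(sizeof(StackHandlerFrame) == 5 * 8, "five pointer-size slots");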
2809 2941
2810 2942
2811 void MacroAssembler::PopTryHandler() { 2943 void MacroAssembler::PopTryHandler() {
2812 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); 2944 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2813 pop(a1); 2945 pop(a1);
2814 Addu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize)); 2946 Daddu(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
2815 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); 2947 li(at, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2816 sw(a1, MemOperand(at)); 2948 sd(a1, MemOperand(at));
2817 } 2949 }
2818 2950
2819 2951
2820 void MacroAssembler::JumpToHandlerEntry() { 2952 void MacroAssembler::JumpToHandlerEntry() {
2821 // Compute the handler entry address and jump to it. The handler table is 2953 // Compute the handler entry address and jump to it. The handler table is
2822 // a fixed array of (smi-tagged) code offsets. 2954 // a fixed array of (smi-tagged) code offsets.
2823 // v0 = exception, a1 = code object, a2 = state. 2955 // v0 = exception, a1 = code object, a2 = state.
2824 lw(a3, FieldMemOperand(a1, Code::kHandlerTableOffset)); // Handler table. 2956 Uld(a3, FieldMemOperand(a1, Code::kHandlerTableOffset));
2825 Addu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); 2957 Daddu(a3, a3, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
2826 srl(a2, a2, StackHandler::kKindWidth); // Handler index. 2958 dsrl(a2, a2, StackHandler::kKindWidth); // Handler index.
2827 sll(a2, a2, kPointerSizeLog2); 2959 dsll(a2, a2, kPointerSizeLog2);
2828 Addu(a2, a3, a2); 2960 Daddu(a2, a3, a2);
2829 lw(a2, MemOperand(a2)); // Smi-tagged offset. 2961 ld(a2, MemOperand(a2)); // Smi-tagged offset.
2830 Addu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start. 2962 Daddu(a1, a1, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
2831 sra(t9, a2, kSmiTagSize); 2963 dsra32(t9, a2, 0);
2832 Addu(t9, t9, a1); 2964 Daddu(t9, t9, a1);
2833 Jump(t9); // Jump. 2965 Jump(t9); // Jump.
2834 } 2966 }
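
The handler entry is found by decoding the handler index from the state word, indexing the code object's handler table (a FixedArray of smi-tagged code offsets), untagging the offset with dsra32 (the smi payload lives in the upper 32 bits), and adding it to the code start. A host-side sketch; kind_width is passed in because the exact StackHandler::kKindWidth value is not visible in this hunk (illustrative only):

#include <cstdint>

uint64_t HandlerEntry(uint64_t code_start, const int64_t* handler_table,
                      uint64_t state, int kind_width) {
  uint64_t index = state >> kind_width;              // dsrl(a2, a2, kKindWidth)
  int64_t smi_tagged_offset = handler_table[index];  // dsll + Daddu + ld
  int64_t offset = smi_tagged_offset >> 32;          // dsra32(t9, a2, 0): untag
  return code_start + offset;                        // Daddu(t9, t9, a1)
}

Here handler_table is taken to already point at the first element, i.e. past the FixedArray header that the real code skips with the kHeaderSize adjustment.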
2835 2967
2836 2968
2837 void MacroAssembler::Throw(Register value) { 2969 void MacroAssembler::Throw(Register value) {
2838 // Adjust this code if not the case. 2970 // Adjust this code if not the case.
2839 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); 2971 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2840 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0); 2972 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
2841 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); 2973 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2842 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); 2974 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2843 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); 2975 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2844 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); 2976 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2845 2977
2846 // The exception is expected in v0. 2978 // The exception is expected in v0.
2847 Move(v0, value); 2979 Move(v0, value);
2848 2980
2849 // Drop the stack pointer to the top of the top handler. 2981 // Drop the stack pointer to the top of the top handler.
2850 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, 2982 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress,
2851 isolate()))); 2983 isolate())));
2852 lw(sp, MemOperand(a3)); 2984 ld(sp, MemOperand(a3));
2853 2985
2854 // Restore the next handler. 2986 // Restore the next handler.
2855 pop(a2); 2987 pop(a2);
2856 sw(a2, MemOperand(a3)); 2988 sd(a2, MemOperand(a3));
2857 2989
2858 // Get the code object (a1) and state (a2). Restore the context and frame 2990 // Get the code object (a1) and state (a2). Restore the context and frame
2859 // pointer. 2991 // pointer.
2860 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit()); 2992 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2861 2993
2862 // If the handler is a JS frame, restore the context to the frame. 2994 // If the handler is a JS frame, restore the context to the frame.
2863 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp 2995 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
2864 // or cp. 2996 // or cp.
2865 Label done; 2997 Label done;
2866 Branch(&done, eq, cp, Operand(zero_reg)); 2998 Branch(&done, eq, cp, Operand(zero_reg));
2867 sw(cp, MemOperand(fp, StandardFrameConstants::kContextOffset)); 2999 sd(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
2868 bind(&done); 3000 bind(&done);
2869 3001
2870 JumpToHandlerEntry(); 3002 JumpToHandlerEntry();
2871 } 3003 }
2872 3004
2873 3005
2874 void MacroAssembler::ThrowUncatchable(Register value) { 3006 void MacroAssembler::ThrowUncatchable(Register value) {
2875 // Adjust this code if not the case. 3007 // Adjust this code if not the case.
2876 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize); 3008 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
2877 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize); 3009 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
2878 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize); 3010 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
2879 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize); 3011 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
2880 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize); 3012 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
2881 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize); 3013 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
2882 3014
2883 // The exception is expected in v0. 3015 // The exception is expected in v0.
2884 if (!value.is(v0)) { 3016 if (!value.is(v0)) {
2885 mov(v0, value); 3017 mov(v0, value);
2886 } 3018 }
2887 // Drop the stack pointer to the top of the top stack handler. 3019 // Drop the stack pointer to the top of the top stack handler.
2888 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate()))); 3020 li(a3, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
2889 lw(sp, MemOperand(a3)); 3021 ld(sp, MemOperand(a3));
2890 3022
2891 // Unwind the handlers until the ENTRY handler is found. 3023 // Unwind the handlers until the ENTRY handler is found.
2892 Label fetch_next, check_kind; 3024 Label fetch_next, check_kind;
2893 jmp(&check_kind); 3025 jmp(&check_kind);
2894 bind(&fetch_next); 3026 bind(&fetch_next);
2895 lw(sp, MemOperand(sp, StackHandlerConstants::kNextOffset)); 3027 ld(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
2896 3028
2897 bind(&check_kind); 3029 bind(&check_kind);
2898 STATIC_ASSERT(StackHandler::JS_ENTRY == 0); 3030 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
2899 lw(a2, MemOperand(sp, StackHandlerConstants::kStateOffset)); 3031 ld(a2, MemOperand(sp, StackHandlerConstants::kStateOffset));
2900 And(a2, a2, Operand(StackHandler::KindField::kMask)); 3032 And(a2, a2, Operand(StackHandler::KindField::kMask));
2901 Branch(&fetch_next, ne, a2, Operand(zero_reg)); 3033 Branch(&fetch_next, ne, a2, Operand(zero_reg));
2902 3034
2903 // Set the top handler address to next handler past the top ENTRY handler. 3035 // Set the top handler address to next handler past the top ENTRY handler.
2904 pop(a2); 3036 pop(a2);
2905 sw(a2, MemOperand(a3)); 3037 sd(a2, MemOperand(a3));
2906 3038
2907 // Get the code object (a1) and state (a2). Clear the context and frame 3039 // Get the code object (a1) and state (a2). Clear the context and frame
2908 // pointer (0 was saved in the handler). 3040 // pointer (0 was saved in the handler).
2909 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit()); 3041 MultiPop(a1.bit() | a2.bit() | cp.bit() | fp.bit());
2910 3042
2911 JumpToHandlerEntry(); 3043 JumpToHandlerEntry();
2912 } 3044 }
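The fetch_next/check_kind loop above walks the handler chain until it reaches a handler whose kind is StackHandler::JS_ENTRY (0). A rough, self-contained sketch of that walk; the word indices and the mask parameter are assumptions matching the offsets asserted above, not V8 API.

#include <cstdint>

// handler points at a record laid out as in the sketch after Throw;
// kind_mask stands in for StackHandler::KindField::kMask.
uint64_t* FindEntryHandler(uint64_t* handler, uint64_t kind_mask) {
  while ((handler[2] & kind_mask) != 0) {               // kStateOffset == 2 words
    handler = reinterpret_cast<uint64_t*>(handler[0]);  // kNextOffset == 0 words
  }
  return handler;  // first handler whose kind is JS_ENTRY (== 0)
}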
2913 3045
2914 3046
2915 void MacroAssembler::Allocate(int object_size, 3047 void MacroAssembler::Allocate(int object_size,
(...skipping 18 matching lines...)
2934 ASSERT(!result.is(scratch2)); 3066 ASSERT(!result.is(scratch2));
2935 ASSERT(!scratch1.is(scratch2)); 3067 ASSERT(!scratch1.is(scratch2));
2936 ASSERT(!scratch1.is(t9)); 3068 ASSERT(!scratch1.is(t9));
2937 ASSERT(!scratch2.is(t9)); 3069 ASSERT(!scratch2.is(t9));
2938 ASSERT(!result.is(t9)); 3070 ASSERT(!result.is(t9));
2939 3071
2940 // Make object size into bytes. 3072 // Make object size into bytes.
2941 if ((flags & SIZE_IN_WORDS) != 0) { 3073 if ((flags & SIZE_IN_WORDS) != 0) {
2942 object_size *= kPointerSize; 3074 object_size *= kPointerSize;
2943 } 3075 }
2944 ASSERT_EQ(0, object_size & kObjectAlignmentMask); 3076 ASSERT(0 == (object_size & kObjectAlignmentMask));
2945 3077
2946 // Check relative positions of allocation top and limit addresses. 3078 // Check relative positions of allocation top and limit addresses.
2947 // ARM adds additional checks to make sure the ldm instruction can be 3079 // ARM adds additional checks to make sure the ldm instruction can be
2948 // used. On MIPS we don't have ldm so we don't need additional checks either. 3080 // used. On MIPS we don't have ldm so we don't need additional checks either.
2949 ExternalReference allocation_top = 3081 ExternalReference allocation_top =
2950 AllocationUtils::GetAllocationTopReference(isolate(), flags); 3082 AllocationUtils::GetAllocationTopReference(isolate(), flags);
2951 ExternalReference allocation_limit = 3083 ExternalReference allocation_limit =
2952 AllocationUtils::GetAllocationLimitReference(isolate(), flags); 3084 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
2953 3085
2954 intptr_t top = 3086 intptr_t top =
2955 reinterpret_cast<intptr_t>(allocation_top.address()); 3087 reinterpret_cast<intptr_t>(allocation_top.address());
2956 intptr_t limit = 3088 intptr_t limit =
2957 reinterpret_cast<intptr_t>(allocation_limit.address()); 3089 reinterpret_cast<intptr_t>(allocation_limit.address());
2958 ASSERT((limit - top) == kPointerSize); 3090 ASSERT((limit - top) == kPointerSize);
2959 3091
2960 // Set up allocation top address and object size registers. 3092 // Set up allocation top address and object size registers.
2961 Register topaddr = scratch1; 3093 Register topaddr = scratch1;
2962 li(topaddr, Operand(allocation_top)); 3094 li(topaddr, Operand(allocation_top));
2963 3095
2964 // This code stores a temporary value in t9. 3096 // This code stores a temporary value in t9.
2965 if ((flags & RESULT_CONTAINS_TOP) == 0) { 3097 if ((flags & RESULT_CONTAINS_TOP) == 0) {
2966 // Load allocation top into result and allocation limit into t9. 3098 // Load allocation top into result and allocation limit into t9.
2967 lw(result, MemOperand(topaddr)); 3099 ld(result, MemOperand(topaddr));
2968 lw(t9, MemOperand(topaddr, kPointerSize)); 3100 ld(t9, MemOperand(topaddr, kPointerSize));
2969 } else { 3101 } else {
2970 if (emit_debug_code()) { 3102 if (emit_debug_code()) {
2971 // Assert that result actually contains top on entry. t9 is used 3103 // Assert that result actually contains top on entry. t9 is used
2972 // immediately below so this use of t9 does not cause difference with 3104 // immediately below so this use of t9 does not cause difference with
2973 // respect to register content between debug and release mode. 3105 // respect to register content between debug and release mode.
2974 lw(t9, MemOperand(topaddr)); 3106 ld(t9, MemOperand(topaddr));
2975 Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); 3107 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
2976 } 3108 }
2977 // Load allocation limit into t9. Result already contains allocation top. 3109 // Load allocation limit into t9. Result already contains allocation top.
2978 lw(t9, MemOperand(topaddr, limit - top)); 3110 ld(t9, MemOperand(topaddr, limit - top));
2979 } 3111 }
2980 3112
2981 if ((flags & DOUBLE_ALIGNMENT) != 0) { 3113 ASSERT(kPointerSize == kDoubleSize);
2982 // Align the next allocation. Storing the filler map without checking top is 3114 if (emit_debug_code()) {
2983 // safe in new-space because the limit of the heap is aligned there. 3115 And(at, result, Operand(kDoubleAlignmentMask));
2984 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 3116 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
2985 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
2986 And(scratch2, result, Operand(kDoubleAlignmentMask));
2987 Label aligned;
2988 Branch(&aligned, eq, scratch2, Operand(zero_reg));
2989 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
2990 Branch(gc_required, Ugreater_equal, result, Operand(t9));
2991 }
2992 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
2993 sw(scratch2, MemOperand(result));
2994 Addu(result, result, Operand(kDoubleSize / 2));
2995 bind(&aligned);
2996 } 3117 }
2997 3118
2998 // Calculate new top and bail out if new space is exhausted. Use result 3119 // Calculate new top and bail out if new space is exhausted. Use result
2999 // to calculate the new top. 3120 // to calculate the new top.
3000 Addu(scratch2, result, Operand(object_size)); 3121 Daddu(scratch2, result, Operand(object_size));
3001 Branch(gc_required, Ugreater, scratch2, Operand(t9)); 3122 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3002 sw(scratch2, MemOperand(topaddr)); 3123 sd(scratch2, MemOperand(topaddr));
3003 3124
3004 // Tag object if requested. 3125 // Tag object if requested.
3005 if ((flags & TAG_OBJECT) != 0) { 3126 if ((flags & TAG_OBJECT) != 0) {
3006 Addu(result, result, Operand(kHeapObjectTag)); 3127 Daddu(result, result, Operand(kHeapObjectTag));
3007 } 3128 }
3008 } 3129 }
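For reference, the fast path above is a classic bump-pointer allocation: top and limit live in adjacent words (the ASSERT checks limit - top == kPointerSize), so one base register addresses both ld loads. A hedged sketch of the emitted logic with made-up names; the real code branches to gc_required instead of returning null, and tags only when TAG_OBJECT is set.

#include <cstdint>

// Illustrative model only; kHeapObjectTag == 1 assumed for the TAG_OBJECT case.
char* BumpAllocate(char** top_addr, char** limit_addr, int64_t size_in_bytes) {
  char* result = *top_addr;                    // ld(result, MemOperand(topaddr))
  char* new_top = result + size_in_bytes;      // Daddu(scratch2, result, object_size)
  if (new_top > *limit_addr) return nullptr;   // Branch(gc_required, Ugreater, scratch2, t9)
  *top_addr = new_top;                         // sd(scratch2, MemOperand(topaddr))
  return result + 1;                           // Daddu(result, result, kHeapObjectTag)
}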
3009 3130
3010 3131
3011 void MacroAssembler::Allocate(Register object_size, 3132 void MacroAssembler::Allocate(Register object_size,
3012 Register result, 3133 Register result,
3013 Register scratch1, 3134 Register scratch1,
3014 Register scratch2, 3135 Register scratch2,
3015 Label* gc_required, 3136 Label* gc_required,
3016 AllocationFlags flags) { 3137 AllocationFlags flags) {
(...skipping 27 matching lines...)
3044 reinterpret_cast<intptr_t>(allocation_limit.address()); 3165 reinterpret_cast<intptr_t>(allocation_limit.address());
3045 ASSERT((limit - top) == kPointerSize); 3166 ASSERT((limit - top) == kPointerSize);
3046 3167
3047 // Set up allocation top address and object size registers. 3168 // Set up allocation top address and object size registers.
3048 Register topaddr = scratch1; 3169 Register topaddr = scratch1;
3049 li(topaddr, Operand(allocation_top)); 3170 li(topaddr, Operand(allocation_top));
3050 3171
3051 // This code stores a temporary value in t9. 3172 // This code stores a temporary value in t9.
3052 if ((flags & RESULT_CONTAINS_TOP) == 0) { 3173 if ((flags & RESULT_CONTAINS_TOP) == 0) {
3053 // Load allocation top into result and allocation limit into t9. 3174 // Load allocation top into result and allocation limit into t9.
3054 lw(result, MemOperand(topaddr)); 3175 ld(result, MemOperand(topaddr));
3055 lw(t9, MemOperand(topaddr, kPointerSize)); 3176 ld(t9, MemOperand(topaddr, kPointerSize));
3056 } else { 3177 } else {
3057 if (emit_debug_code()) { 3178 if (emit_debug_code()) {
3058 // Assert that result actually contains top on entry. t9 is used 3179 // Assert that result actually contains top on entry. t9 is used
3059 // immediately below so this use of t9 does not cause difference with 3180 // immediately below so this use of t9 does not cause difference with
3060 // respect to register content between debug and release mode. 3181 // respect to register content between debug and release mode.
3061 lw(t9, MemOperand(topaddr)); 3182 ld(t9, MemOperand(topaddr));
3062 Check(eq, kUnexpectedAllocationTop, result, Operand(t9)); 3183 Check(eq, kUnexpectedAllocationTop, result, Operand(t9));
3063 } 3184 }
3064 // Load allocation limit into t9. Result already contains allocation top. 3185 // Load allocation limit into t9. Result already contains allocation top.
3065 lw(t9, MemOperand(topaddr, limit - top)); 3186 ld(t9, MemOperand(topaddr, limit - top));
3066 } 3187 }
3067 3188
3068 if ((flags & DOUBLE_ALIGNMENT) != 0) { 3189 ASSERT(kPointerSize == kDoubleSize);
3069 // Align the next allocation. Storing the filler map without checking top is 3190 if (emit_debug_code()) {
3070 // safe in new-space because the limit of the heap is aligned there. 3191 And(at, result, Operand(kDoubleAlignmentMask));
3071 ASSERT((flags & PRETENURE_OLD_POINTER_SPACE) == 0); 3192 Check(eq, kAllocationIsNotDoubleAligned, at, Operand(zero_reg));
3072 ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
3073 And(scratch2, result, Operand(kDoubleAlignmentMask));
3074 Label aligned;
3075 Branch(&aligned, eq, scratch2, Operand(zero_reg));
3076 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
3077 Branch(gc_required, Ugreater_equal, result, Operand(t9));
3078 }
3079 li(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
3080 sw(scratch2, MemOperand(result));
3081 Addu(result, result, Operand(kDoubleSize / 2));
3082 bind(&aligned);
3083 } 3193 }
3084 3194
3085 // Calculate new top and bail out if new space is exhausted. Use result 3195 // Calculate new top and bail out if new space is exhausted. Use result
3086 // to calculate the new top. Object size may be in words so a shift is 3196 // to calculate the new top. Object size may be in words so a shift is
3087 // required to get the number of bytes. 3197 // required to get the number of bytes.
3088 if ((flags & SIZE_IN_WORDS) != 0) { 3198 if ((flags & SIZE_IN_WORDS) != 0) {
3089 sll(scratch2, object_size, kPointerSizeLog2); 3199 dsll(scratch2, object_size, kPointerSizeLog2);
3090 Addu(scratch2, result, scratch2); 3200 Daddu(scratch2, result, scratch2);
3091 } else { 3201 } else {
3092 Addu(scratch2, result, Operand(object_size)); 3202 Daddu(scratch2, result, Operand(object_size));
3093 } 3203 }
3094 Branch(gc_required, Ugreater, scratch2, Operand(t9)); 3204 Branch(gc_required, Ugreater, scratch2, Operand(t9));
3095 3205
3096 // Update allocation top. result temporarily holds the new top. 3206 // Update allocation top. result temporarily holds the new top.
3097 if (emit_debug_code()) { 3207 if (emit_debug_code()) {
3098 And(t9, scratch2, Operand(kObjectAlignmentMask)); 3208 And(t9, scratch2, Operand(kObjectAlignmentMask));
3099 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg)); 3209 Check(eq, kUnalignedAllocationInNewSpace, t9, Operand(zero_reg));
3100 } 3210 }
3101 sw(scratch2, MemOperand(topaddr)); 3211 sd(scratch2, MemOperand(topaddr));
3102 3212
3103 // Tag object if requested. 3213 // Tag object if requested.
3104 if ((flags & TAG_OBJECT) != 0) { 3214 if ((flags & TAG_OBJECT) != 0) {
3105 Addu(result, result, Operand(kHeapObjectTag)); 3215 Daddu(result, result, Operand(kHeapObjectTag));
3106 } 3216 }
3107 } 3217 }
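The only extra step in the register variant is the SIZE_IN_WORDS conversion, now done with a 64-bit shift. A trivial illustration, assuming kPointerSizeLog2 == 3 on mips64:

#include <cstdint>

// dsll(scratch2, object_size, kPointerSizeLog2) multiplies a word count by 8.
constexpr int kPointerSizeLog2Assumed = 3;
inline int64_t WordsToBytes(int64_t size_in_words) {
  return size_in_words << kPointerSizeLog2Assumed;  // e.g. 5 words -> 40 bytes
}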
3108 3218
3109 3219
3110 void MacroAssembler::UndoAllocationInNewSpace(Register object, 3220 void MacroAssembler::UndoAllocationInNewSpace(Register object,
3111 Register scratch) { 3221 Register scratch) {
3112 ExternalReference new_space_allocation_top = 3222 ExternalReference new_space_allocation_top =
3113 ExternalReference::new_space_allocation_top_address(isolate()); 3223 ExternalReference::new_space_allocation_top_address(isolate());
3114 3224
3115 // Make sure the object has no tag before resetting top. 3225 // Make sure the object has no tag before resetting top.
3116 And(object, object, Operand(~kHeapObjectTagMask)); 3226 And(object, object, Operand(~kHeapObjectTagMask));
3117 #ifdef DEBUG 3227 #ifdef DEBUG
3118 // Check that the object un-allocated is below the current top. 3228 // Check that the object un-allocated is below the current top.
3119 li(scratch, Operand(new_space_allocation_top)); 3229 li(scratch, Operand(new_space_allocation_top));
3120 lw(scratch, MemOperand(scratch)); 3230 ld(scratch, MemOperand(scratch));
3121 Check(less, kUndoAllocationOfNonAllocatedMemory, 3231 Check(less, kUndoAllocationOfNonAllocatedMemory,
3122 object, Operand(scratch)); 3232 object, Operand(scratch));
3123 #endif 3233 #endif
3124 // Write the address of the object to un-allocate as the current top. 3234 // Write the address of the object to un-allocate as the current top.
3125 li(scratch, Operand(new_space_allocation_top)); 3235 li(scratch, Operand(new_space_allocation_top));
3126 sw(object, MemOperand(scratch)); 3236 sd(object, MemOperand(scratch));
3127 } 3237 }
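Undoing an allocation is just rewinding the allocation top to the object's untagged address; the DEBUG block additionally checks that the object really lies below the current top. A small sketch with an explicit tag_mask parameter standing in for kHeapObjectTagMask:

#include <cassert>
#include <cstdint>

// Illustrative model; not V8 API.
void UndoAllocation(uintptr_t* top_addr, uintptr_t tagged_object, uintptr_t tag_mask) {
  uintptr_t object = tagged_object & ~tag_mask;  // And(object, object, ~kHeapObjectTagMask)
  assert(object < *top_addr);                    // debug-only Check(less, ...)
  *top_addr = object;                            // sd(object, MemOperand(scratch))
}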
3128 3238
3129 3239
3130 void MacroAssembler::AllocateTwoByteString(Register result, 3240 void MacroAssembler::AllocateTwoByteString(Register result,
3131 Register length, 3241 Register length,
3132 Register scratch1, 3242 Register scratch1,
3133 Register scratch2, 3243 Register scratch2,
3134 Register scratch3, 3244 Register scratch3,
3135 Label* gc_required) { 3245 Label* gc_required) {
3136 // Calculate the number of bytes needed for the characters in the string while 3246 // Calculate the number of bytes needed for the characters in the string while
3137 // observing object alignment. 3247 // observing object alignment.
3138 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3248 ASSERT((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3139 sll(scratch1, length, 1); // Length in bytes, not chars. 3249 dsll(scratch1, length, 1); // Length in bytes, not chars.
3140 addiu(scratch1, scratch1, 3250 daddiu(scratch1, scratch1,
3141 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize); 3251 kObjectAlignmentMask + SeqTwoByteString::kHeaderSize);
3142 And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); 3252 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3143 3253
3144 // Allocate two-byte string in new space. 3254 // Allocate two-byte string in new space.
3145 Allocate(scratch1, 3255 Allocate(scratch1,
3146 result, 3256 result,
3147 scratch2, 3257 scratch2,
3148 scratch3, 3258 scratch3,
3149 gc_required, 3259 gc_required,
3150 TAG_OBJECT); 3260 TAG_OBJECT);
(...skipping 10 matching lines...)
3161 void MacroAssembler::AllocateAsciiString(Register result, 3271 void MacroAssembler::AllocateAsciiString(Register result,
3162 Register length, 3272 Register length,
3163 Register scratch1, 3273 Register scratch1,
3164 Register scratch2, 3274 Register scratch2,
3165 Register scratch3, 3275 Register scratch3,
3166 Label* gc_required) { 3276 Label* gc_required) {
3167 // Calculate the number of bytes needed for the characters in the string 3277 // Calculate the number of bytes needed for the characters in the string
3168 // while observing object alignment. 3278 // while observing object alignment.
3169 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0); 3279 ASSERT((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
3170 ASSERT(kCharSize == 1); 3280 ASSERT(kCharSize == 1);
3171 addiu(scratch1, length, kObjectAlignmentMask + SeqOneByteString::kHeaderSize); 3281 daddiu(scratch1, length,
3282 kObjectAlignmentMask + SeqOneByteString::kHeaderSize);
3172 And(scratch1, scratch1, Operand(~kObjectAlignmentMask)); 3283 And(scratch1, scratch1, Operand(~kObjectAlignmentMask));
3173 3284
3174 // Allocate ASCII string in new space. 3285 // Allocate ASCII string in new space.
3175 Allocate(scratch1, 3286 Allocate(scratch1,
3176 result, 3287 result,
3177 scratch2, 3288 scratch2,
3178 scratch3, 3289 scratch3,
3179 gc_required, 3290 gc_required,
3180 TAG_OBJECT); 3291 TAG_OBJECT);
3181 3292
(...skipping 85 matching lines...)
3267 } 3378 }
3268 3379
3269 3380
3270 // Allocates a heap number or jumps to the label if the young space is full and 3381 // Allocates a heap number or jumps to the label if the young space is full and
3271 // a scavenge is needed. 3382 // a scavenge is needed.
3272 void MacroAssembler::AllocateHeapNumber(Register result, 3383 void MacroAssembler::AllocateHeapNumber(Register result,
3273 Register scratch1, 3384 Register scratch1,
3274 Register scratch2, 3385 Register scratch2,
3275 Register heap_number_map, 3386 Register heap_number_map,
3276 Label* need_gc, 3387 Label* need_gc,
3277 TaggingMode tagging_mode, 3388 TaggingMode tagging_mode) {
3278 MutableMode mode) {
3279 // Allocate an object in the heap for the heap number and tag it as a heap 3389 // Allocate an object in the heap for the heap number and tag it as a heap
3280 // object. 3390 // object.
3281 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc, 3391 Allocate(HeapNumber::kSize, result, scratch1, scratch2, need_gc,
3282 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS); 3392 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3283 3393
3284 Heap::RootListIndex map_index = mode == MUTABLE
3285 ? Heap::kMutableHeapNumberMapRootIndex
3286 : Heap::kHeapNumberMapRootIndex;
3287 AssertIsRoot(heap_number_map, map_index);
3288
3289 // Store heap number map in the allocated object. 3394 // Store heap number map in the allocated object.
3395 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
3290 if (tagging_mode == TAG_RESULT) { 3396 if (tagging_mode == TAG_RESULT) {
3291 sw(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset)); 3397 sd(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset));
3292 } else { 3398 } else {
3293 sw(heap_number_map, MemOperand(result, HeapObject::kMapOffset)); 3399 sd(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3294 } 3400 }
3295 } 3401 }
3296 3402
3297 3403
3298 void MacroAssembler::AllocateHeapNumberWithValue(Register result, 3404 void MacroAssembler::AllocateHeapNumberWithValue(Register result,
3299 FPURegister value, 3405 FPURegister value,
3300 Register scratch1, 3406 Register scratch1,
3301 Register scratch2, 3407 Register scratch2,
3302 Label* gc_required) { 3408 Label* gc_required) {
3303 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); 3409 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
(...skipping 15 matching lines...)
3319 // Find a temp register in temps list. 3425 // Find a temp register in temps list.
3320 for (int i = 0; i < kNumRegisters; i++) { 3426 for (int i = 0; i < kNumRegisters; i++) {
3321 if ((temps & (1 << i)) != 0) { 3427 if ((temps & (1 << i)) != 0) {
3322 tmp.code_ = i; 3428 tmp.code_ = i;
3323 break; 3429 break;
3324 } 3430 }
3325 } 3431 }
3326 ASSERT(!tmp.is(no_reg)); 3432 ASSERT(!tmp.is(no_reg));
3327 3433
3328 for (int i = 0; i < field_count; i++) { 3434 for (int i = 0; i < field_count; i++) {
3329 lw(tmp, FieldMemOperand(src, i * kPointerSize)); 3435 ld(tmp, FieldMemOperand(src, i * kPointerSize));
3330 sw(tmp, FieldMemOperand(dst, i * kPointerSize)); 3436 sd(tmp, FieldMemOperand(dst, i * kPointerSize));
3331 } 3437 }
3332 } 3438 }
3333 3439
3334 3440
3335 void MacroAssembler::CopyBytes(Register src, 3441 void MacroAssembler::CopyBytes(Register src,
3336 Register dst, 3442 Register dst,
3337 Register length, 3443 Register length,
3338 Register scratch) { 3444 Register scratch) {
3339 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done; 3445 Label align_loop_1, word_loop, byte_loop, byte_loop_1, done;
3340 3446
3341 // Align src before copying in word size chunks. 3447 // Align src before copying in word size chunks.
3342 Branch(&byte_loop, le, length, Operand(kPointerSize)); 3448 Branch(&byte_loop, le, length, Operand(kPointerSize));
3343 bind(&align_loop_1); 3449 bind(&align_loop_1);
3344 And(scratch, src, kPointerSize - 1); 3450 And(scratch, src, kPointerSize - 1);
3345 Branch(&word_loop, eq, scratch, Operand(zero_reg)); 3451 Branch(&word_loop, eq, scratch, Operand(zero_reg));
3346 lbu(scratch, MemOperand(src)); 3452 lbu(scratch, MemOperand(src));
3347 Addu(src, src, 1); 3453 Daddu(src, src, 1);
3348 sb(scratch, MemOperand(dst)); 3454 sb(scratch, MemOperand(dst));
3349 Addu(dst, dst, 1); 3455 Daddu(dst, dst, 1);
3350 Subu(length, length, Operand(1)); 3456 Dsubu(length, length, Operand(1));
3351 Branch(&align_loop_1, ne, length, Operand(zero_reg)); 3457 Branch(&align_loop_1, ne, length, Operand(zero_reg));
3352 3458
3353 // Copy bytes in word size chunks. 3459 // Copy bytes in word size chunks.
3354 bind(&word_loop); 3460 bind(&word_loop);
3355 if (emit_debug_code()) { 3461 if (emit_debug_code()) {
3356 And(scratch, src, kPointerSize - 1); 3462 And(scratch, src, kPointerSize - 1);
3357 Assert(eq, kExpectingAlignmentForCopyBytes, 3463 Assert(eq, kExpectingAlignmentForCopyBytes,
3358 scratch, Operand(zero_reg)); 3464 scratch, Operand(zero_reg));
3359 } 3465 }
3360 Branch(&byte_loop, lt, length, Operand(kPointerSize)); 3466 Branch(&byte_loop, lt, length, Operand(kPointerSize));
3361 lw(scratch, MemOperand(src)); 3467 ld(scratch, MemOperand(src));
3362 Addu(src, src, kPointerSize); 3468 Daddu(src, src, kPointerSize);
3363 3469
3364 // TODO(kalmard) check if this can be optimized to use sw in most cases. 3470 // TODO(kalmard) check if this can be optimized to use sw in most cases.
3365 // Can't use unaligned access - copy byte by byte. 3471 // Can't use unaligned access - copy byte by byte.
3366 if (kArchEndian == kLittle) { 3472 sb(scratch, MemOperand(dst, 0));
3367 sb(scratch, MemOperand(dst, 0)); 3473 dsrl(scratch, scratch, 8);
3368 srl(scratch, scratch, 8); 3474 sb(scratch, MemOperand(dst, 1));
3369 sb(scratch, MemOperand(dst, 1)); 3475 dsrl(scratch, scratch, 8);
3370 srl(scratch, scratch, 8); 3476 sb(scratch, MemOperand(dst, 2));
3371 sb(scratch, MemOperand(dst, 2)); 3477 dsrl(scratch, scratch, 8);
3372 srl(scratch, scratch, 8); 3478 sb(scratch, MemOperand(dst, 3));
3373 sb(scratch, MemOperand(dst, 3)); 3479 dsrl(scratch, scratch, 8);
3374 } else { 3480 sb(scratch, MemOperand(dst, 4));
3375 sb(scratch, MemOperand(dst, 3)); 3481 dsrl(scratch, scratch, 8);
3376 srl(scratch, scratch, 8); 3482 sb(scratch, MemOperand(dst, 5));
3377 sb(scratch, MemOperand(dst, 2)); 3483 dsrl(scratch, scratch, 8);
3378 srl(scratch, scratch, 8); 3484 sb(scratch, MemOperand(dst, 6));
3379 sb(scratch, MemOperand(dst, 1)); 3485 dsrl(scratch, scratch, 8);
3380 srl(scratch, scratch, 8); 3486 sb(scratch, MemOperand(dst, 7));
3381 sb(scratch, MemOperand(dst, 0)); 3487 Daddu(dst, dst, 8);
3382 }
3383 3488
3384 Addu(dst, dst, 4); 3489 Dsubu(length, length, Operand(kPointerSize));
3385
3386 Subu(length, length, Operand(kPointerSize));
3387 Branch(&word_loop); 3490 Branch(&word_loop);
3388 3491
3389 // Copy the last bytes if any left. 3492 // Copy the last bytes if any left.
3390 bind(&byte_loop); 3493 bind(&byte_loop);
3391 Branch(&done, eq, length, Operand(zero_reg)); 3494 Branch(&done, eq, length, Operand(zero_reg));
3392 bind(&byte_loop_1); 3495 bind(&byte_loop_1);
3393 lbu(scratch, MemOperand(src)); 3496 lbu(scratch, MemOperand(src));
3394 Addu(src, src, 1); 3497 Daddu(src, src, 1);
3395 sb(scratch, MemOperand(dst)); 3498 sb(scratch, MemOperand(dst));
3396 Addu(dst, dst, 1); 3499 Daddu(dst, dst, 1);
3397 Subu(length, length, Operand(1)); 3500 Dsubu(length, length, Operand(1));
3398 Branch(&byte_loop_1, ne, length, Operand(zero_reg)); 3501 Branch(&byte_loop_1, ne, length, Operand(zero_reg));
3399 bind(&done); 3502 bind(&done);
3400 } 3503 }
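In the word loop above, src is known to be 8-byte aligned (align_loop_1 handles that, and emit_debug_code re-checks it), but dst may not be, so each ld is written back with eight sb stores, peeling off the low byte with dsrl each time; the port drops the big-endian branch, so little-endian byte order is assumed. A sketch of that store pattern:

#include <cstddef>
#include <cstdint>

// One aligned 64-bit load is scattered to a possibly unaligned destination,
// lowest byte first, exactly as the sb/dsrl sequence above does.
void StoreWordAsBytes(uint64_t word, unsigned char* dst) {
  for (size_t i = 0; i < 8; ++i) {
    dst[i] = static_cast<unsigned char>(word & 0xff);  // sb(scratch, MemOperand(dst, i))
    word >>= 8;                                        // dsrl(scratch, scratch, 8)
  }
}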
3401 3504
3402 3505
3403 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset, 3506 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3404 Register end_offset, 3507 Register end_offset,
3405 Register filler) { 3508 Register filler) {
3406 Label loop, entry; 3509 Label loop, entry;
3407 Branch(&entry); 3510 Branch(&entry);
3408 bind(&loop); 3511 bind(&loop);
3409 sw(filler, MemOperand(start_offset)); 3512 sd(filler, MemOperand(start_offset));
3410 Addu(start_offset, start_offset, kPointerSize); 3513 Daddu(start_offset, start_offset, kPointerSize);
3411 bind(&entry); 3514 bind(&entry);
3412 Branch(&loop, lt, start_offset, Operand(end_offset)); 3515 Branch(&loop, lt, start_offset, Operand(end_offset));
3413 } 3516 }
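The loop above is effectively a pointer-sized memset of the filler value over [start_offset, end_offset). Equivalent sketch:

#include <cstdint>

// Store 'filler' into every 8-byte slot in [start, end); mirrors the sd/Daddu loop.
void FillFields(uint64_t* start, uint64_t* end, uint64_t filler) {
  for (uint64_t* p = start; p < end; ++p) *p = filler;
}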
3414 3517
3415 3518
3416 void MacroAssembler::CheckFastElements(Register map, 3519 void MacroAssembler::CheckFastElements(Register map,
3417 Register scratch, 3520 Register scratch,
3418 Label* fail) { 3521 Label* fail) {
3419 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0); 3522 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
3420 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1); 3523 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
(...skipping 52 matching lines...)
3473 Heap::kHeapNumberMapRootIndex, 3576 Heap::kHeapNumberMapRootIndex,
3474 fail, 3577 fail,
3475 DONT_DO_SMI_CHECK); 3578 DONT_DO_SMI_CHECK);
3476 3579
3477 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000 3580 // Check for nan: all NaN values have a value greater (signed) than 0x7ff00000
3478 // in the exponent. 3581 // in the exponent.
3479 li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32)); 3582 li(scratch1, Operand(kNaNOrInfinityLowerBoundUpper32));
3480 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset)); 3583 lw(exponent_reg, FieldMemOperand(value_reg, HeapNumber::kExponentOffset));
3481 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1)); 3584 Branch(&maybe_nan, ge, exponent_reg, Operand(scratch1));
3482 3585
3483 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); 3586 lwu(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3484 3587
3485 bind(&have_double_value); 3588 bind(&have_double_value);
3486 sll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize); 3589 // dsll(scratch1, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3487 Addu(scratch1, scratch1, elements_reg); 3590 dsra(scratch1, key_reg, 32 - kDoubleSizeLog2);
3488 sw(mantissa_reg, 3591 Daddu(scratch1, scratch1, elements_reg);
3489 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset 3592 sw(mantissa_reg, FieldMemOperand(
3490 + kHoleNanLower32Offset)); 3593 scratch1, FixedDoubleArray::kHeaderSize - elements_offset));
3491 sw(exponent_reg, 3594 uint32_t offset = FixedDoubleArray::kHeaderSize - elements_offset +
3492 FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - elements_offset 3595 sizeof(kHoleNanLower32);
3493 + kHoleNanUpper32Offset)); 3596 sw(exponent_reg, FieldMemOperand(scratch1, offset));
3494 jmp(&done); 3597 jmp(&done);
3495 3598
3496 bind(&maybe_nan); 3599 bind(&maybe_nan);
3497 // Could be NaN or Infinity. If fraction is not zero, it's NaN, otherwise 3600 // Could be NaN, Infinity or -Infinity. If fraction is not zero, it's NaN,
3498 // it's an Infinity, and the non-NaN code path applies. 3601 // otherwise it's Infinity or -Infinity, and the non-NaN code path applies.
3499 Branch(&is_nan, gt, exponent_reg, Operand(scratch1));
3500 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset)); 3602 lw(mantissa_reg, FieldMemOperand(value_reg, HeapNumber::kMantissaOffset));
3501 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg)); 3603 Branch(&have_double_value, eq, mantissa_reg, Operand(zero_reg));
3502 bind(&is_nan); 3604 bind(&is_nan);
3503 // Load canonical NaN for storing into the double array. 3605 // Load canonical NaN for storing into the double array.
3504 LoadRoot(at, Heap::kNanValueRootIndex); 3606 LoadRoot(at, Heap::kNanValueRootIndex);
3505 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset)); 3607 lw(mantissa_reg, FieldMemOperand(at, HeapNumber::kMantissaOffset));
3506 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset)); 3608 lw(exponent_reg, FieldMemOperand(at, HeapNumber::kExponentOffset));
3507 jmp(&have_double_value); 3609 jmp(&have_double_value);
3508 3610
3509 bind(&smi_value); 3611 bind(&smi_value);
3510 Addu(scratch1, elements_reg, 3612 Daddu(scratch1, elements_reg,
3511 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag - 3613 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag -
3512 elements_offset)); 3614 elements_offset));
3513 sll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize); 3615 // dsll(scratch2, key_reg, kDoubleSizeLog2 - kSmiTagSize);
3514 Addu(scratch1, scratch1, scratch2); 3616 dsra(scratch2, key_reg, 32 - kDoubleSizeLog2);
3617 Daddu(scratch1, scratch1, scratch2);
3515 // scratch1 is now effective address of the double element 3618 // scratch1 is now effective address of the double element
3516 3619
3517 Register untagged_value = elements_reg; 3620 Register untagged_value = elements_reg;
3518 SmiUntag(untagged_value, value_reg); 3621 SmiUntag(untagged_value, value_reg);
3519 mtc1(untagged_value, f2); 3622 mtc1(untagged_value, f2);
3520 cvt_d_w(f0, f2); 3623 cvt_d_w(f0, f2);
3521 sdc1(f0, MemOperand(scratch1, 0)); 3624 sdc1(f0, MemOperand(scratch1, 0));
3522 bind(&done); 3625 bind(&done);
3523 } 3626 }
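The interesting mips64 change in this function is the key arithmetic: with the 64-bit smi encoding assumed by this port (payload in the upper 32 bits), a single dsra by 32 - kDoubleSizeLog2 both untags the key and scales it to a byte offset, replacing the old sll. A worked illustration under that assumed encoding:

#include <cstdint>

// Assumed encoding: smi value v is stored as v << 32.
inline int64_t SmiFromInt(int32_t v) { return static_cast<int64_t>(v) << 32; }

// dsra(scratch, key, 32 - kDoubleSizeLog2) == (v << 32) >> 29 == v * 8,
// i.e. the byte offset of element v in a FixedDoubleArray.
inline int64_t SmiKeyToByteOffset(int64_t smi_key) {
  return smi_key >> (32 - 3);  // SmiFromInt(5) -> 40
}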
3524 3627
3525 3628
3526 void MacroAssembler::CompareMapAndBranch(Register obj, 3629 void MacroAssembler::CompareMapAndBranch(Register obj,
3527 Register scratch, 3630 Register scratch,
3528 Handle<Map> map, 3631 Handle<Map> map,
3529 Label* early_success, 3632 Label* early_success,
3530 Condition cond, 3633 Condition cond,
3531 Label* branch_to) { 3634 Label* branch_to) {
3532 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 3635 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3533 CompareMapAndBranch(scratch, map, early_success, cond, branch_to); 3636 CompareMapAndBranch(scratch, map, early_success, cond, branch_to);
3534 } 3637 }
3535 3638
3536 3639
3537 void MacroAssembler::CompareMapAndBranch(Register obj_map, 3640 void MacroAssembler::CompareMapAndBranch(Register obj_map,
3538 Handle<Map> map, 3641 Handle<Map> map,
3539 Label* early_success, 3642 Label* early_success,
3540 Condition cond, 3643 Condition cond,
3541 Label* branch_to) { 3644 Label* branch_to) {
3542 Branch(branch_to, cond, obj_map, Operand(map)); 3645 Branch(branch_to, cond, obj_map, Operand(map));
(...skipping 16 matching lines...) Expand all
3559 3662
3560 void MacroAssembler::DispatchMap(Register obj, 3663 void MacroAssembler::DispatchMap(Register obj,
3561 Register scratch, 3664 Register scratch,
3562 Handle<Map> map, 3665 Handle<Map> map,
3563 Handle<Code> success, 3666 Handle<Code> success,
3564 SmiCheckType smi_check_type) { 3667 SmiCheckType smi_check_type) {
3565 Label fail; 3668 Label fail;
3566 if (smi_check_type == DO_SMI_CHECK) { 3669 if (smi_check_type == DO_SMI_CHECK) {
3567 JumpIfSmi(obj, &fail); 3670 JumpIfSmi(obj, &fail);
3568 } 3671 }
3569 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 3672 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3570 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map)); 3673 Jump(success, RelocInfo::CODE_TARGET, eq, scratch, Operand(map));
3571 bind(&fail); 3674 bind(&fail);
3572 } 3675 }
3573 3676
3574 3677
3575 void MacroAssembler::CheckMap(Register obj, 3678 void MacroAssembler::CheckMap(Register obj,
3576 Register scratch, 3679 Register scratch,
3577 Heap::RootListIndex index, 3680 Heap::RootListIndex index,
3578 Label* fail, 3681 Label* fail,
3579 SmiCheckType smi_check_type) { 3682 SmiCheckType smi_check_type) {
3580 if (smi_check_type == DO_SMI_CHECK) { 3683 if (smi_check_type == DO_SMI_CHECK) {
3581 JumpIfSmi(obj, fail); 3684 JumpIfSmi(obj, fail);
3582 } 3685 }
3583 lw(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 3686 ld(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
3584 LoadRoot(at, index); 3687 LoadRoot(at, index);
3585 Branch(fail, ne, scratch, Operand(at)); 3688 Branch(fail, ne, scratch, Operand(at));
3586 } 3689 }
3587 3690
3588 3691
3589 void MacroAssembler::MovFromFloatResult(DoubleRegister dst) { 3692 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
3590 if (IsMipsSoftFloatABI) { 3693 if (IsMipsSoftFloatABI) {
3591 if (kArchEndian == kLittle) { 3694 Move(dst, v0, v1);
3592 Move(dst, v0, v1);
3593 } else {
3594 Move(dst, v1, v0);
3595 }
3596 } else { 3695 } else {
3597 Move(dst, f0); // Reg f0 is o32 ABI FP return value. 3696 Move(dst, f0); // Reg f0 is o32 ABI FP return value.
3598 } 3697 }
3599 } 3698 }
3600 3699
3601 3700
3602 void MacroAssembler::MovFromFloatParameter(DoubleRegister dst) { 3701 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
3603 if (IsMipsSoftFloatABI) { 3702 if (IsMipsSoftFloatABI) {
3604 if (kArchEndian == kLittle) { 3703 Move(dst, a0, a1);
3605 Move(dst, a0, a1);
3606 } else {
3607 Move(dst, a1, a0);
3608 }
3609 } else { 3704 } else {
3610 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value. 3705 Move(dst, f12); // Reg f12 is o32 ABI FP first argument value.
3611 } 3706 }
3612 } 3707 }
3613 3708
3614 3709
3615 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { 3710 void MacroAssembler::MovToFloatParameter(DoubleRegister src) {
3616 if (!IsMipsSoftFloatABI) { 3711 if (!IsMipsSoftFloatABI) {
3617 Move(f12, src); 3712 Move(f12, src);
3618 } else { 3713 } else {
3619 if (kArchEndian == kLittle) { 3714 Move(a0, a1, src);
3620 Move(a0, a1, src);
3621 } else {
3622 Move(a1, a0, src);
3623 }
3624 } 3715 }
3625 } 3716 }
3626 3717
3627 3718
3628 void MacroAssembler::MovToFloatResult(DoubleRegister src) { 3719 void MacroAssembler::MovToFloatResult(DoubleRegister src) {
3629 if (!IsMipsSoftFloatABI) { 3720 if (!IsMipsSoftFloatABI) {
3630 Move(f0, src); 3721 Move(f0, src);
3631 } else { 3722 } else {
3632 if (kArchEndian == kLittle) { 3723 Move(v0, v1, src);
3633 Move(v0, v1, src);
3634 } else {
3635 Move(v1, v0, src);
3636 }
3637 } 3724 }
3638 } 3725 }
3639 3726
3640 3727
3641 void MacroAssembler::MovToFloatParameters(DoubleRegister src1, 3728 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3642 DoubleRegister src2) { 3729 DoubleRegister src2) {
3643 if (!IsMipsSoftFloatABI) { 3730 if (!IsMipsSoftFloatABI) {
3731 const DoubleRegister fparg2 = (kMipsAbi == kN64) ? f13 : f14;
3644 if (src2.is(f12)) { 3732 if (src2.is(f12)) {
3645 ASSERT(!src1.is(f14)); 3733 ASSERT(!src1.is(fparg2));
3646 Move(f14, src2); 3734 Move(fparg2, src2);
3647 Move(f12, src1); 3735 Move(f12, src1);
3648 } else { 3736 } else {
3649 Move(f12, src1); 3737 Move(f12, src1);
3650 Move(f14, src2); 3738 Move(fparg2, src2);
3651 } 3739 }
3652 } else { 3740 } else {
3653 if (kArchEndian == kLittle) { 3741 Move(a0, a1, src1);
3654 Move(a0, a1, src1); 3742 Move(a2, a3, src2);
3655 Move(a2, a3, src2);
3656 } else {
3657 Move(a1, a0, src1);
3658 Move(a3, a2, src2);
3659 }
3660 } 3743 }
3661 } 3744 }
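With the n64 ABI assumed by this port, the second FP argument register is f13 (o32 used f14), hence the fparg2 selection above; the src2.is(f12) case reorders the moves so src2 is not clobbered before it is read. An illustrative model of that ordering, with invented names:

// fp_regs[i] stands in for FPU register fi; kFparg2 == 13 assumes the n64 ABI.
// Precondition, matching the ASSERT above: src1 != kFparg2 when src2 == kF12.
void PassTwoDoubleArgs(double fp_regs[32], int src1, int src2) {
  const int kF12 = 12, kFparg2 = 13;
  if (src2 == kF12) {
    fp_regs[kFparg2] = fp_regs[src2];  // move src2 out of f12 first...
    fp_regs[kF12] = fp_regs[src1];     // ...then overwrite f12 with src1
  } else {
    fp_regs[kF12] = fp_regs[src1];
    fp_regs[kFparg2] = fp_regs[src2];
  }
}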
3662 3745
3663 3746
3664 // ----------------------------------------------------------------------------- 3747 // -----------------------------------------------------------------------------
3665 // JavaScript invokes. 3748 // JavaScript invokes.
3666 3749
3667 void MacroAssembler::InvokePrologue(const ParameterCount& expected, 3750 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
3668 const ParameterCount& actual, 3751 const ParameterCount& actual,
3669 Handle<Code> code_constant, 3752 Handle<Code> code_constant,
(...skipping 40 matching lines...)
3710 } else if (actual.is_immediate()) { 3793 } else if (actual.is_immediate()) {
3711 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate())); 3794 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.immediate()));
3712 li(a0, Operand(actual.immediate())); 3795 li(a0, Operand(actual.immediate()));
3713 } else { 3796 } else {
3714 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg())); 3797 Branch(&regular_invoke, eq, expected.reg(), Operand(actual.reg()));
3715 } 3798 }
3716 3799
3717 if (!definitely_matches) { 3800 if (!definitely_matches) {
3718 if (!code_constant.is_null()) { 3801 if (!code_constant.is_null()) {
3719 li(a3, Operand(code_constant)); 3802 li(a3, Operand(code_constant));
3720 addiu(a3, a3, Code::kHeaderSize - kHeapObjectTag); 3803 daddiu(a3, a3, Code::kHeaderSize - kHeapObjectTag);
3721 } 3804 }
3722 3805
3723 Handle<Code> adaptor = 3806 Handle<Code> adaptor =
3724 isolate()->builtins()->ArgumentsAdaptorTrampoline(); 3807 isolate()->builtins()->ArgumentsAdaptorTrampoline();
3725 if (flag == CALL_FUNCTION) { 3808 if (flag == CALL_FUNCTION) {
3726 call_wrapper.BeforeCall(CallSize(adaptor)); 3809 call_wrapper.BeforeCall(CallSize(adaptor));
3727 Call(adaptor); 3810 Call(adaptor);
3728 call_wrapper.AfterCall(); 3811 call_wrapper.AfterCall();
3729 if (!*definitely_mismatches) { 3812 if (!*definitely_mismatches) {
3730 Branch(done); 3813 Branch(done);
(...skipping 40 matching lines...)
3771 const ParameterCount& actual, 3854 const ParameterCount& actual,
3772 InvokeFlag flag, 3855 InvokeFlag flag,
3773 const CallWrapper& call_wrapper) { 3856 const CallWrapper& call_wrapper) {
3774 // You can't call a function without a valid frame. 3857 // You can't call a function without a valid frame.
3775 ASSERT(flag == JUMP_FUNCTION || has_frame()); 3858 ASSERT(flag == JUMP_FUNCTION || has_frame());
3776 3859
3777 // Contract with called JS functions requires that function is passed in a1. 3860 // Contract with called JS functions requires that function is passed in a1.
3778 ASSERT(function.is(a1)); 3861 ASSERT(function.is(a1));
3779 Register expected_reg = a2; 3862 Register expected_reg = a2;
3780 Register code_reg = a3; 3863 Register code_reg = a3;
3781 3864 ld(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset));
3782 lw(code_reg, FieldMemOperand(a1, JSFunction::kSharedFunctionInfoOffset)); 3865 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3783 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 3866 // The argument count is stored as int32_t on 64-bit platforms.
3867 // TODO(plind): Smi on 32-bit platforms.
3784 lw(expected_reg, 3868 lw(expected_reg,
3785 FieldMemOperand(code_reg, 3869 FieldMemOperand(code_reg,
3786 SharedFunctionInfo::kFormalParameterCountOffset)); 3870 SharedFunctionInfo::kFormalParameterCountOffset));
3787 sra(expected_reg, expected_reg, kSmiTagSize); 3871 ld(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3788 lw(code_reg, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3789
3790 ParameterCount expected(expected_reg); 3872 ParameterCount expected(expected_reg);
3791 InvokeCode(code_reg, expected, actual, flag, call_wrapper); 3873 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
3792 } 3874 }
3793 3875
3794 3876
3795 void MacroAssembler::InvokeFunction(Register function, 3877 void MacroAssembler::InvokeFunction(Register function,
3796 const ParameterCount& expected, 3878 const ParameterCount& expected,
3797 const ParameterCount& actual, 3879 const ParameterCount& actual,
3798 InvokeFlag flag, 3880 InvokeFlag flag,
3799 const CallWrapper& call_wrapper) { 3881 const CallWrapper& call_wrapper) {
3800 // You can't call a function without a valid frame. 3882 // You can't call a function without a valid frame.
3801 ASSERT(flag == JUMP_FUNCTION || has_frame()); 3883 ASSERT(flag == JUMP_FUNCTION || has_frame());
3802 3884
3803 // Contract with called JS functions requires that function is passed in a1. 3885 // Contract with called JS functions requires that function is passed in a1.
3804 ASSERT(function.is(a1)); 3886 ASSERT(function.is(a1));
3805 3887
3806 // Get the function and setup the context. 3888 // Get the function and setup the context.
3807 lw(cp, FieldMemOperand(a1, JSFunction::kContextOffset)); 3889 ld(cp, FieldMemOperand(a1, JSFunction::kContextOffset));
3808 3890
3809 // We call indirectly through the code field in the function to 3891 // We call indirectly through the code field in the function to
3810 // allow recompilation to take effect without changing any of the 3892 // allow recompilation to take effect without changing any of the
3811 // call sites. 3893 // call sites.
3812 lw(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 3894 ld(a3, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
3813 InvokeCode(a3, expected, actual, flag, call_wrapper); 3895 InvokeCode(a3, expected, actual, flag, call_wrapper);
3814 } 3896 }
3815 3897
3816 3898
3817 void MacroAssembler::InvokeFunction(Handle<JSFunction> function, 3899 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
3818 const ParameterCount& expected, 3900 const ParameterCount& expected,
3819 const ParameterCount& actual, 3901 const ParameterCount& actual,
3820 InvokeFlag flag, 3902 InvokeFlag flag,
3821 const CallWrapper& call_wrapper) { 3903 const CallWrapper& call_wrapper) {
3822 li(a1, function); 3904 li(a1, function);
3823 InvokeFunction(a1, expected, actual, flag, call_wrapper); 3905 InvokeFunction(a1, expected, actual, flag, call_wrapper);
3824 } 3906 }
3825 3907
3826 3908
3827 void MacroAssembler::IsObjectJSObjectType(Register heap_object, 3909 void MacroAssembler::IsObjectJSObjectType(Register heap_object,
3828 Register map, 3910 Register map,
3829 Register scratch, 3911 Register scratch,
3830 Label* fail) { 3912 Label* fail) {
3831 lw(map, FieldMemOperand(heap_object, HeapObject::kMapOffset)); 3913 ld(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
3832 IsInstanceJSObjectType(map, scratch, fail); 3914 IsInstanceJSObjectType(map, scratch, fail);
3833 } 3915 }
3834 3916
3835 3917
3836 void MacroAssembler::IsInstanceJSObjectType(Register map, 3918 void MacroAssembler::IsInstanceJSObjectType(Register map,
3837 Register scratch, 3919 Register scratch,
3838 Label* fail) { 3920 Label* fail) {
3839 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset)); 3921 lbu(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
3840 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE)); 3922 Branch(fail, lt, scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
3841 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE)); 3923 Branch(fail, gt, scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
3842 } 3924 }
3843 3925
3844 3926
3845 void MacroAssembler::IsObjectJSStringType(Register object, 3927 void MacroAssembler::IsObjectJSStringType(Register object,
3846 Register scratch, 3928 Register scratch,
3847 Label* fail) { 3929 Label* fail) {
3848 ASSERT(kNotStringTag != 0); 3930 ASSERT(kNotStringTag != 0);
3849 3931
3850 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 3932 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3851 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 3933 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3852 And(scratch, scratch, Operand(kIsNotStringMask)); 3934 And(scratch, scratch, Operand(kIsNotStringMask));
3853 Branch(fail, ne, scratch, Operand(zero_reg)); 3935 Branch(fail, ne, scratch, Operand(zero_reg));
3854 } 3936 }
3855 3937
3856 3938
3857 void MacroAssembler::IsObjectNameType(Register object, 3939 void MacroAssembler::IsObjectNameType(Register object,
3858 Register scratch, 3940 Register scratch,
3859 Label* fail) { 3941 Label* fail) {
3860 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 3942 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
3861 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 3943 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3862 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE)); 3944 Branch(fail, hi, scratch, Operand(LAST_NAME_TYPE));
3863 } 3945 }
3864 3946
3865 3947
3866 // --------------------------------------------------------------------------- 3948 // ---------------------------------------------------------------------------
3867 // Support functions. 3949 // Support functions.
3868 3950
3869 3951
3870 void MacroAssembler::TryGetFunctionPrototype(Register function, 3952 void MacroAssembler::TryGetFunctionPrototype(Register function,
3871 Register result, 3953 Register result,
3872 Register scratch, 3954 Register scratch,
3873 Label* miss, 3955 Label* miss,
3874 bool miss_on_bound_function) { 3956 bool miss_on_bound_function) {
3875 // Check that the receiver isn't a smi. 3957 // Check that the receiver isn't a smi.
3876 JumpIfSmi(function, miss); 3958 JumpIfSmi(function, miss);
3877 3959
3878 // Check that the function really is a function. Load map into result reg. 3960 // Check that the function really is a function. Load map into result reg.
3879 GetObjectType(function, result, scratch); 3961 GetObjectType(function, result, scratch);
3880 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE)); 3962 Branch(miss, ne, scratch, Operand(JS_FUNCTION_TYPE));
3881 3963
3882 if (miss_on_bound_function) { 3964 if (miss_on_bound_function) {
3883 lw(scratch, 3965 ld(scratch,
3884 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset)); 3966 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
3885 lw(scratch, 3967 // ld(scratch,
3886 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset)); 3968 // FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3969 // And(scratch, scratch,
3970 // Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction)));
3971 lwu(scratch,
3972 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
3887 And(scratch, scratch, 3973 And(scratch, scratch,
3888 Operand(Smi::FromInt(1 << SharedFunctionInfo::kBoundFunction))); 3974 Operand(1 << SharedFunctionInfo::kBoundFunction));
3889 Branch(miss, ne, scratch, Operand(zero_reg)); 3975 Branch(miss, ne, scratch, Operand(zero_reg));
3890 } 3976 }
3891 3977
3892 // Make sure that the function has an instance prototype. 3978 // Make sure that the function has an instance prototype.
3893 Label non_instance; 3979 Label non_instance;
3894 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset)); 3980 lbu(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
3895 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype)); 3981 And(scratch, scratch, Operand(1 << Map::kHasNonInstancePrototype));
3896 Branch(&non_instance, ne, scratch, Operand(zero_reg)); 3982 Branch(&non_instance, ne, scratch, Operand(zero_reg));
3897 3983
3898 // Get the prototype or initial map from the function. 3984 // Get the prototype or initial map from the function.
3899 lw(result, 3985 ld(result,
3900 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 3986 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
3901 3987
3902 // If the prototype or initial map is the hole, don't return it and 3988 // If the prototype or initial map is the hole, don't return it and
3903 // simply miss the cache instead. This will allow us to allocate a 3989 // simply miss the cache instead. This will allow us to allocate a
3904 // prototype object on-demand in the runtime system. 3990 // prototype object on-demand in the runtime system.
3905 LoadRoot(t8, Heap::kTheHoleValueRootIndex); 3991 LoadRoot(t8, Heap::kTheHoleValueRootIndex);
3906 Branch(miss, eq, result, Operand(t8)); 3992 Branch(miss, eq, result, Operand(t8));
3907 3993
3908 // If the function does not have an initial map, we're done. 3994 // If the function does not have an initial map, we're done.
3909 Label done; 3995 Label done;
3910 GetObjectType(result, scratch, scratch); 3996 GetObjectType(result, scratch, scratch);
3911 Branch(&done, ne, scratch, Operand(MAP_TYPE)); 3997 Branch(&done, ne, scratch, Operand(MAP_TYPE));
3912 3998
3913 // Get the prototype from the initial map. 3999 // Get the prototype from the initial map.
3914 lw(result, FieldMemOperand(result, Map::kPrototypeOffset)); 4000 ld(result, FieldMemOperand(result, Map::kPrototypeOffset));
3915 jmp(&done); 4001 jmp(&done);
3916 4002
3917 // Non-instance prototype: Fetch prototype from constructor field 4003 // Non-instance prototype: Fetch prototype from constructor field
3918 // in initial map. 4004 // in initial map.
3919 bind(&non_instance); 4005 bind(&non_instance);
3920 lw(result, FieldMemOperand(result, Map::kConstructorOffset)); 4006 ld(result, FieldMemOperand(result, Map::kConstructorOffset));
3921 4007
3922 // All done. 4008 // All done.
3923 bind(&done); 4009 bind(&done);
3924 } 4010 }
3925 4011
3926 4012
3927 void MacroAssembler::GetObjectType(Register object, 4013 void MacroAssembler::GetObjectType(Register object,
3928 Register map, 4014 Register map,
3929 Register type_reg) { 4015 Register type_reg) {
3930 lw(map, FieldMemOperand(object, HeapObject::kMapOffset)); 4016 ld(map, FieldMemOperand(object, HeapObject::kMapOffset));
3931 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset)); 4017 lbu(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
3932 } 4018 }
3933 4019
3934 4020
3935 // ----------------------------------------------------------------------------- 4021 // -----------------------------------------------------------------------------
3936 // Runtime calls. 4022 // Runtime calls.
3937 4023
3938 void MacroAssembler::CallStub(CodeStub* stub, 4024 void MacroAssembler::CallStub(CodeStub* stub,
3939 TypeFeedbackId ast_id, 4025 TypeFeedbackId ast_id,
3940 Condition cond, 4026 Condition cond,
3941 Register r1, 4027 Register r1,
3942 const Operand& r2, 4028 const Operand& r2,
3943 BranchDelaySlot bd) { 4029 BranchDelaySlot bd) {
3944 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 4030 ASSERT(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
3945 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, 4031 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id,
3946 cond, r1, r2, bd); 4032 cond, r1, r2, bd);
3947 } 4033 }
3948 4034
3949 4035
3950 void MacroAssembler::TailCallStub(CodeStub* stub, 4036 void MacroAssembler::TailCallStub(CodeStub* stub,
3951 Condition cond, 4037 Condition cond,
3952 Register r1, 4038 Register r1,
3953 const Operand& r2, 4039 const Operand& r2,
3954 BranchDelaySlot bd) { 4040 BranchDelaySlot bd) {
3955 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd); 4041 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond, r1, r2, bd);
3956 } 4042 }
3957 4043
3958 4044
3959 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) { 4045 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
3960 return ref0.address() - ref1.address(); 4046 int64_t offset = (ref0.address() - ref1.address());
4047 ASSERT(static_cast<int>(offset) == offset);
4048 return static_cast<int>(offset);
3961 } 4049 }
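AddressOffset now computes the difference as int64_t and asserts that it still fits in an int: on a 64-bit build the two external references can in principle be far apart, while callers use the result as a 32-bit MemOperand displacement. A stand-alone sketch of the same check:

#include <cassert>
#include <cstdint>

// Pointer difference checked to fit the int return type used for displacements.
int CheckedAddressOffset(const char* ref0, const char* ref1) {
  int64_t offset = ref0 - ref1;
  assert(static_cast<int>(offset) == offset);
  return static_cast<int>(offset);
}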
3962 4050
3963 4051
3964 void MacroAssembler::CallApiFunctionAndReturn( 4052 void MacroAssembler::CallApiFunctionAndReturn(
3965 Register function_address, 4053 Register function_address,
3966 ExternalReference thunk_ref, 4054 ExternalReference thunk_ref,
3967 int stack_space, 4055 int stack_space,
3968 MemOperand return_value_operand, 4056 MemOperand return_value_operand,
3969 MemOperand* context_restore_operand) { 4057 MemOperand* context_restore_operand) {
3970 ExternalReference next_address = 4058 ExternalReference next_address =
(...skipping 17 matching lines...)
3988 // Additional parameter is the address of the actual callback. 4076 // Additional parameter is the address of the actual callback.
3989 li(t9, Operand(thunk_ref)); 4077 li(t9, Operand(thunk_ref));
3990 jmp(&end_profiler_check); 4078 jmp(&end_profiler_check);
3991 4079
3992 bind(&profiler_disabled); 4080 bind(&profiler_disabled);
3993 mov(t9, function_address); 4081 mov(t9, function_address);
3994 bind(&end_profiler_check); 4082 bind(&end_profiler_check);
3995 4083
3996 // Allocate HandleScope in callee-save registers. 4084 // Allocate HandleScope in callee-save registers.
3997 li(s3, Operand(next_address)); 4085 li(s3, Operand(next_address));
3998 lw(s0, MemOperand(s3, kNextOffset)); 4086 ld(s0, MemOperand(s3, kNextOffset));
3999 lw(s1, MemOperand(s3, kLimitOffset)); 4087 ld(s1, MemOperand(s3, kLimitOffset));
4000 lw(s2, MemOperand(s3, kLevelOffset)); 4088 ld(s2, MemOperand(s3, kLevelOffset));
4001 Addu(s2, s2, Operand(1)); 4089 Daddu(s2, s2, Operand(1));
4002 sw(s2, MemOperand(s3, kLevelOffset)); 4090 sd(s2, MemOperand(s3, kLevelOffset));
4003 4091
4004 if (FLAG_log_timer_events) { 4092 if (FLAG_log_timer_events) {
4005 FrameScope frame(this, StackFrame::MANUAL); 4093 FrameScope frame(this, StackFrame::MANUAL);
4006 PushSafepointRegisters(); 4094 PushSafepointRegisters();
4007 PrepareCallCFunction(1, a0); 4095 PrepareCallCFunction(1, a0);
4008 li(a0, Operand(ExternalReference::isolate_address(isolate()))); 4096 li(a0, Operand(ExternalReference::isolate_address(isolate())));
4009 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1); 4097 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
4010 PopSafepointRegisters(); 4098 PopSafepointRegisters();
4011 } 4099 }
4012 4100
(...skipping 12 matching lines...)
4025 PopSafepointRegisters(); 4113 PopSafepointRegisters();
4026 } 4114 }
4027 4115
4028 Label promote_scheduled_exception; 4116 Label promote_scheduled_exception;
4029 Label exception_handled; 4117 Label exception_handled;
4030 Label delete_allocated_handles; 4118 Label delete_allocated_handles;
4031 Label leave_exit_frame; 4119 Label leave_exit_frame;
4032 Label return_value_loaded; 4120 Label return_value_loaded;
4033 4121
4034 // Load value from ReturnValue. 4122 // Load value from ReturnValue.
4035 lw(v0, return_value_operand); 4123 ld(v0, return_value_operand);
4036 bind(&return_value_loaded); 4124 bind(&return_value_loaded);
4037 4125
4038 // No more valid handles (the result handle was the last one). Restore 4126 // No more valid handles (the result handle was the last one). Restore
4039 // previous handle scope. 4127 // previous handle scope.
4040 sw(s0, MemOperand(s3, kNextOffset)); 4128 sd(s0, MemOperand(s3, kNextOffset));
4041 if (emit_debug_code()) { 4129 if (emit_debug_code()) {
4042 lw(a1, MemOperand(s3, kLevelOffset)); 4130 ld(a1, MemOperand(s3, kLevelOffset));
4043 Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2)); 4131 Check(eq, kUnexpectedLevelAfterReturnFromApiCall, a1, Operand(s2));
4044 } 4132 }
4045 Subu(s2, s2, Operand(1)); 4133 Dsubu(s2, s2, Operand(1));
4046 sw(s2, MemOperand(s3, kLevelOffset)); 4134 sd(s2, MemOperand(s3, kLevelOffset));
4047 lw(at, MemOperand(s3, kLimitOffset)); 4135 ld(at, MemOperand(s3, kLimitOffset));
4048 Branch(&delete_allocated_handles, ne, s1, Operand(at)); 4136 Branch(&delete_allocated_handles, ne, s1, Operand(at));
4049 4137
4050 // Check if the function scheduled an exception. 4138 // Check if the function scheduled an exception.
4051 bind(&leave_exit_frame); 4139 bind(&leave_exit_frame);
4052 LoadRoot(t0, Heap::kTheHoleValueRootIndex); 4140 LoadRoot(a4, Heap::kTheHoleValueRootIndex);
4053 li(at, Operand(ExternalReference::scheduled_exception_address(isolate()))); 4141 li(at, Operand(ExternalReference::scheduled_exception_address(isolate())));
4054 lw(t1, MemOperand(at)); 4142 ld(a5, MemOperand(at));
4055 Branch(&promote_scheduled_exception, ne, t0, Operand(t1)); 4143 Branch(&promote_scheduled_exception, ne, a4, Operand(a5));
4056 bind(&exception_handled); 4144 bind(&exception_handled);
4057 4145
4058 bool restore_context = context_restore_operand != NULL; 4146 bool restore_context = context_restore_operand != NULL;
4059 if (restore_context) { 4147 if (restore_context) {
4060 lw(cp, *context_restore_operand); 4148 ld(cp, *context_restore_operand);
4061 } 4149 }
4062 li(s0, Operand(stack_space)); 4150 li(s0, Operand(stack_space));
4063 LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN); 4151 LeaveExitFrame(false, s0, !restore_context, EMIT_RETURN);
4064 4152
4065 bind(&promote_scheduled_exception); 4153 bind(&promote_scheduled_exception);
4066 { 4154 {
4067 FrameScope frame(this, StackFrame::INTERNAL); 4155 FrameScope frame(this, StackFrame::INTERNAL);
4068 CallExternalReference( 4156 CallExternalReference(
4069 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 4157 ExternalReference(Runtime::kPromoteScheduledException, isolate()),
4070 0); 4158 0);
4071 } 4159 }
4072 jmp(&exception_handled); 4160 jmp(&exception_handled);
4073 4161
4074 // HandleScope limit has changed. Delete allocated extensions. 4162 // HandleScope limit has changed. Delete allocated extensions.
4075 bind(&delete_allocated_handles); 4163 bind(&delete_allocated_handles);
4076 sw(s1, MemOperand(s3, kLimitOffset)); 4164 sd(s1, MemOperand(s3, kLimitOffset));
4077 mov(s0, v0); 4165 mov(s0, v0);
4078 mov(a0, v0); 4166 mov(a0, v0);
4079 PrepareCallCFunction(1, s1); 4167 PrepareCallCFunction(1, s1);
4080 li(a0, Operand(ExternalReference::isolate_address(isolate()))); 4168 li(a0, Operand(ExternalReference::isolate_address(isolate())));
4081 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()), 4169 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
4082 1); 4170 1);
4083 mov(v0, s0); 4171 mov(v0, s0);
4084 jmp(&leave_exit_frame); 4172 jmp(&leave_exit_frame);
4085 } 4173 }
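The sequence above mirrors the HandleScope bookkeeping done on the C++ side of an API call: next, limit and level are captured before the call, level is bumped, and on the way out next and level are restored while extensions are deleted only if the limit moved. A minimal host-side sketch of that protocol follows; the struct and helper names are illustrative stand-ins, not V8's actual HandleScopeData API.

#include <cstdint>

// Illustrative stand-in for the handle scope data the generated code pokes at;
// field names are assumptions, not V8's real layout.
struct HandleScopeData {
  uintptr_t* next;   // next free handle slot   (kNextOffset)
  uintptr_t* limit;  // end of current block    (kLimitOffset)
  int level;         // nesting depth           (kLevelOffset)
};

// Sketch of the save/call/restore protocol around an API call.
template <typename ApiCall, typename DeleteExtensions>
uintptr_t CallWithHandleScope(HandleScopeData* d, ApiCall api_call,
                              DeleteExtensions delete_extensions) {
  uintptr_t* saved_next = d->next;    // ld s0, kNextOffset
  uintptr_t* saved_limit = d->limit;  // ld s1, kLimitOffset
  d->level++;                         // Daddu + sd on kLevelOffset

  uintptr_t result = api_call();      // the C++ API function

  d->next = saved_next;               // restore next
  d->level--;                         // Dsubu + sd on kLevelOffset
  if (d->limit != saved_limit) {      // limit moved: extra blocks were used
    d->limit = saved_limit;
    delete_extensions();              // ~ delete_handle_scope_extensions()
  }
  return result;
}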
4086 4174
(...skipping 19 matching lines...)
4106 Register scratch1, 4194 Register scratch1,
4107 Register scratch2, 4195 Register scratch2,
4108 Register heap_number_map, 4196 Register heap_number_map,
4109 Label* not_number, 4197 Label* not_number,
4110 ObjectToDoubleFlags flags) { 4198 ObjectToDoubleFlags flags) {
4111 Label done; 4199 Label done;
4112 if ((flags & OBJECT_NOT_SMI) == 0) { 4200 if ((flags & OBJECT_NOT_SMI) == 0) {
4113 Label not_smi; 4201 Label not_smi;
4114 JumpIfNotSmi(object, &not_smi); 4202 JumpIfNotSmi(object, &not_smi);
4115 // Remove smi tag and convert to double. 4203 // Remove smi tag and convert to double.
4116 sra(scratch1, object, kSmiTagSize); 4204 // dsra(scratch1, object, kSmiTagSize);
4205 dsra32(scratch1, object, 0);
4117 mtc1(scratch1, result); 4206 mtc1(scratch1, result);
4118 cvt_d_w(result, result); 4207 cvt_d_w(result, result);
4119 Branch(&done); 4208 Branch(&done);
4120 bind(&not_smi); 4209 bind(&not_smi);
4121 } 4210 }
4122 // Check for heap number and load double value from it. 4211 // Check for heap number and load double value from it.
4123 lw(scratch1, FieldMemOperand(object, HeapObject::kMapOffset)); 4212 ld(scratch1, FieldMemOperand(object, HeapObject::kMapOffset));
4124 Branch(not_number, ne, scratch1, Operand(heap_number_map)); 4213 Branch(not_number, ne, scratch1, Operand(heap_number_map));
4125 4214
4126 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) { 4215 if ((flags & AVOID_NANS_AND_INFINITIES) != 0) {
4127 // If exponent is all ones the number is either a NaN or +/-Infinity. 4216 // If exponent is all ones the number is either a NaN or +/-Infinity.
4128 Register exponent = scratch1; 4217 Register exponent = scratch1;
4129 Register mask_reg = scratch2; 4218 Register mask_reg = scratch2;
4130 lw(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset)); 4219 lwu(exponent, FieldMemOperand(object, HeapNumber::kExponentOffset));
4131 li(mask_reg, HeapNumber::kExponentMask); 4220 li(mask_reg, HeapNumber::kExponentMask);
4132 4221
4133 And(exponent, exponent, mask_reg); 4222 And(exponent, exponent, mask_reg);
4134 Branch(not_number, eq, exponent, Operand(mask_reg)); 4223 Branch(not_number, eq, exponent, Operand(mask_reg));
4135 } 4224 }
4136 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset)); 4225 ldc1(result, FieldMemOperand(object, HeapNumber::kValueOffset));
4137 bind(&done); 4226 bind(&done);
4138 } 4227 }
4139 4228
4140 4229
4141 void MacroAssembler::SmiToDoubleFPURegister(Register smi, 4230 void MacroAssembler::SmiToDoubleFPURegister(Register smi,
4142 FPURegister value, 4231 FPURegister value,
4143 Register scratch1) { 4232 Register scratch1) {
4144 sra(scratch1, smi, kSmiTagSize); 4233 // dsra(scratch1, smi, kSmiTagSize);
4234 dsra32(scratch1, smi, 0);
4145 mtc1(scratch1, value); 4235 mtc1(scratch1, value);
4146 cvt_d_w(value, value); 4236 cvt_d_w(value, value);
4147 } 4237 }
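Both conversions rely on the 64-bit smi encoding used by this port, where the 32-bit payload sits in the upper word, so dsra32 by 0 simultaneously drops the tag and sign-extends the value before mtc1/cvt_d_w converts it. A small sketch of the same arithmetic, assuming that encoding:

#include <cstdint>

// Assumed encoding: 32-bit payload in the upper word, zero tag bits below.
int64_t SmiTag64(int32_t value) {
  return static_cast<int64_t>(static_cast<uint64_t>(value) << 32);
}

// What dsra32(dst, smi, 0) computes: arithmetic shift right by 32 bits,
// dropping the tag and sign-extending the payload.
int32_t SmiUntag64(int64_t smi) {
  return static_cast<int32_t>(smi >> 32);
}

// mtc1 + cvt_d_w: move the untagged int32 to an FPU register and convert.
double SmiToDouble(int64_t smi) {
  return static_cast<double>(SmiUntag64(smi));
}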
4148 4238
4149 4239
4150 void MacroAssembler::AdduAndCheckForOverflow(Register dst, 4240 void MacroAssembler::AdduAndCheckForOverflow(Register dst,
4151 Register left, 4241 Register left,
4152 Register right, 4242 Register right,
4153 Register overflow_dst, 4243 Register overflow_dst,
4154 Register scratch) { 4244 Register scratch) {
4155 ASSERT(!dst.is(overflow_dst)); 4245 ASSERT(!dst.is(overflow_dst));
4156 ASSERT(!dst.is(scratch)); 4246 ASSERT(!dst.is(scratch));
4157 ASSERT(!overflow_dst.is(scratch)); 4247 ASSERT(!overflow_dst.is(scratch));
4158 ASSERT(!overflow_dst.is(left)); 4248 ASSERT(!overflow_dst.is(left));
4159 ASSERT(!overflow_dst.is(right)); 4249 ASSERT(!overflow_dst.is(right));
4160 4250
4161 if (left.is(right) && dst.is(left)) { 4251 if (left.is(right) && dst.is(left)) {
4162 ASSERT(!dst.is(t9)); 4252 ASSERT(!dst.is(t9));
4163 ASSERT(!scratch.is(t9)); 4253 ASSERT(!scratch.is(t9));
4164 ASSERT(!left.is(t9)); 4254 ASSERT(!left.is(t9));
4165 ASSERT(!right.is(t9)); 4255 ASSERT(!right.is(t9));
4166 ASSERT(!overflow_dst.is(t9)); 4256 ASSERT(!overflow_dst.is(t9));
4167 mov(t9, right); 4257 mov(t9, right);
4168 right = t9; 4258 right = t9;
4169 } 4259 }
4170 4260
4171 if (dst.is(left)) { 4261 if (dst.is(left)) {
4172 mov(scratch, left); // Preserve left. 4262 mov(scratch, left); // Preserve left.
4173 addu(dst, left, right); // Left is overwritten. 4263 daddu(dst, left, right); // Left is overwritten.
4174 xor_(scratch, dst, scratch); // Original left. 4264 xor_(scratch, dst, scratch); // Original left.
4175 xor_(overflow_dst, dst, right); 4265 xor_(overflow_dst, dst, right);
4176 and_(overflow_dst, overflow_dst, scratch); 4266 and_(overflow_dst, overflow_dst, scratch);
4177 } else if (dst.is(right)) { 4267 } else if (dst.is(right)) {
4178 mov(scratch, right); // Preserve right. 4268 mov(scratch, right); // Preserve right.
4179 addu(dst, left, right); // Right is overwritten. 4269 daddu(dst, left, right); // Right is overwritten.
4180 xor_(scratch, dst, scratch); // Original right. 4270 xor_(scratch, dst, scratch); // Original right.
4181 xor_(overflow_dst, dst, left); 4271 xor_(overflow_dst, dst, left);
4182 and_(overflow_dst, overflow_dst, scratch); 4272 and_(overflow_dst, overflow_dst, scratch);
4183 } else { 4273 } else {
4184 addu(dst, left, right); 4274 daddu(dst, left, right);
4185 xor_(overflow_dst, dst, left); 4275 xor_(overflow_dst, dst, left);
4186 xor_(scratch, dst, right); 4276 xor_(scratch, dst, right);
4187 and_(overflow_dst, scratch, overflow_dst); 4277 and_(overflow_dst, scratch, overflow_dst);
4188 } 4278 }
4189 } 4279 }
4190 4280
4191 4281
4192 void MacroAssembler::SubuAndCheckForOverflow(Register dst, 4282 void MacroAssembler::SubuAndCheckForOverflow(Register dst,
4193 Register left, 4283 Register left,
4194 Register right, 4284 Register right,
(...skipping 10 matching lines...)
4205 // This happens with some crankshaft code. Since Subu works fine if 4295 // This happens with some crankshaft code. Since Subu works fine if
4206 // left == right, let's not make that restriction here. 4296 // left == right, let's not make that restriction here.
4207 if (left.is(right)) { 4297 if (left.is(right)) {
4208 mov(dst, zero_reg); 4298 mov(dst, zero_reg);
4209 mov(overflow_dst, zero_reg); 4299 mov(overflow_dst, zero_reg);
4210 return; 4300 return;
4211 } 4301 }
4212 4302
4213 if (dst.is(left)) { 4303 if (dst.is(left)) {
4214 mov(scratch, left); // Preserve left. 4304 mov(scratch, left); // Preserve left.
4215 subu(dst, left, right); // Left is overwritten. 4305 dsubu(dst, left, right); // Left is overwritten.
4216 xor_(overflow_dst, dst, scratch); // scratch is original left. 4306 xor_(overflow_dst, dst, scratch); // scratch is original left.
4217 xor_(scratch, scratch, right); // scratch is original left. 4307 xor_(scratch, scratch, right); // scratch is original left.
4218 and_(overflow_dst, scratch, overflow_dst); 4308 and_(overflow_dst, scratch, overflow_dst);
4219 } else if (dst.is(right)) { 4309 } else if (dst.is(right)) {
4220 mov(scratch, right); // Preserve right. 4310 mov(scratch, right); // Preserve right.
4221 subu(dst, left, right); // Right is overwritten. 4311 dsubu(dst, left, right); // Right is overwritten.
4222 xor_(overflow_dst, dst, left); 4312 xor_(overflow_dst, dst, left);
4223 xor_(scratch, left, scratch); // Original right. 4313 xor_(scratch, left, scratch); // Original right.
4224 and_(overflow_dst, scratch, overflow_dst); 4314 and_(overflow_dst, scratch, overflow_dst);
4225 } else { 4315 } else {
4226 subu(dst, left, right); 4316 dsubu(dst, left, right);
4227 xor_(overflow_dst, dst, left); 4317 xor_(overflow_dst, dst, left);
4228 xor_(scratch, left, right); 4318 xor_(scratch, left, right);
4229 and_(overflow_dst, scratch, overflow_dst); 4319 and_(overflow_dst, scratch, overflow_dst);
4230 } 4320 }
4231 } 4321 }
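Both overflow helpers use the standard sign-bit trick: overflow_dst ends up negative exactly when the operation overflowed, because for addition the result's sign must differ from both operands' signs, and for subtraction the operands' signs must differ and the result's sign must differ from the minuend. A host-side sketch of the two predicates:

#include <cstdint>

// Addition overflowed iff (dst ^ left) & (dst ^ right) has its sign bit set.
bool AddOverflowed(int64_t left, int64_t right) {
  int64_t dst = static_cast<int64_t>(static_cast<uint64_t>(left) +
                                     static_cast<uint64_t>(right));
  return ((dst ^ left) & (dst ^ right)) < 0;
}

// Subtraction overflowed iff (dst ^ left) & (left ^ right) has its sign bit set.
bool SubOverflowed(int64_t left, int64_t right) {
  int64_t dst = static_cast<int64_t>(static_cast<uint64_t>(left) -
                                     static_cast<uint64_t>(right));
  return ((dst ^ left) & (left ^ right)) < 0;
}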
4232 4322
4233 4323
4234 void MacroAssembler::CallRuntime(const Runtime::Function* f, 4324 void MacroAssembler::CallRuntime(const Runtime::Function* f,
4235 int num_arguments, 4325 int num_arguments,
4236 SaveFPRegsMode save_doubles) { 4326 SaveFPRegsMode save_doubles) {
(...skipping 74 matching lines...)
4311 } else { 4401 } else {
4312 ASSERT(flag == JUMP_FUNCTION); 4402 ASSERT(flag == JUMP_FUNCTION);
4313 Jump(t9); 4403 Jump(t9);
4314 } 4404 }
4315 } 4405 }
4316 4406
4317 4407
4318 void MacroAssembler::GetBuiltinFunction(Register target, 4408 void MacroAssembler::GetBuiltinFunction(Register target,
4319 Builtins::JavaScript id) { 4409 Builtins::JavaScript id) {
4320 // Load the builtins object into target register. 4410 // Load the builtins object into target register.
4321 lw(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 4411 ld(target, MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4322 lw(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset)); 4412 ld(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
4323 // Load the JavaScript builtin function from the builtins object. 4413 // Load the JavaScript builtin function from the builtins object.
4324 lw(target, FieldMemOperand(target, 4414 ld(target, FieldMemOperand(target,
4325 JSBuiltinsObject::OffsetOfFunctionWithId(id))); 4415 JSBuiltinsObject::OffsetOfFunctionWithId(id)));
4326 } 4416 }
4327 4417
4328 4418
4329 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) { 4419 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
4330 ASSERT(!target.is(a1)); 4420 ASSERT(!target.is(a1));
4331 GetBuiltinFunction(a1, id); 4421 GetBuiltinFunction(a1, id);
4332 // Load the code entry point from the builtins object. 4422 // Load the code entry point from the builtins object.
4333 lw(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset)); 4423 ld(target, FieldMemOperand(a1, JSFunction::kCodeEntryOffset));
4334 } 4424 }
4335 4425
4336 4426
4337 void MacroAssembler::SetCounter(StatsCounter* counter, int value, 4427 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
4338 Register scratch1, Register scratch2) { 4428 Register scratch1, Register scratch2) {
4339 if (FLAG_native_code_counters && counter->Enabled()) { 4429 if (FLAG_native_code_counters && counter->Enabled()) {
4340 li(scratch1, Operand(value)); 4430 li(scratch1, Operand(value));
4341 li(scratch2, Operand(ExternalReference(counter))); 4431 li(scratch2, Operand(ExternalReference(counter)));
4342 sw(scratch1, MemOperand(scratch2)); 4432 sd(scratch1, MemOperand(scratch2));
4343 } 4433 }
4344 } 4434 }
4345 4435
4346 4436
4347 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value, 4437 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
4348 Register scratch1, Register scratch2) { 4438 Register scratch1, Register scratch2) {
4349 ASSERT(value > 0); 4439 ASSERT(value > 0);
4350 if (FLAG_native_code_counters && counter->Enabled()) { 4440 if (FLAG_native_code_counters && counter->Enabled()) {
4351 li(scratch2, Operand(ExternalReference(counter))); 4441 li(scratch2, Operand(ExternalReference(counter)));
4352 lw(scratch1, MemOperand(scratch2)); 4442 ld(scratch1, MemOperand(scratch2));
4353 Addu(scratch1, scratch1, Operand(value)); 4443 Daddu(scratch1, scratch1, Operand(value));
4354 sw(scratch1, MemOperand(scratch2)); 4444 sd(scratch1, MemOperand(scratch2));
4355 } 4445 }
4356 } 4446 }
4357 4447
4358 4448
4359 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value, 4449 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
4360 Register scratch1, Register scratch2) { 4450 Register scratch1, Register scratch2) {
4361 ASSERT(value > 0); 4451 ASSERT(value > 0);
4362 if (FLAG_native_code_counters && counter->Enabled()) { 4452 if (FLAG_native_code_counters && counter->Enabled()) {
4363 li(scratch2, Operand(ExternalReference(counter))); 4453 li(scratch2, Operand(ExternalReference(counter)));
4364 lw(scratch1, MemOperand(scratch2)); 4454 ld(scratch1, MemOperand(scratch2));
4365 Subu(scratch1, scratch1, Operand(value)); 4455 Dsubu(scratch1, scratch1, Operand(value));
4366 sw(scratch1, MemOperand(scratch2)); 4456 sd(scratch1, MemOperand(scratch2));
4367 } 4457 }
4368 } 4458 }
4369 4459
4370 4460
4371 // ----------------------------------------------------------------------------- 4461 // -----------------------------------------------------------------------------
4372 // Debugging. 4462 // Debugging.
4373 4463
4374 void MacroAssembler::Assert(Condition cc, BailoutReason reason, 4464 void MacroAssembler::Assert(Condition cc, BailoutReason reason,
4375 Register rs, Operand rt) { 4465 Register rs, Operand rt) {
4376 if (emit_debug_code()) 4466 if (emit_debug_code())
4377 Check(cc, reason, rs, rt); 4467 Check(cc, reason, rs, rt);
4378 } 4468 }
4379 4469
4380 4470
4381 void MacroAssembler::AssertFastElements(Register elements) { 4471 void MacroAssembler::AssertFastElements(Register elements) {
4382 if (emit_debug_code()) { 4472 if (emit_debug_code()) {
4383 ASSERT(!elements.is(at)); 4473 ASSERT(!elements.is(at));
4384 Label ok; 4474 Label ok;
4385 push(elements); 4475 push(elements);
4386 lw(elements, FieldMemOperand(elements, HeapObject::kMapOffset)); 4476 ld(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
4387 LoadRoot(at, Heap::kFixedArrayMapRootIndex); 4477 LoadRoot(at, Heap::kFixedArrayMapRootIndex);
4388 Branch(&ok, eq, elements, Operand(at)); 4478 Branch(&ok, eq, elements, Operand(at));
4389 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex); 4479 LoadRoot(at, Heap::kFixedDoubleArrayMapRootIndex);
4390 Branch(&ok, eq, elements, Operand(at)); 4480 Branch(&ok, eq, elements, Operand(at));
4391 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex); 4481 LoadRoot(at, Heap::kFixedCOWArrayMapRootIndex);
4392 Branch(&ok, eq, elements, Operand(at)); 4482 Branch(&ok, eq, elements, Operand(at));
4393 Abort(kJSObjectWithFastElementsMapHasSlowElements); 4483 Abort(kJSObjectWithFastElementsMapHasSlowElements);
4394 bind(&ok); 4484 bind(&ok);
4395 pop(elements); 4485 pop(elements);
4396 } 4486 }
(...skipping 50 matching lines...)
4447 while (abort_instructions++ < kExpectedAbortInstructions) { 4537 while (abort_instructions++ < kExpectedAbortInstructions) {
4448 nop(); 4538 nop();
4449 } 4539 }
4450 } 4540 }
4451 } 4541 }
4452 4542
4453 4543
4454 void MacroAssembler::LoadContext(Register dst, int context_chain_length) { 4544 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
4455 if (context_chain_length > 0) { 4545 if (context_chain_length > 0) {
4456 // Move up the chain of contexts to the context containing the slot. 4546 // Move up the chain of contexts to the context containing the slot.
4457 lw(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX))); 4547 ld(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4458 for (int i = 1; i < context_chain_length; i++) { 4548 for (int i = 1; i < context_chain_length; i++) {
4459 lw(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX))); 4549 ld(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
4460 } 4550 }
4461 } else { 4551 } else {
4462 // Slot is in the current function context. Move it into the 4552 // Slot is in the current function context. Move it into the
4463 // destination register in case we store into it (the write barrier 4553 // destination register in case we store into it (the write barrier
4464 // cannot be allowed to destroy the context in esi). 4554 // cannot be allowed to destroy the context in esi).
4465 Move(dst, cp); 4555 Move(dst, cp);
4466 } 4556 }
4467 } 4557 }
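LoadContext simply chases the "previous context" slot chain_length times (or copies cp when the slot is in the current context). A rough sketch of that walk, treating a context as an array of tagged slots; the slot index here is a placeholder, not V8's real Context layout:

#include <cstdint>

constexpr int kPreviousIndex = 0;  // placeholder slot index for this sketch

uintptr_t* WalkContextChain(uintptr_t* current_context, int chain_length) {
  uintptr_t* ctx = current_context;       // chain_length == 0: Move(dst, cp)
  for (int i = 0; i < chain_length; i++) {
    // ld dst, MemOperand(dst, Context::SlotOffset(PREVIOUS_INDEX))
    ctx = reinterpret_cast<uintptr_t*>(ctx[kPreviousIndex]);
  }
  return ctx;
}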
4468 4558
4469 4559
4470 void MacroAssembler::LoadTransitionedArrayMapConditional( 4560 void MacroAssembler::LoadTransitionedArrayMapConditional(
4471 ElementsKind expected_kind, 4561 ElementsKind expected_kind,
4472 ElementsKind transitioned_kind, 4562 ElementsKind transitioned_kind,
4473 Register map_in_out, 4563 Register map_in_out,
4474 Register scratch, 4564 Register scratch,
4475 Label* no_map_match) { 4565 Label* no_map_match) {
4476 // Load the global or builtins object from the current context. 4566 // Load the global or builtins object from the current context.
4477 lw(scratch, 4567 ld(scratch,
4478 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 4568 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4479 lw(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); 4569 ld(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
4480 4570
4481 // Check that the function's map is the same as the expected cached map. 4571 // Check that the function's map is the same as the expected cached map.
4482 lw(scratch, 4572 ld(scratch,
4483 MemOperand(scratch, 4573 MemOperand(scratch,
4484 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); 4574 Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
4485 size_t offset = expected_kind * kPointerSize + 4575 size_t offset = expected_kind * kPointerSize +
4486 FixedArrayBase::kHeaderSize; 4576 FixedArrayBase::kHeaderSize;
4487 lw(at, FieldMemOperand(scratch, offset)); 4577 ld(at, FieldMemOperand(scratch, offset));
4488 Branch(no_map_match, ne, map_in_out, Operand(at)); 4578 Branch(no_map_match, ne, map_in_out, Operand(at));
4489 4579
4490 // Use the transitioned cached map. 4580 // Use the transitioned cached map.
4491 offset = transitioned_kind * kPointerSize + 4581 offset = transitioned_kind * kPointerSize +
4492 FixedArrayBase::kHeaderSize; 4582 FixedArrayBase::kHeaderSize;
4493 lw(map_in_out, FieldMemOperand(scratch, offset)); 4583 ld(map_in_out, FieldMemOperand(scratch, offset));
4494 } 4584 }
4495 4585
4496 4586
4497 void MacroAssembler::LoadGlobalFunction(int index, Register function) { 4587 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
4498 // Load the global or builtins object from the current context. 4588 // Load the global or builtins object from the current context.
4499 lw(function, 4589 ld(function,
4500 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 4590 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
4501 // Load the native context from the global or builtins object. 4591 // Load the native context from the global or builtins object.
4502 lw(function, FieldMemOperand(function, 4592 ld(function, FieldMemOperand(function,
4503 GlobalObject::kNativeContextOffset)); 4593 GlobalObject::kNativeContextOffset));
4504 // Load the function from the native context. 4594 // Load the function from the native context.
4505 lw(function, MemOperand(function, Context::SlotOffset(index))); 4595 ld(function, MemOperand(function, Context::SlotOffset(index)));
4506 } 4596 }
4507 4597
4508 4598
4509 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function, 4599 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
4510 Register map, 4600 Register map,
4511 Register scratch) { 4601 Register scratch) {
4512 // Load the initial map. The global functions all have initial maps. 4602 // Load the initial map. The global functions all have initial maps.
4513 lw(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset)); 4603 ld(map, FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
4514 if (emit_debug_code()) { 4604 if (emit_debug_code()) {
4515 Label ok, fail; 4605 Label ok, fail;
4516 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK); 4606 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
4517 Branch(&ok); 4607 Branch(&ok);
4518 bind(&fail); 4608 bind(&fail);
4519 Abort(kGlobalFunctionsMustHaveInitialMap); 4609 Abort(kGlobalFunctionsMustHaveInitialMap);
4520 bind(&ok); 4610 bind(&ok);
4521 } 4611 }
4522 } 4612 }
4523 4613
4524 4614
4525 void MacroAssembler::StubPrologue() { 4615 void MacroAssembler::StubPrologue() {
4526 Push(ra, fp, cp); 4616 Push(ra, fp, cp);
4527 Push(Smi::FromInt(StackFrame::STUB)); 4617 Push(Smi::FromInt(StackFrame::STUB));
4528 // Adjust FP to point to saved FP. 4618 // Adjust FP to point to saved FP.
4529 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 4619 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4530 } 4620 }
4531 4621
4532 4622
4533 void MacroAssembler::Prologue(bool code_pre_aging) { 4623 void MacroAssembler::Prologue(bool code_pre_aging) {
4534 PredictableCodeSizeScope predictible_code_size_scope( 4624 PredictableCodeSizeScope predictible_code_size_scope(
4535 this, kNoCodeAgeSequenceLength); 4625 this, kNoCodeAgeSequenceLength);
4536 // The following three instructions must remain together and unmodified 4626 // The following three instructions must remain together and unmodified
4537 // for code aging to work properly. 4627 // for code aging to work properly.
4538 if (code_pre_aging) { 4628 if (code_pre_aging) {
4539 // Pre-age the code. 4629 // Pre-age the code.
4540 Code* stub = Code::GetPreAgedCodeAgeStub(isolate()); 4630 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
4541 nop(Assembler::CODE_AGE_MARKER_NOP); 4631 nop(Assembler::CODE_AGE_MARKER_NOP);
4542 // Load the stub address to t9 and call it, 4632 // Load the stub address to t9 and call it,
4543 // GetCodeAgeAndParity() extracts the stub address from this instruction. 4633 // GetCodeAgeAndParity() extracts the stub address from this instruction.
4544 li(t9, 4634 li(t9,
4545 Operand(reinterpret_cast<uint32_t>(stub->instruction_start())), 4635 Operand(reinterpret_cast<uint64_t>(stub->instruction_start())),
4546 CONSTANT_SIZE); 4636 ADDRESS_LOAD);
4547 nop(); // Prevent jalr to jal optimization. 4637 nop(); // Prevent jalr to jal optimization.
4548 jalr(t9, a0); 4638 jalr(t9, a0);
4549 nop(); // Branch delay slot nop. 4639 nop(); // Branch delay slot nop.
4550 nop(); // Pad the empty space. 4640 nop(); // Pad the empty space.
4551 } else { 4641 } else {
4552 Push(ra, fp, cp, a1); 4642 Push(ra, fp, cp, a1);
4553 nop(Assembler::CODE_AGE_SEQUENCE_NOP); 4643 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4644 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4645 nop(Assembler::CODE_AGE_SEQUENCE_NOP);
4554 // Adjust fp to point to caller's fp. 4646 // Adjust fp to point to caller's fp.
4555 Addu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp)); 4647 Daddu(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
4556 } 4648 }
4557 } 4649 }
4558 4650
4559 4651
4560 void MacroAssembler::EnterFrame(StackFrame::Type type) { 4652 void MacroAssembler::EnterFrame(StackFrame::Type type) {
4561 addiu(sp, sp, -5 * kPointerSize); 4653 daddiu(sp, sp, -5 * kPointerSize);
4562 li(t8, Operand(Smi::FromInt(type))); 4654 li(t8, Operand(Smi::FromInt(type)));
4563 li(t9, Operand(CodeObject()), CONSTANT_SIZE); 4655 li(t9, Operand(CodeObject()), CONSTANT_SIZE);
4564 sw(ra, MemOperand(sp, 4 * kPointerSize)); 4656 sd(ra, MemOperand(sp, 4 * kPointerSize));
4565 sw(fp, MemOperand(sp, 3 * kPointerSize)); 4657 sd(fp, MemOperand(sp, 3 * kPointerSize));
4566 sw(cp, MemOperand(sp, 2 * kPointerSize)); 4658 sd(cp, MemOperand(sp, 2 * kPointerSize));
4567 sw(t8, MemOperand(sp, 1 * kPointerSize)); 4659 sd(t8, MemOperand(sp, 1 * kPointerSize));
4568 sw(t9, MemOperand(sp, 0 * kPointerSize)); 4660 sd(t9, MemOperand(sp, 0 * kPointerSize));
4569 // Adjust FP to point to saved FP. 4661 // Adjust FP to point to saved FP.
4570 Addu(fp, sp, 4662 Daddu(fp, sp,
4571 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize)); 4663 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
4572 } 4664 }
4573 4665
4574 4666
4575 void MacroAssembler::LeaveFrame(StackFrame::Type type) { 4667 void MacroAssembler::LeaveFrame(StackFrame::Type type) {
4576 mov(sp, fp); 4668 mov(sp, fp);
4577 lw(fp, MemOperand(sp, 0 * kPointerSize)); 4669 ld(fp, MemOperand(sp, 0 * kPointerSize));
4578 lw(ra, MemOperand(sp, 1 * kPointerSize)); 4670 ld(ra, MemOperand(sp, 1 * kPointerSize));
4579 addiu(sp, sp, 2 * kPointerSize); 4671 daddiu(sp, sp, 2 * kPointerSize);
4580 } 4672 }
4581 4673
4582 4674
4583 void MacroAssembler::EnterExitFrame(bool save_doubles, 4675 void MacroAssembler::EnterExitFrame(bool save_doubles,
4584 int stack_space) { 4676 int stack_space) {
4585 // Set up the frame structure on the stack. 4677 // Set up the frame structure on the stack.
4586 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement); 4678 STATIC_ASSERT(2 * kPointerSize == ExitFrameConstants::kCallerSPDisplacement);
4587 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset); 4679 STATIC_ASSERT(1 * kPointerSize == ExitFrameConstants::kCallerPCOffset);
4588 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset); 4680 STATIC_ASSERT(0 * kPointerSize == ExitFrameConstants::kCallerFPOffset);
4589 4681
4590 // This is how the stack will look: 4682 // This is how the stack will look:
4591 // fp + 2 (==kCallerSPDisplacement) - old stack's end 4683 // fp + 2 (==kCallerSPDisplacement) - old stack's end
4592 // [fp + 1 (==kCallerPCOffset)] - saved old ra 4684 // [fp + 1 (==kCallerPCOffset)] - saved old ra
4593 // [fp + 0 (==kCallerFPOffset)] - saved old fp 4685 // [fp + 0 (==kCallerFPOffset)] - saved old fp
4594 // [fp - 1 (==kSPOffset)] - sp of the called function 4686 // [fp - 1 (==kSPOffset)] - sp of the called function
4595 // [fp - 2 (==kCodeOffset)] - CodeObject 4687 // [fp - 2 (==kCodeOffset)] - CodeObject
4596 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the 4688 // fp - (2 + stack_space + alignment) == sp == [fp - kSPOffset] - top of the
4597 // new stack (will contain saved ra) 4689 // new stack (will contain saved ra)
4598 4690
4599 // Save registers. 4691 // Save registers.
4600 addiu(sp, sp, -4 * kPointerSize); 4692 daddiu(sp, sp, -4 * kPointerSize);
4601 sw(ra, MemOperand(sp, 3 * kPointerSize)); 4693 sd(ra, MemOperand(sp, 3 * kPointerSize));
4602 sw(fp, MemOperand(sp, 2 * kPointerSize)); 4694 sd(fp, MemOperand(sp, 2 * kPointerSize));
4603 addiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer. 4695 daddiu(fp, sp, 2 * kPointerSize); // Set up new frame pointer.
4604 4696
4605 if (emit_debug_code()) { 4697 if (emit_debug_code()) {
4606 sw(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset)); 4698 sd(zero_reg, MemOperand(fp, ExitFrameConstants::kSPOffset));
4607 } 4699 }
4608 4700
4609 // Accessed from ExitFrame::code_slot. 4701 // Accessed from ExitFrame::code_slot.
4610 li(t8, Operand(CodeObject()), CONSTANT_SIZE); 4702 li(t8, Operand(CodeObject()), CONSTANT_SIZE);
4611 sw(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset)); 4703 sd(t8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
4612 4704
4613 // Save the frame pointer and the context in top. 4705 // Save the frame pointer and the context in top.
4614 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); 4706 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4615 sw(fp, MemOperand(t8)); 4707 sd(fp, MemOperand(t8));
4616 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 4708 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4617 sw(cp, MemOperand(t8)); 4709 sd(cp, MemOperand(t8));
4618 4710
4619 const int frame_alignment = MacroAssembler::ActivationFrameAlignment(); 4711 const int frame_alignment = MacroAssembler::ActivationFrameAlignment();
4620 if (save_doubles) { 4712 if (save_doubles) {
4621 // The stack must be allign to 0 modulo 8 for stores with sdc1. 4713 // The stack is already aligned to 0 modulo 8 for stores with sdc1.
4622 ASSERT(kDoubleSize == frame_alignment); 4714 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4623 if (frame_alignment > 0) { 4715 int space = kNumOfSavedRegisters * kDoubleSize;
4624 ASSERT(IsPowerOf2(frame_alignment)); 4716 Dsubu(sp, sp, Operand(space));
4625 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4626 }
4627 int space = FPURegister::kMaxNumRegisters * kDoubleSize;
4628 Subu(sp, sp, Operand(space));
4629 // Remember: we only need to save every 2nd double FPU value. 4717 // Remember: we only need to save every 2nd double FPU value.
4630 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { 4718 for (int i = 0; i < kNumOfSavedRegisters; i++) {
4631 FPURegister reg = FPURegister::from_code(i); 4719 FPURegister reg = FPURegister::from_code(2 * i);
4632 sdc1(reg, MemOperand(sp, i * kDoubleSize)); 4720 sdc1(reg, MemOperand(sp, i * kDoubleSize));
4633 } 4721 }
4634 } 4722 }
4635 4723
4636 // Reserve place for the return address, stack space and an optional slot 4724 // Reserve place for the return address, stack space and an optional slot
4637 // (used by the DirectCEntryStub to hold the return value if a struct is 4725 // (used by the DirectCEntryStub to hold the return value if a struct is
4638 // returned) and align the frame preparing for calling the runtime function. 4726 // returned) and align the frame preparing for calling the runtime function.
4639 ASSERT(stack_space >= 0); 4727 ASSERT(stack_space >= 0);
4640 Subu(sp, sp, Operand((stack_space + 2) * kPointerSize)); 4728 Dsubu(sp, sp, Operand((stack_space + 2) * kPointerSize));
4641 if (frame_alignment > 0) { 4729 if (frame_alignment > 0) {
4642 ASSERT(IsPowerOf2(frame_alignment)); 4730 ASSERT(IsPowerOf2(frame_alignment));
4643 And(sp, sp, Operand(-frame_alignment)); // Align stack. 4731 And(sp, sp, Operand(-frame_alignment)); // Align stack.
4644 } 4732 }
4645 4733
4646 // Set the exit frame sp value to point just before the return address 4734 // Set the exit frame sp value to point just before the return address
4647 // location. 4735 // location.
4648 addiu(at, sp, kPointerSize); 4736 daddiu(at, sp, kPointerSize);
4649 sw(at, MemOperand(fp, ExitFrameConstants::kSPOffset)); 4737 sd(at, MemOperand(fp, ExitFrameConstants::kSPOffset));
4650 } 4738 }
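The double-register spill area reserves a slot for every second FPU register (16 of 32), i.e. 16 * kDoubleSize = 128 bytes, after which sp is rounded down to the activation frame alignment with an And against -frame_alignment. A small sketch of those two computations, with constants assumed to match this port:

#include <cstdint>

// Constants assumed to match this port (illustration only).
constexpr int kMaxNumFPURegisters = 32;
constexpr int kDoubleSizeBytes = 8;

// Only every second double register is saved: 16 slots * 8 bytes = 128 bytes.
constexpr int kSavedDoublesSize = (kMaxNumFPURegisters / 2) * kDoubleSizeBytes;

// What "And(sp, sp, Operand(-frame_alignment))" does for a power-of-two
// alignment: round the stack pointer down.
uint64_t AlignSpDown(uint64_t sp, uint64_t frame_alignment) {
  return sp & ~(frame_alignment - 1);
}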
4651 4739
4652 4740
4653 void MacroAssembler::LeaveExitFrame(bool save_doubles, 4741 void MacroAssembler::LeaveExitFrame(bool save_doubles,
4654 Register argument_count, 4742 Register argument_count,
4655 bool restore_context, 4743 bool restore_context,
4656 bool do_return) { 4744 bool do_return) {
4657 // Optionally restore all double registers. 4745 // Optionally restore all double registers.
4658 if (save_doubles) { 4746 if (save_doubles) {
4659 // Remember: we only need to restore every 2nd double FPU value. 4747 // Remember: we only need to restore every 2nd double FPU value.
4660 lw(t8, MemOperand(fp, ExitFrameConstants::kSPOffset)); 4748 int kNumOfSavedRegisters = FPURegister::kMaxNumRegisters / 2;
4661 for (int i = 0; i < FPURegister::kMaxNumRegisters; i+=2) { 4749 Dsubu(t8, fp, Operand(ExitFrameConstants::kFrameSize +
4662 FPURegister reg = FPURegister::from_code(i); 4750 kNumOfSavedRegisters * kDoubleSize));
4663 ldc1(reg, MemOperand(t8, i * kDoubleSize + kPointerSize)); 4751 for (int i = 0; i < kNumOfSavedRegisters; i++) {
4752 FPURegister reg = FPURegister::from_code(2 * i);
4753 ldc1(reg, MemOperand(t8, i * kDoubleSize));
4664 } 4754 }
4665 } 4755 }
4666 4756
4667 // Clear top frame. 4757 // Clear top frame.
4668 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate()))); 4758 li(t8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
4669 sw(zero_reg, MemOperand(t8)); 4759 sd(zero_reg, MemOperand(t8));
4670 4760
4671 // Restore current context from top and clear it in debug mode. 4761 // Restore current context from top and clear it in debug mode.
4672 if (restore_context) { 4762 if (restore_context) {
4673 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 4763 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4674 lw(cp, MemOperand(t8)); 4764 ld(cp, MemOperand(t8));
4675 } 4765 }
4676 #ifdef DEBUG 4766 #ifdef DEBUG
4677 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 4767 li(t8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
4678 sw(a3, MemOperand(t8)); 4768 sd(a3, MemOperand(t8));
4679 #endif 4769 #endif
4680 4770
4681 // Pop the arguments, restore registers, and return. 4771 // Pop the arguments, restore registers, and return.
4682 mov(sp, fp); // Respect ABI stack constraint. 4772 mov(sp, fp); // Respect ABI stack constraint.
4683 lw(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset)); 4773 ld(fp, MemOperand(sp, ExitFrameConstants::kCallerFPOffset));
4684 lw(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset)); 4774 ld(ra, MemOperand(sp, ExitFrameConstants::kCallerPCOffset));
4685 4775
4686 if (argument_count.is_valid()) { 4776 if (argument_count.is_valid()) {
4687 sll(t8, argument_count, kPointerSizeLog2); 4777 dsll(t8, argument_count, kPointerSizeLog2);
4688 addu(sp, sp, t8); 4778 daddu(sp, sp, t8);
4689 } 4779 }
4690 4780
4691 if (do_return) { 4781 if (do_return) {
4692 Ret(USE_DELAY_SLOT); 4782 Ret(USE_DELAY_SLOT);
4693 // If returning, the instruction in the delay slot will be the addiu below. 4783 // If returning, the instruction in the delay slot will be the daddiu below.
4694 } 4784 }
4695 addiu(sp, sp, 8); 4785 daddiu(sp, sp, 2 * kPointerSize);
4696 } 4786 }
4697 4787
4698 4788
4699 void MacroAssembler::InitializeNewString(Register string, 4789 void MacroAssembler::InitializeNewString(Register string,
4700 Register length, 4790 Register length,
4701 Heap::RootListIndex map_index, 4791 Heap::RootListIndex map_index,
4702 Register scratch1, 4792 Register scratch1,
4703 Register scratch2) { 4793 Register scratch2) {
4704 sll(scratch1, length, kSmiTagSize); 4794 // dsll(scratch1, length, kSmiTagSize);
4795 dsll32(scratch1, length, 0);
4705 LoadRoot(scratch2, map_index); 4796 LoadRoot(scratch2, map_index);
4706 sw(scratch1, FieldMemOperand(string, String::kLengthOffset)); 4797 sd(scratch1, FieldMemOperand(string, String::kLengthOffset));
4707 li(scratch1, Operand(String::kEmptyHashField)); 4798 li(scratch1, Operand(String::kEmptyHashField));
4708 sw(scratch2, FieldMemOperand(string, HeapObject::kMapOffset)); 4799 sd(scratch2, FieldMemOperand(string, HeapObject::kMapOffset));
4709 sw(scratch1, FieldMemOperand(string, String::kHashFieldOffset)); 4800 sd(scratch1, FieldMemOperand(string, String::kHashFieldOffset));
4710 } 4801 }
4711 4802
4712 4803
4713 int MacroAssembler::ActivationFrameAlignment() { 4804 int MacroAssembler::ActivationFrameAlignment() {
4714 #if V8_HOST_ARCH_MIPS 4805 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
4715 // Running on the real platform. Use the alignment as mandated by the local 4806 // Running on the real platform. Use the alignment as mandated by the local
4716 // environment. 4807 // environment.
4717 // Note: This will break if we ever start generating snapshots on one Mips 4808 // Note: This will break if we ever start generating snapshots on one Mips
4718 // platform for another Mips platform with a different alignment. 4809 // platform for another Mips platform with a different alignment.
4719 return base::OS::ActivationFrameAlignment(); 4810 return base::OS::ActivationFrameAlignment();
4721 #else // V8_HOST_ARCH_MIPS 4811 #else // V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
4721 // If we are using the simulator then we should always align to the expected 4812 // If we are using the simulator then we should always align to the expected
4722 // alignment. As the simulator is used to generate snapshots we do not know 4813 // alignment. As the simulator is used to generate snapshots we do not know
4723 // if the target platform will need alignment, so this is controlled from a 4814 // if the target platform will need alignment, so this is controlled from a
4724 // flag. 4815 // flag.
(...skipping 17 matching lines...)
4742 bind(&alignment_as_expected); 4833 bind(&alignment_as_expected);
4743 } 4834 }
4744 } 4835 }
4745 } 4836 }
4746 4837
4747 4838
4748 void MacroAssembler::JumpIfNotPowerOfTwoOrZero( 4839 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
4749 Register reg, 4840 Register reg,
4750 Register scratch, 4841 Register scratch,
4751 Label* not_power_of_two_or_zero) { 4842 Label* not_power_of_two_or_zero) {
4752 Subu(scratch, reg, Operand(1)); 4843 Dsubu(scratch, reg, Operand(1));
4753 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt, 4844 Branch(USE_DELAY_SLOT, not_power_of_two_or_zero, lt,
4754 scratch, Operand(zero_reg)); 4845 scratch, Operand(zero_reg));
4755 and_(at, scratch, reg); // In the delay slot. 4846 and_(at, scratch, reg); // In the delay slot.
4756 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg)); 4847 Branch(not_power_of_two_or_zero, ne, at, Operand(zero_reg));
4757 } 4848 }
4758 4849
4759 4850
4760 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) { 4851 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
4761 ASSERT(!reg.is(overflow)); 4852 ASSERT(!reg.is(overflow));
4762 mov(overflow, reg); // Save original value. 4853 mov(overflow, reg); // Save original value.
(...skipping 11 matching lines...)
4774 } else { 4865 } else {
4775 ASSERT(!dst.is(src)); 4866 ASSERT(!dst.is(src));
4776 ASSERT(!dst.is(overflow)); 4867 ASSERT(!dst.is(overflow));
4777 ASSERT(!src.is(overflow)); 4868 ASSERT(!src.is(overflow));
4778 SmiTag(dst, src); 4869 SmiTag(dst, src);
4779 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0. 4870 xor_(overflow, dst, src); // Overflow if (value ^ 2 * value) < 0.
4780 } 4871 }
4781 } 4872 }
4782 4873
4783 4874
4875 void MacroAssembler::SmiLoadUntag(Register dst, MemOperand src) {
4876 if (SmiValuesAre32Bits()) {
4877 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
4878 } else {
4879 lw(dst, src);
4880 SmiUntag(dst);
4881 }
4882 }
4883
4884
4885 void MacroAssembler::SmiLoadScale(Register dst, MemOperand src, int scale) {
4886 if (SmiValuesAre32Bits()) {
4887 // TODO(plind): not clear whether lw or ld is faster here; needs a micro-benchmark.
4888 lw(dst, UntagSmiMemOperand(src.rm(), src.offset()));
4889 dsll(dst, dst, scale);
4890 } else {
4891 lw(dst, src);
4892 ASSERT(scale >= kSmiTagSize);
4893 sll(dst, dst, scale - kSmiTagSize);
4894 }
4895 }
4896
4897
4898 // Returns 2 values: the Smi and a scaled version of the int within the Smi.
4899 void MacroAssembler::SmiLoadWithScale(Register d_smi,
4900 Register d_scaled,
4901 MemOperand src,
4902 int scale) {
4903 if (SmiValuesAre32Bits()) {
4904 ld(d_smi, src);
4905 dsra(d_scaled, d_smi, kSmiShift - scale);
4906 } else {
4907 lw(d_smi, src);
4908 ASSERT(scale >= kSmiTagSize);
4909 sll(d_scaled, d_smi, scale - kSmiTagSize);
4910 }
4911 }
4912
4913
4914 // Returns 2 values: the untagged Smi (int32) and scaled version of that int.
4915 void MacroAssembler::SmiLoadUntagWithScale(Register d_int,
4916 Register d_scaled,
4917 MemOperand src,
4918 int scale) {
4919 if (SmiValuesAre32Bits()) {
4920 lw(d_int, UntagSmiMemOperand(src.rm(), src.offset()));
4921 dsll(d_scaled, d_int, scale);
4922 } else {
4923 lw(d_int, src);
4924 // Need both the int and the scaled in, so use two instructions.
4925 SmiUntag(d_int);
4926 sll(d_scaled, d_int, scale);
4927 }
4928 }
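These helpers exploit the fact that with SmiValuesAre32Bits() the payload occupies the upper half of the 64-bit word, so a 32-bit load through UntagSmiMemOperand (which biases the offset for endianness) is equivalent to a 64-bit load followed by an arithmetic shift; the *Scale variants then multiply by shifting left. A sketch of the two equivalent access patterns on a little-endian host; the +4 byte offset is an assumption for illustration:

#include <cstdint>
#include <cstring>

// Assumed 64-bit smi encoding: 32-bit payload in the upper half of the word.
int64_t SmiTag(int32_t v) {
  return static_cast<int64_t>(static_cast<uint64_t>(v) << 32);
}

// "ld + SmiUntag" path: load the full 64 bits, then arithmetic shift by 32.
int32_t LoadAndUntag(const int64_t* slot) {
  return static_cast<int32_t>(*slot >> 32);
}

// "lw(UntagSmiMemOperand(...))" path: read only the 32-bit half holding the
// payload; on a little-endian target that half sits at byte offset +4.
int32_t LoadUntaggedHalf(const int64_t* slot) {
  int32_t upper;
  std::memcpy(&upper, reinterpret_cast<const char*>(slot) + 4, sizeof(upper));
  return upper;  // same value LoadAndUntag() produces for a tagged smi
}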
4929
4930
4784 void MacroAssembler::UntagAndJumpIfSmi(Register dst, 4931 void MacroAssembler::UntagAndJumpIfSmi(Register dst,
4785 Register src, 4932 Register src,
4786 Label* smi_case) { 4933 Label* smi_case) {
4934 // ASSERT(!dst.is(src));
4787 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT); 4935 JumpIfSmi(src, smi_case, at, USE_DELAY_SLOT);
4788 SmiUntag(dst, src); 4936 SmiUntag(dst, src);
4789 } 4937 }
4790 4938
4791 4939
4792 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, 4940 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst,
4793 Register src, 4941 Register src,
4794 Label* non_smi_case) { 4942 Label* non_smi_case) {
4943 // ASSERT(!dst.is(src));
4795 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT); 4944 JumpIfNotSmi(src, non_smi_case, at, USE_DELAY_SLOT);
4796 SmiUntag(dst, src); 4945 SmiUntag(dst, src);
4797 } 4946 }
4798 4947
4799 void MacroAssembler::JumpIfSmi(Register value, 4948 void MacroAssembler::JumpIfSmi(Register value,
4800 Label* smi_label, 4949 Label* smi_label,
4801 Register scratch, 4950 Register scratch,
4802 BranchDelaySlot bd) { 4951 BranchDelaySlot bd) {
4803 ASSERT_EQ(0, kSmiTag); 4952 ASSERT_EQ(0, kSmiTag);
4804 andi(scratch, value, kSmiTagMask); 4953 andi(scratch, value, kSmiTagMask);
4805 Branch(bd, smi_label, eq, scratch, Operand(zero_reg)); 4954 Branch(bd, smi_label, eq, scratch, Operand(zero_reg));
4806 } 4955 }
4807 4956
4808 void MacroAssembler::JumpIfNotSmi(Register value, 4957 void MacroAssembler::JumpIfNotSmi(Register value,
4809 Label* not_smi_label, 4958 Label* not_smi_label,
4810 Register scratch, 4959 Register scratch,
4811 BranchDelaySlot bd) { 4960 BranchDelaySlot bd) {
4812 ASSERT_EQ(0, kSmiTag); 4961 ASSERT_EQ(0, kSmiTag);
4813 andi(scratch, value, kSmiTagMask); 4962 andi(scratch, value, kSmiTagMask);
4814 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg)); 4963 Branch(bd, not_smi_label, ne, scratch, Operand(zero_reg));
4815 } 4964 }
4816 4965
4817 4966
4818 void MacroAssembler::JumpIfNotBothSmi(Register reg1, 4967 void MacroAssembler::JumpIfNotBothSmi(Register reg1,
4819 Register reg2, 4968 Register reg2,
4820 Label* on_not_both_smi) { 4969 Label* on_not_both_smi) {
4821 STATIC_ASSERT(kSmiTag == 0); 4970 STATIC_ASSERT(kSmiTag == 0);
4971 // TODO(plind): Find a better way to fix this assert issue.
4972 #if defined(__APPLE__)
4822 ASSERT_EQ(1, kSmiTagMask); 4973 ASSERT_EQ(1, kSmiTagMask);
4974 #else
4975 ASSERT_EQ((uint64_t)1, kSmiTagMask);
4976 #endif
4823 or_(at, reg1, reg2); 4977 or_(at, reg1, reg2);
4824 JumpIfNotSmi(at, on_not_both_smi); 4978 JumpIfNotSmi(at, on_not_both_smi);
4825 } 4979 }
4826 4980
4827 4981
4828 void MacroAssembler::JumpIfEitherSmi(Register reg1, 4982 void MacroAssembler::JumpIfEitherSmi(Register reg1,
4829 Register reg2, 4983 Register reg2,
4830 Label* on_either_smi) { 4984 Label* on_either_smi) {
4831 STATIC_ASSERT(kSmiTag == 0); 4985 STATIC_ASSERT(kSmiTag == 0);
4986 // TODO(plind): Find a better way to fix this assert issue.
4987 #if defined(__APPLE__)
4832 ASSERT_EQ(1, kSmiTagMask); 4988 ASSERT_EQ(1, kSmiTagMask);
4989 #else
4990 ASSERT_EQ((uint64_t)1, kSmiTagMask);
4991 #endif
4833 // Both Smi tags must be 1 (not Smi). 4992 // Both Smi tags must be 1 (not Smi).
4834 and_(at, reg1, reg2); 4993 and_(at, reg1, reg2);
4835 JumpIfSmi(at, on_either_smi); 4994 JumpIfSmi(at, on_either_smi);
4836 } 4995 }
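With kSmiTag == 0 and a one-bit tag mask, OR-ing two values produces a set tag bit iff at least one operand is a heap object, while AND-ing them produces a clear tag bit iff at least one operand is a smi, which is exactly what the two branches test. A one-line sketch of each predicate:

#include <cstdint>

constexpr uint64_t kTagMask = 1;  // low bit: 0 = smi, 1 = heap object

// JumpIfNotBothSmi: branch taken when (reg1 | reg2) has the tag bit set,
// i.e. when at least one value is not a smi.
bool NotBothSmi(uint64_t a, uint64_t b) { return ((a | b) & kTagMask) != 0; }

// JumpIfEitherSmi: branch taken when (reg1 & reg2) has the tag bit clear,
// i.e. when at least one value is a smi.
bool EitherSmi(uint64_t a, uint64_t b) { return ((a & b) & kTagMask) == 0; }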
4837 4996
4838 4997
4839 void MacroAssembler::AssertNotSmi(Register object) { 4998 void MacroAssembler::AssertNotSmi(Register object) {
4840 if (emit_debug_code()) { 4999 if (emit_debug_code()) {
4841 STATIC_ASSERT(kSmiTag == 0); 5000 STATIC_ASSERT(kSmiTag == 0);
4842 andi(at, object, kSmiTagMask); 5001 andi(at, object, kSmiTagMask);
4843 Check(ne, kOperandIsASmi, at, Operand(zero_reg)); 5002 Check(ne, kOperandIsASmi, at, Operand(zero_reg));
4844 } 5003 }
4845 } 5004 }
4846 5005
4847 5006
4848 void MacroAssembler::AssertSmi(Register object) { 5007 void MacroAssembler::AssertSmi(Register object) {
4849 if (emit_debug_code()) { 5008 if (emit_debug_code()) {
4850 STATIC_ASSERT(kSmiTag == 0); 5009 STATIC_ASSERT(kSmiTag == 0);
4851 andi(at, object, kSmiTagMask); 5010 andi(at, object, kSmiTagMask);
4852 Check(eq, kOperandIsASmi, at, Operand(zero_reg)); 5011 Check(eq, kOperandIsASmi, at, Operand(zero_reg));
4853 } 5012 }
4854 } 5013 }
4855 5014
4856 5015
4857 void MacroAssembler::AssertString(Register object) { 5016 void MacroAssembler::AssertString(Register object) {
4858 if (emit_debug_code()) { 5017 if (emit_debug_code()) {
4859 STATIC_ASSERT(kSmiTag == 0); 5018 STATIC_ASSERT(kSmiTag == 0);
4860 SmiTst(object, t0); 5019 SmiTst(object, a4);
4861 Check(ne, kOperandIsASmiAndNotAString, t0, Operand(zero_reg)); 5020 Check(ne, kOperandIsASmiAndNotAString, a4, Operand(zero_reg));
4862 push(object); 5021 push(object);
4863 lw(object, FieldMemOperand(object, HeapObject::kMapOffset)); 5022 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
4864 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); 5023 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4865 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE)); 5024 Check(lo, kOperandIsNotAString, object, Operand(FIRST_NONSTRING_TYPE));
4866 pop(object); 5025 pop(object);
4867 } 5026 }
4868 } 5027 }
4869 5028
4870 5029
4871 void MacroAssembler::AssertName(Register object) { 5030 void MacroAssembler::AssertName(Register object) {
4872 if (emit_debug_code()) { 5031 if (emit_debug_code()) {
4873 STATIC_ASSERT(kSmiTag == 0); 5032 STATIC_ASSERT(kSmiTag == 0);
4874 SmiTst(object, t0); 5033 SmiTst(object, a4);
4875 Check(ne, kOperandIsASmiAndNotAName, t0, Operand(zero_reg)); 5034 Check(ne, kOperandIsASmiAndNotAName, a4, Operand(zero_reg));
4876 push(object); 5035 push(object);
4877 lw(object, FieldMemOperand(object, HeapObject::kMapOffset)); 5036 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
4878 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset)); 5037 lbu(object, FieldMemOperand(object, Map::kInstanceTypeOffset));
4879 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE)); 5038 Check(le, kOperandIsNotAName, object, Operand(LAST_NAME_TYPE));
4880 pop(object); 5039 pop(object);
4881 } 5040 }
4882 } 5041 }
4883 5042
4884 5043
4885 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object, 5044 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
4886 Register scratch) { 5045 Register scratch) {
4887 if (emit_debug_code()) { 5046 if (emit_debug_code()) {
4888 Label done_checking; 5047 Label done_checking;
4889 AssertNotSmi(object); 5048 AssertNotSmi(object);
4890 LoadRoot(scratch, Heap::kUndefinedValueRootIndex); 5049 LoadRoot(scratch, Heap::kUndefinedValueRootIndex);
4891 Branch(&done_checking, eq, object, Operand(scratch)); 5050 Branch(&done_checking, eq, object, Operand(scratch));
4892 push(object); 5051 push(object);
4893 lw(object, FieldMemOperand(object, HeapObject::kMapOffset)); 5052 ld(object, FieldMemOperand(object, HeapObject::kMapOffset));
4894 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex); 5053 LoadRoot(scratch, Heap::kAllocationSiteMapRootIndex);
4895 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch)); 5054 Assert(eq, kExpectedUndefinedOrCell, object, Operand(scratch));
4896 pop(object); 5055 pop(object);
4897 bind(&done_checking); 5056 bind(&done_checking);
4898 } 5057 }
4899 } 5058 }
4900 5059
4901 5060
4902 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) { 5061 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
4903 if (emit_debug_code()) { 5062 if (emit_debug_code()) {
4904 ASSERT(!reg.is(at)); 5063 ASSERT(!reg.is(at));
4905 LoadRoot(at, index); 5064 LoadRoot(at, index);
4906 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at)); 5065 Check(eq, kHeapNumberMapRegisterClobbered, reg, Operand(at));
4907 } 5066 }
4908 } 5067 }
4909 5068
4910 5069
4911 void MacroAssembler::JumpIfNotHeapNumber(Register object, 5070 void MacroAssembler::JumpIfNotHeapNumber(Register object,
4912 Register heap_number_map, 5071 Register heap_number_map,
4913 Register scratch, 5072 Register scratch,
4914 Label* on_not_heap_number) { 5073 Label* on_not_heap_number) {
4915 lw(scratch, FieldMemOperand(object, HeapObject::kMapOffset)); 5074 ld(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
4916 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); 5075 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
4917 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map)); 5076 Branch(on_not_heap_number, ne, scratch, Operand(heap_number_map));
4918 } 5077 }
4919 5078
4920 5079
4921 void MacroAssembler::LookupNumberStringCache(Register object, 5080 void MacroAssembler::LookupNumberStringCache(Register object,
4922 Register result, 5081 Register result,
4923 Register scratch1, 5082 Register scratch1,
4924 Register scratch2, 5083 Register scratch2,
4925 Register scratch3, 5084 Register scratch3,
4926 Label* not_found) { 5085 Label* not_found) {
4927 // Use of registers. Register result is used as a temporary. 5086 // Use of registers. Register result is used as a temporary.
4928 Register number_string_cache = result; 5087 Register number_string_cache = result;
4929 Register mask = scratch3; 5088 Register mask = scratch3;
4930 5089
4931 // Load the number string cache. 5090 // Load the number string cache.
4932 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex); 5091 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
4933 5092
4934 // Make the hash mask from the length of the number string cache. It 5093 // Make the hash mask from the length of the number string cache. It
4935 // contains two elements (number and string) for each cache entry. 5094 // contains two elements (number and string) for each cache entry.
4936 lw(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset)); 5095 ld(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
4937 // Divide length by two (length is a smi). 5096 // Divide length by two (length is a smi).
4938 sra(mask, mask, kSmiTagSize + 1); 5097 // dsra(mask, mask, kSmiTagSize + 1);
4939 Addu(mask, mask, -1); // Make mask. 5098 dsra32(mask, mask, 1);
5099 Daddu(mask, mask, -1); // Make mask.
4940 5100
4941 // Calculate the entry in the number string cache. The hash value in the 5101 // Calculate the entry in the number string cache. The hash value in the
4942 // number string cache for smis is just the smi value, and the hash for 5102 // number string cache for smis is just the smi value, and the hash for
4943 // doubles is the xor of the upper and lower words. See 5103 // doubles is the xor of the upper and lower words. See
4944 // Heap::GetNumberStringCache. 5104 // Heap::GetNumberStringCache.
4945 Label is_smi; 5105 Label is_smi;
4946 Label load_result_from_cache; 5106 Label load_result_from_cache;
4947 JumpIfSmi(object, &is_smi); 5107 JumpIfSmi(object, &is_smi);
4948 CheckMap(object, 5108 CheckMap(object,
4949 scratch1, 5109 scratch1,
4950 Heap::kHeapNumberMapRootIndex, 5110 Heap::kHeapNumberMapRootIndex,
4951 not_found, 5111 not_found,
4952 DONT_DO_SMI_CHECK); 5112 DONT_DO_SMI_CHECK);
4953 5113
4954 STATIC_ASSERT(8 == kDoubleSize); 5114 STATIC_ASSERT(8 == kDoubleSize);
4955 Addu(scratch1, 5115 Daddu(scratch1,
4956 object, 5116 object,
4957 Operand(HeapNumber::kValueOffset - kHeapObjectTag)); 5117 Operand(HeapNumber::kValueOffset - kHeapObjectTag));
4958 lw(scratch2, MemOperand(scratch1, kPointerSize)); 5118 lwu(scratch2, MemOperand(scratch1, kIntSize));
4959 lw(scratch1, MemOperand(scratch1, 0)); 5119 lwu(scratch1, MemOperand(scratch1, 0));
4960 Xor(scratch1, scratch1, Operand(scratch2)); 5120 Xor(scratch1, scratch1, Operand(scratch2));
4961 And(scratch1, scratch1, Operand(mask)); 5121 And(scratch1, scratch1, Operand(mask));
4962 5122
4963 // Calculate address of entry in string cache: each entry consists 5123 // Calculate address of entry in string cache: each entry consists
4964 // of two pointer sized fields. 5124 // of two pointer sized fields.
4965 sll(scratch1, scratch1, kPointerSizeLog2 + 1); 5125 dsll(scratch1, scratch1, kPointerSizeLog2 + 1);
4966 Addu(scratch1, number_string_cache, scratch1); 5126 Daddu(scratch1, number_string_cache, scratch1);
4967 5127
4968 Register probe = mask; 5128 Register probe = mask;
4969 lw(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize)); 5129 ld(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
4970 JumpIfSmi(probe, not_found); 5130 JumpIfSmi(probe, not_found);
4971 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset)); 5131 ldc1(f12, FieldMemOperand(object, HeapNumber::kValueOffset));
4972 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset)); 5132 ldc1(f14, FieldMemOperand(probe, HeapNumber::kValueOffset));
4973 BranchF(&load_result_from_cache, NULL, eq, f12, f14); 5133 BranchF(&load_result_from_cache, NULL, eq, f12, f14);
4974 Branch(not_found); 5134 Branch(not_found);
4975 5135
4976 bind(&is_smi); 5136 bind(&is_smi);
4977 Register scratch = scratch1; 5137 Register scratch = scratch1;
4978 sra(scratch, object, 1); // Shift away the tag. 5138 // dsra(scratch, object, 1); // Shift away the tag.
5139 dsra32(scratch, object, 0);
4979 And(scratch, mask, Operand(scratch)); 5140 And(scratch, mask, Operand(scratch));
4980 5141
4981 // Calculate address of entry in string cache: each entry consists 5142 // Calculate address of entry in string cache: each entry consists
4982 // of two pointer sized fields. 5143 // of two pointer sized fields.
4983 sll(scratch, scratch, kPointerSizeLog2 + 1); 5144 dsll(scratch, scratch, kPointerSizeLog2 + 1);
4984 Addu(scratch, number_string_cache, scratch); 5145 Daddu(scratch, number_string_cache, scratch);
4985 5146
4986 // Check if the entry is the smi we are looking for. 5147 // Check if the entry is the smi we are looking for.
4987 lw(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize)); 5148 ld(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
4988 Branch(not_found, ne, object, Operand(probe)); 5149 Branch(not_found, ne, object, Operand(probe));
4989 5150
4990 // Get the result from the cache. 5151 // Get the result from the cache.
4991 bind(&load_result_from_cache); 5152 bind(&load_result_from_cache);
4992 lw(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize)); 5153 ld(result, FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
4993 5154
4994 IncrementCounter(isolate()->counters()->number_to_string_native(), 5155 IncrementCounter(isolate()->counters()->number_to_string_native(),
4995 1, 5156 1,
4996 scratch1, 5157 scratch1,
4997 scratch2); 5158 scratch2);
4998 } 5159 }
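The cache is addressed by a hash masked to half the backing array length minus one: smis hash to their own value, heap numbers hash to the xor of the upper and lower 32-bit words of the double (per Heap::GetNumberStringCache), and each entry spans two pointer-sized fields. A sketch of that index computation; the layout constants are assumptions for illustration:

#include <cstdint>
#include <cstring>

// Mask derived from the cache length: two elements per entry, minus one.
uint32_t CacheMask(uint32_t cache_length_in_elements) {
  return cache_length_in_elements / 2 - 1;
}

// Smis hash to their own (untagged) value.
uint32_t HashSmi(int32_t value, uint32_t mask) {
  return static_cast<uint32_t>(value) & mask;
}

// Heap numbers hash to the xor of the upper and lower 32-bit words.
uint32_t HashHeapNumber(double value, uint32_t mask) {
  uint64_t bits;
  std::memcpy(&bits, &value, sizeof(bits));
  return (static_cast<uint32_t>(bits) ^ static_cast<uint32_t>(bits >> 32)) & mask;
}

// Entry offset: two pointer-sized (8-byte) fields per entry, i.e. the
// dsll(scratch, scratch, kPointerSizeLog2 + 1) scaling in the code above.
uint64_t EntryOffset(uint32_t index) {
  return static_cast<uint64_t>(index) * 2 * 8;
}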
4999 5160
5000 5161
5001 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings( 5162 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
5002 Register first, 5163 Register first,
5003 Register second, 5164 Register second,
5004 Register scratch1, 5165 Register scratch1,
5005 Register scratch2, 5166 Register scratch2,
5006 Label* failure) { 5167 Label* failure) {
5007 // Test that both first and second are sequential ASCII strings. 5168 // Test that both first and second are sequential ASCII strings.
5008 // Assume that they are non-smis. 5169 // Assume that they are non-smis.
5009 lw(scratch1, FieldMemOperand(first, HeapObject::kMapOffset)); 5170 ld(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
5010 lw(scratch2, FieldMemOperand(second, HeapObject::kMapOffset)); 5171 ld(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
5011 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); 5172 lbu(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
5012 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset)); 5173 lbu(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
5013 5174
5014 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, 5175 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1,
5015 scratch2, 5176 scratch2,
5016 scratch1, 5177 scratch1,
5017 scratch2, 5178 scratch2,
5018 failure); 5179 failure);
5019 } 5180 }
5020 5181
(...skipping 38 matching lines...)
5059 Label* failure) { 5220 Label* failure) {
5060 const int kFlatAsciiStringMask = 5221 const int kFlatAsciiStringMask =
5061 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask; 5222 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
5062 const int kFlatAsciiStringTag = 5223 const int kFlatAsciiStringTag =
5063 kStringTag | kOneByteStringTag | kSeqStringTag; 5224 kStringTag | kOneByteStringTag | kSeqStringTag;
5064 And(scratch, type, Operand(kFlatAsciiStringMask)); 5225 And(scratch, type, Operand(kFlatAsciiStringMask));
5065 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag)); 5226 Branch(failure, ne, scratch, Operand(kFlatAsciiStringTag));
5066 } 5227 }
5067 5228
5068 5229
5069 static const int kRegisterPassedArguments = 4; 5230 static const int kRegisterPassedArguments = (kMipsAbi == kN64) ? 8 : 4;
5070 5231
5071 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments, 5232 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
5072 int num_double_arguments) { 5233 int num_double_arguments) {
5073 int stack_passed_words = 0; 5234 int stack_passed_words = 0;
5074 num_reg_arguments += 2 * num_double_arguments; 5235 num_reg_arguments += 2 * num_double_arguments;
5075 5236
5076 // Up to four simple arguments are passed in registers a0..a3. 5237 // O32: Up to four simple arguments are passed in registers a0..a3.
5238 // N64: Up to eight simple arguments are passed in registers a0..a7.
5077 if (num_reg_arguments > kRegisterPassedArguments) { 5239 if (num_reg_arguments > kRegisterPassedArguments) {
5078 stack_passed_words += num_reg_arguments - kRegisterPassedArguments; 5240 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
5079 } 5241 }
5080 stack_passed_words += kCArgSlotCount; 5242 stack_passed_words += kCArgSlotCount;
5081 return stack_passed_words; 5243 return stack_passed_words;
5082 } 5244 }
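
The same calculation in plain C++ may make the ABI split easier to follow; a sketch, where register_passed_args is 8 on N64 and 4 on O32 as above, and c_arg_slot_count is assumed to be 0 on N64 and 4 on O32:

    // Sketch of CalculateStackPassedWords() for a given ABI.
    int StackPassedWords(int reg_args, int double_args,
                         int register_passed_args,   // 8 on N64, 4 on O32.
                         int c_arg_slot_count) {     // assumed 0 on N64, 4 on O32.
      reg_args += 2 * double_args;                   // each double counts as two words.
      int words = 0;
      if (reg_args > register_passed_args) words += reg_args - register_passed_args;
      return words + c_arg_slot_count;
    }
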
5083 5245
5084 5246
5085 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, 5247 void MacroAssembler::EmitSeqStringSetCharCheck(Register string,
5086 Register index, 5248 Register index,
5087 Register value, 5249 Register value,
5088 Register scratch, 5250 Register scratch,
5089 uint32_t encoding_mask) { 5251 uint32_t encoding_mask) {
5090 Label is_object; 5252 Label is_object;
5091 SmiTst(string, at); 5253 SmiTst(string, at);
5092 Check(ne, kNonObject, at, Operand(zero_reg)); 5254 Check(ne, kNonObject, at, Operand(zero_reg));
5093 5255
5094 lw(at, FieldMemOperand(string, HeapObject::kMapOffset)); 5256 ld(at, FieldMemOperand(string, HeapObject::kMapOffset));
5095 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset)); 5257 lbu(at, FieldMemOperand(at, Map::kInstanceTypeOffset));
5096 5258
5097 andi(at, at, kStringRepresentationMask | kStringEncodingMask); 5259 andi(at, at, kStringRepresentationMask | kStringEncodingMask);
5098 li(scratch, Operand(encoding_mask)); 5260 li(scratch, Operand(encoding_mask));
5099 Check(eq, kUnexpectedStringType, at, Operand(scratch)); 5261 Check(eq, kUnexpectedStringType, at, Operand(scratch));
5100 5262
5101 // The index is assumed to be untagged coming in, tag it to compare with the 5263 // TODO(plind): requires Smi size check code for mips32.
5102 // string length without using a temp register, it is restored at the end of
5103 // this function.
5104 Label index_tag_ok, index_tag_bad;
5105 TrySmiTag(index, scratch, &index_tag_bad);
5106 Branch(&index_tag_ok);
5107 bind(&index_tag_bad);
5108 Abort(kIndexIsTooLarge);
5109 bind(&index_tag_ok);
5110 5264
5111 lw(at, FieldMemOperand(string, String::kLengthOffset)); 5265 ld(at, FieldMemOperand(string, String::kLengthOffset));
5112 Check(lt, kIndexIsTooLarge, index, Operand(at)); 5266 Check(lt, kIndexIsTooLarge, index, Operand(at));
5113 5267
5114 ASSERT(Smi::FromInt(0) == 0); 5268 ASSERT(Smi::FromInt(0) == 0);
5115 Check(ge, kIndexIsNegative, index, Operand(zero_reg)); 5269 Check(ge, kIndexIsNegative, index, Operand(zero_reg));
5116
5117 SmiUntag(index, index);
5118 } 5270 }
5119 5271
5120 5272
5121 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, 5273 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5122 int num_double_arguments, 5274 int num_double_arguments,
5123 Register scratch) { 5275 Register scratch) {
5124 int frame_alignment = ActivationFrameAlignment(); 5276 int frame_alignment = ActivationFrameAlignment();
5125 5277
5126 // Up to four simple arguments are passed in registers a0..a3. 5278 // N64: Up to eight simple arguments are passed in registers a0..a7; no argument slots.
5279 // O32: Up to four simple arguments are passed in registers a0..a3.
5127 // Those four arguments must have reserved argument slots on the stack for 5280 // Those four arguments must have reserved argument slots on the stack for
5128 // mips, even though those argument slots are not normally used. 5281 // mips, even though those argument slots are not normally used.
5129 // Remaining arguments are pushed on the stack, above (higher address than) 5282 // Both ABIs: Remaining arguments are pushed on the stack, above (higher
5130 // the argument slots. 5283 // address than) the (O32) argument slots. (arg slot calculation handled by
5284 // CalculateStackPassedWords()).
5131 int stack_passed_arguments = CalculateStackPassedWords( 5285 int stack_passed_arguments = CalculateStackPassedWords(
5132 num_reg_arguments, num_double_arguments); 5286 num_reg_arguments, num_double_arguments);
5133 if (frame_alignment > kPointerSize) { 5287 if (frame_alignment > kPointerSize) {
5134 // Make stack end at alignment and make room for num_arguments - 4 words 5288 // Make stack end at alignment and make room for num_arguments - 4 words
5135 // and the original value of sp. 5289 // and the original value of sp.
5136 mov(scratch, sp); 5290 mov(scratch, sp);
5137 Subu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize)); 5291 Dsubu(sp, sp, Operand((stack_passed_arguments + 1) * kPointerSize));
5138 ASSERT(IsPowerOf2(frame_alignment)); 5292 ASSERT(IsPowerOf2(frame_alignment));
5139 And(sp, sp, Operand(-frame_alignment)); 5293 And(sp, sp, Operand(-frame_alignment));
5140 sw(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize)); 5294 sd(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
5141 } else { 5295 } else {
5142 Subu(sp, sp, Operand(stack_passed_arguments * kPointerSize)); 5296 Dsubu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5143 } 5297 }
5144 } 5298 }
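
The alignment sequence above saves sp, reserves the stack-passed words plus one slot, rounds sp down to the frame alignment, and stashes the old sp in the topmost reserved slot so the call epilogue can reload it. A sketch of that pointer arithmetic, assuming 8-byte stack slots:

    #include <cstdint>
    // Sketch: reserve room for the stack-passed words and realign sp downwards.
    // The caller's sp is stored at sp + stack_passed_words * 8 so it can be
    // reloaded after the C call (see CallCFunctionHelper below).
    uintptr_t AlignedSpAfterReserve(uintptr_t sp, int stack_passed_words,
                                    uintptr_t frame_alignment) {
      sp -= (stack_passed_words + 1) * 8;   // +1 slot for the saved sp.
      sp &= ~(frame_alignment - 1);         // frame_alignment is a power of two.
      return sp;
    }
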
5145 5299
5146 5300
5147 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments, 5301 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
5148 Register scratch) { 5302 Register scratch) {
5149 PrepareCallCFunction(num_reg_arguments, 0, scratch); 5303 PrepareCallCFunction(num_reg_arguments, 0, scratch);
5150 } 5304 }
5151 5305
5152 5306
(...skipping 27 matching lines...)
5180 void MacroAssembler::CallCFunctionHelper(Register function, 5334 void MacroAssembler::CallCFunctionHelper(Register function,
5181 int num_reg_arguments, 5335 int num_reg_arguments,
5182 int num_double_arguments) { 5336 int num_double_arguments) {
5183 ASSERT(has_frame()); 5337 ASSERT(has_frame());
5184 // Make sure that the stack is aligned before calling a C function unless 5338 // Make sure that the stack is aligned before calling a C function unless
5185 // running in the simulator. The simulator has its own alignment check which 5339 // running in the simulator. The simulator has its own alignment check which
5186 // provides more information. 5340 // provides more information.
5187 // The argument slots are presumed to have been set up by 5341 // The argument slots are presumed to have been set up by
5188 // PrepareCallCFunction. The C function must be called via t9, for mips ABI. 5342 // PrepareCallCFunction. The C function must be called via t9, for mips ABI.
5189 5343
5190 #if V8_HOST_ARCH_MIPS 5344 #if V8_HOST_ARCH_MIPS || V8_HOST_ARCH_MIPS64
5191 if (emit_debug_code()) { 5345 if (emit_debug_code()) {
5192 int frame_alignment = base::OS::ActivationFrameAlignment(); 5346 int frame_alignment = base::OS::ActivationFrameAlignment();
5193 int frame_alignment_mask = frame_alignment - 1; 5347 int frame_alignment_mask = frame_alignment - 1;
5194 if (frame_alignment > kPointerSize) { 5348 if (frame_alignment > kPointerSize) {
5195 ASSERT(IsPowerOf2(frame_alignment)); 5349 ASSERT(IsPowerOf2(frame_alignment));
5196 Label alignment_as_expected; 5350 Label alignment_as_expected;
5197 And(at, sp, Operand(frame_alignment_mask)); 5351 And(at, sp, Operand(frame_alignment_mask));
5198 Branch(&alignment_as_expected, eq, at, Operand(zero_reg)); 5352 Branch(&alignment_as_expected, eq, at, Operand(zero_reg));
5199 // Don't use Check here, as it will call Runtime_Abort possibly 5353 // Don't use Check here, as it will call Runtime_Abort possibly
5200 // re-entering here. 5354 // re-entering here.
(...skipping 11 matching lines...)
5212 mov(t9, function); 5366 mov(t9, function);
5213 function = t9; 5367 function = t9;
5214 } 5368 }
5215 5369
5216 Call(function); 5370 Call(function);
5217 5371
5218 int stack_passed_arguments = CalculateStackPassedWords( 5372 int stack_passed_arguments = CalculateStackPassedWords(
5219 num_reg_arguments, num_double_arguments); 5373 num_reg_arguments, num_double_arguments);
5220 5374
5221 if (base::OS::ActivationFrameAlignment() > kPointerSize) { 5375 if (base::OS::ActivationFrameAlignment() > kPointerSize) {
5222 lw(sp, MemOperand(sp, stack_passed_arguments * kPointerSize)); 5376 ld(sp, MemOperand(sp, stack_passed_arguments * kPointerSize));
5223 } else { 5377 } else {
5224 Addu(sp, sp, Operand(stack_passed_arguments * sizeof(kPointerSize))); 5378 Daddu(sp, sp, Operand(stack_passed_arguments * kPointerSize));
5225 } 5379 }
5226 } 5380 }
5227 5381
5228 5382
5229 #undef BRANCH_ARGS_CHECK 5383 #undef BRANCH_ARGS_CHECK
5230 5384
5231 5385
5232 void MacroAssembler::PatchRelocatedValue(Register li_location, 5386 void MacroAssembler::PatchRelocatedValue(Register li_location,
5233 Register scratch, 5387 Register scratch,
5234 Register new_value) { 5388 Register new_value) {
5235 lw(scratch, MemOperand(li_location)); 5389 lwu(scratch, MemOperand(li_location));
5236 // At this point scratch is a lui(at, ...) instruction. 5390 // At this point scratch is a lui(at, ...) instruction.
5237 if (emit_debug_code()) { 5391 if (emit_debug_code()) {
5238 And(scratch, scratch, kOpcodeMask); 5392 And(scratch, scratch, kOpcodeMask);
5239 Check(eq, kTheInstructionToPatchShouldBeALui, 5393 Check(eq, kTheInstructionToPatchShouldBeALui,
5240 scratch, Operand(LUI)); 5394 scratch, Operand(LUI));
5241 lw(scratch, MemOperand(li_location)); 5395 lwu(scratch, MemOperand(li_location));
5242 } 5396 }
5243 srl(t9, new_value, kImm16Bits); 5397 dsrl32(t9, new_value, 0);
5244 Ins(scratch, t9, 0, kImm16Bits); 5398 Ins(scratch, t9, 0, kImm16Bits);
5245 sw(scratch, MemOperand(li_location)); 5399 sw(scratch, MemOperand(li_location));
5246 5400
5247 lw(scratch, MemOperand(li_location, kInstrSize)); 5401 lwu(scratch, MemOperand(li_location, kInstrSize));
5248 // scratch is now ori(at, ...). 5402 // scratch is now ori(at, ...).
5249 if (emit_debug_code()) { 5403 if (emit_debug_code()) {
5250 And(scratch, scratch, kOpcodeMask); 5404 And(scratch, scratch, kOpcodeMask);
5251 Check(eq, kTheInstructionToPatchShouldBeAnOri, 5405 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5252 scratch, Operand(ORI)); 5406 scratch, Operand(ORI));
5253 lw(scratch, MemOperand(li_location, kInstrSize)); 5407 lwu(scratch, MemOperand(li_location, kInstrSize));
5254 } 5408 }
5255 Ins(scratch, new_value, 0, kImm16Bits); 5409 dsrl(t9, new_value, kImm16Bits);
5410 Ins(scratch, t9, 0, kImm16Bits);
5256 sw(scratch, MemOperand(li_location, kInstrSize)); 5411 sw(scratch, MemOperand(li_location, kInstrSize));
5257 5412
5413 lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5414 // scratch is now ori(at, ...).
5415 if (emit_debug_code()) {
5416 And(scratch, scratch, kOpcodeMask);
5417 Check(eq, kTheInstructionToPatchShouldBeAnOri,
5418 scratch, Operand(ORI));
5419 lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5420 }
5421
5422 Ins(scratch, new_value, 0, kImm16Bits);
5423 sw(scratch, MemOperand(li_location, kInstrSize * 3));
5424
5258 // Update the I-cache so the new lui and ori can be executed. 5425 // Update the I-cache so the new lui and ori can be executed.
5259 FlushICache(li_location, 2); 5426 FlushICache(li_location, 4);
5260 } 5427 }
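
The patching above rewrites the lui/ori/dsll/ori sequence that li() emits for a 48-bit constant, so the new value is split into three 16-bit immediates: bits 47..32 go into the lui, bits 31..16 into the first ori, and bits 15..0 into the ori at offset 3 * kInstrSize. A host-side sketch of that split, assuming this four-instruction li pattern:

    #include <cstdint>
    // Sketch: split a (sign-extended) 48-bit value into the three 16-bit
    // immediates carried by the patched lui/ori/ori instructions.
    void SplitForLi(uint64_t value, uint16_t* lui_imm, uint16_t* ori1_imm, uint16_t* ori2_imm) {
      *lui_imm  = (value >> 32) & 0xFFFF;  // bits 47..32
      *ori1_imm = (value >> 16) & 0xFFFF;  // bits 31..16
      *ori2_imm =  value        & 0xFFFF;  // bits 15..0
    }
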
5261 5428
5262 void MacroAssembler::GetRelocatedValue(Register li_location, 5429 void MacroAssembler::GetRelocatedValue(Register li_location,
5263 Register value, 5430 Register value,
5264 Register scratch) { 5431 Register scratch) {
5265 lw(value, MemOperand(li_location)); 5432 lwu(value, MemOperand(li_location));
5266 if (emit_debug_code()) { 5433 if (emit_debug_code()) {
5267 And(value, value, kOpcodeMask); 5434 And(value, value, kOpcodeMask);
5268 Check(eq, kTheInstructionShouldBeALui, 5435 Check(eq, kTheInstructionShouldBeALui,
5269 value, Operand(LUI)); 5436 value, Operand(LUI));
5270 lw(value, MemOperand(li_location)); 5437 lwu(value, MemOperand(li_location));
5271 } 5438 }
5272 5439
5273 // value now holds a lui instruction. Extract the immediate. 5440 // value now holds a lui instruction. Extract the immediate.
5274 sll(value, value, kImm16Bits); 5441 andi(value, value, kImm16Mask);
5442 dsll32(value, value, kImm16Bits);
5275 5443
5276 lw(scratch, MemOperand(li_location, kInstrSize)); 5444 lwu(scratch, MemOperand(li_location, kInstrSize));
5277 if (emit_debug_code()) { 5445 if (emit_debug_code()) {
5278 And(scratch, scratch, kOpcodeMask); 5446 And(scratch, scratch, kOpcodeMask);
5279 Check(eq, kTheInstructionShouldBeAnOri, 5447 Check(eq, kTheInstructionShouldBeAnOri,
5280 scratch, Operand(ORI)); 5448 scratch, Operand(ORI));
5281 lw(scratch, MemOperand(li_location, kInstrSize)); 5449 lwu(scratch, MemOperand(li_location, kInstrSize));
5282 } 5450 }
5283 // "scratch" now holds an ori instruction. Extract the immediate. 5451 // "scratch" now holds an ori instruction. Extract the immediate.
5284 andi(scratch, scratch, kImm16Mask); 5452 andi(scratch, scratch, kImm16Mask);
5453 dsll32(scratch, scratch, 0);
5285 5454
5286 // Merge the results.
5287 or_(value, value, scratch); 5455 or_(value, value, scratch);
5456
5457 lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5458 if (emit_debug_code()) {
5459 And(scratch, scratch, kOpcodeMask);
5460 Check(eq, kTheInstructionShouldBeAnOri,
5461 scratch, Operand(ORI));
5462 lwu(scratch, MemOperand(li_location, kInstrSize * 3));
5463 }
5464 // "scratch" now holds an ori instruction. Extract the immediate.
5465 andi(scratch, scratch, kImm16Mask);
5466 dsll(scratch, scratch, kImm16Bits);
5467
5468 or_(value, value, scratch);
5469 // Sign extend extracted address.
5470 dsra(value, value, kImm16Bits);
5288 } 5471 }
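
GetRelocatedValue() is the inverse: the three immediates are reassembled 16 bits too high and the final dsra by 16 sign-extends the result from bit 47. Equivalent host-side arithmetic, as a sketch (an arithmetic right shift is assumed, as on the target):

    #include <cstdint>
    // Sketch: rebuild the 48-bit value from the lui/ori/ori immediates and
    // sign-extend it to 64 bits, mirroring the dsll32/dsll/or_/dsra sequence.
    int64_t ReassembleLiValue(uint16_t lui_imm, uint16_t ori1_imm, uint16_t ori2_imm) {
      uint64_t v = ((uint64_t)lui_imm  << 48) |
                   ((uint64_t)ori1_imm << 32) |
                   ((uint64_t)ori2_imm << 16);
      return (int64_t)v >> 16;  // arithmetic shift sign-extends from bit 47.
    }
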
5289 5472
5290 5473
5291 void MacroAssembler::CheckPageFlag( 5474 void MacroAssembler::CheckPageFlag(
5292 Register object, 5475 Register object,
5293 Register scratch, 5476 Register scratch,
5294 int mask, 5477 int mask,
5295 Condition cc, 5478 Condition cc,
5296 Label* condition_met) { 5479 Label* condition_met) {
5297 And(scratch, object, Operand(~Page::kPageAlignmentMask)); 5480 // TODO(plind): Fix li() so we can use constant embedded inside And().
5298 lw(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset)); 5481 // And(scratch, object, Operand(~Page::kPageAlignmentMask));
5482 li(at, Operand(~Page::kPageAlignmentMask), CONSTANT_SIZE); // plind HACK
5483 And(scratch, object, at);
5484 ld(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
5299 And(scratch, scratch, Operand(mask)); 5485 And(scratch, scratch, Operand(mask));
5300 Branch(condition_met, cc, scratch, Operand(zero_reg)); 5486 Branch(condition_met, cc, scratch, Operand(zero_reg));
5301 } 5487 }
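
CheckPageFlag() only masks the object address down to the start of its page (the MemoryChunk header) and tests bits in the flags word stored at a fixed offset; a parameterized sketch of that lookup:

    #include <cstdint>
    // Sketch: locate the MemoryChunk header for an object and test a flag bit.
    bool PageFlagSet(uintptr_t object, uintptr_t page_alignment_mask,
                     int flags_offset, uintptr_t flag_mask) {
      uintptr_t chunk = object & ~page_alignment_mask;   // chunk header sits at the page start.
      uintptr_t flags = *reinterpret_cast<uintptr_t*>(chunk + flags_offset);
      return (flags & flag_mask) != 0;
    }
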
5302 5488
5303 5489
5304 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, 5490 void MacroAssembler::CheckMapDeprecated(Handle<Map> map,
5305 Register scratch, 5491 Register scratch,
5306 Label* if_deprecated) { 5492 Label* if_deprecated) {
5307 if (map->CanBeDeprecated()) { 5493 if (map->CanBeDeprecated()) {
5308 li(scratch, Operand(map)); 5494 li(scratch, Operand(map));
5309 lw(scratch, FieldMemOperand(scratch, Map::kBitField3Offset)); 5495 ld(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
5310 And(scratch, scratch, Operand(Map::Deprecated::kMask)); 5496 And(scratch, scratch, Operand(Map::Deprecated::kMask));
5311 Branch(if_deprecated, ne, scratch, Operand(zero_reg)); 5497 Branch(if_deprecated, ne, scratch, Operand(zero_reg));
5312 } 5498 }
5313 } 5499 }
5314 5500
5315 5501
5316 void MacroAssembler::JumpIfBlack(Register object, 5502 void MacroAssembler::JumpIfBlack(Register object,
5317 Register scratch0, 5503 Register scratch0,
5318 Register scratch1, 5504 Register scratch1,
5319 Label* on_black) { 5505 Label* on_black) {
5320 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. 5506 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
5321 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 5507 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5322 } 5508 }
5323 5509
5324 5510
5325 void MacroAssembler::HasColor(Register object, 5511 void MacroAssembler::HasColor(Register object,
5326 Register bitmap_scratch, 5512 Register bitmap_scratch,
5327 Register mask_scratch, 5513 Register mask_scratch,
5328 Label* has_color, 5514 Label* has_color,
5329 int first_bit, 5515 int first_bit,
5330 int second_bit) { 5516 int second_bit) {
5331 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8)); 5517 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t8));
5332 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9)); 5518 ASSERT(!AreAliased(object, bitmap_scratch, mask_scratch, t9));
5333 5519
5334 GetMarkBits(object, bitmap_scratch, mask_scratch); 5520 GetMarkBits(object, bitmap_scratch, mask_scratch);
5335 5521
5336 Label other_color, word_boundary; 5522 Label other_color;
5337 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5523 // Note that we are using a 4-byte aligned 8-byte load.
5524 Uld(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5338 And(t8, t9, Operand(mask_scratch)); 5525 And(t8, t9, Operand(mask_scratch));
5339 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg)); 5526 Branch(&other_color, first_bit == 1 ? eq : ne, t8, Operand(zero_reg));
5340 // Shift left 1 by adding. 5527 // Shift left 1 by adding.
5341 Addu(mask_scratch, mask_scratch, Operand(mask_scratch)); 5528 Daddu(mask_scratch, mask_scratch, Operand(mask_scratch));
5342 Branch(&word_boundary, eq, mask_scratch, Operand(zero_reg));
5343 And(t8, t9, Operand(mask_scratch)); 5529 And(t8, t9, Operand(mask_scratch));
5344 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg)); 5530 Branch(has_color, second_bit == 1 ? ne : eq, t8, Operand(zero_reg));
5345 jmp(&other_color);
5346 5531
5347 bind(&word_boundary);
5348 lw(t9, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kPointerSize));
5349 And(t9, t9, Operand(1));
5350 Branch(has_color, second_bit == 1 ? ne : eq, t9, Operand(zero_reg));
5351 bind(&other_color); 5532 bind(&other_color);
5352 } 5533 }
5353 5534
5354 5535
5355 // Detect some, but not all, common pointer-free objects. This is used by the 5536 // Detect some, but not all, common pointer-free objects. This is used by the
5356 // incremental write barrier which doesn't care about oddballs (they are always 5537 // incremental write barrier which doesn't care about oddballs (they are always
5357 // marked black immediately so this code is not hit). 5538 // marked black immediately so this code is not hit).
5358 void MacroAssembler::JumpIfDataObject(Register value, 5539 void MacroAssembler::JumpIfDataObject(Register value,
5359 Register scratch, 5540 Register scratch,
5360 Label* not_data_object) { 5541 Label* not_data_object) {
5361 ASSERT(!AreAliased(value, scratch, t8, no_reg)); 5542 ASSERT(!AreAliased(value, scratch, t8, no_reg));
5362 Label is_data_object; 5543 Label is_data_object;
5363 lw(scratch, FieldMemOperand(value, HeapObject::kMapOffset)); 5544 ld(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
5364 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); 5545 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5365 Branch(&is_data_object, eq, t8, Operand(scratch)); 5546 Branch(&is_data_object, eq, t8, Operand(scratch));
5366 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1); 5547 ASSERT(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
5367 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80); 5548 ASSERT(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
5368 // If it's a string and it's not a cons string then it's an object containing 5549 // If it's a string and it's not a cons string then it's an object containing
5369 // no GC pointers. 5550 // no GC pointers.
5370 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset)); 5551 lbu(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
5371 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask)); 5552 And(t8, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
5372 Branch(not_data_object, ne, t8, Operand(zero_reg)); 5553 Branch(not_data_object, ne, t8, Operand(zero_reg));
5373 bind(&is_data_object); 5554 bind(&is_data_object);
5374 } 5555 }
5375 5556
5376 5557
5377 void MacroAssembler::GetMarkBits(Register addr_reg, 5558 void MacroAssembler::GetMarkBits(Register addr_reg,
5378 Register bitmap_reg, 5559 Register bitmap_reg,
5379 Register mask_reg) { 5560 Register mask_reg) {
5380 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg)); 5561 ASSERT(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
5562 // addr_reg is divided into fields:
5563 // |63 page base 20|19 high 8|7 shift 3|2 0|
5564 // 'high' gives the index of the cell holding color bits for the object.
5565 // 'shift' gives the offset in the cell for this object's color.
5381 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask)); 5566 And(bitmap_reg, addr_reg, Operand(~Page::kPageAlignmentMask));
5382 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2); 5567 Ext(mask_reg, addr_reg, kPointerSizeLog2, Bitmap::kBitsPerCellLog2);
5383 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2; 5568 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
5384 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits); 5569 Ext(t8, addr_reg, kLowBits, kPageSizeBits - kLowBits);
5385 sll(t8, t8, kPointerSizeLog2); 5570 dsll(t8, t8, Bitmap::kBytesPerCellLog2);
5386 Addu(bitmap_reg, bitmap_reg, t8); 5571 Daddu(bitmap_reg, bitmap_reg, t8);
5387 li(t8, Operand(1)); 5572 li(t8, Operand(1));
5388 sllv(mask_reg, t8, mask_reg); 5573 dsllv(mask_reg, t8, mask_reg);
5389 } 5574 }
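
Following the field diagram in the comment, the lookup splits an address into a page base, a bitmap cell within the page, and a bit position within that cell; the assembler then scales the cell index by the cell size to form a byte offset. A sketch of the index arithmetic, assuming 8-byte pointers and 32-bit mark-bit cells (kBitsPerCellLog2 == 5; both values are assumptions here):

    #include <cstdint>
    // Sketch: compute the bitmap cell index and the in-cell mask for an address.
    void MarkBitsForAddress(uintptr_t addr, uintptr_t* cell_index, uint32_t* mask) {
      const int kPointerSizeLog2 = 3;   // 8-byte pointers on mips64.
      const int kBitsPerCellLog2 = 5;   // assumed 32 mark bits per cell.
      const int kPageSizeBits = 20;     // from the "|63 page base 20|..." diagram.
      uintptr_t page_offset = addr & ((uintptr_t(1) << kPageSizeBits) - 1);
      *mask = uint32_t(1) << ((addr >> kPointerSizeLog2) & ((1 << kBitsPerCellLog2) - 1));
      *cell_index = page_offset >> (kPointerSizeLog2 + kBitsPerCellLog2);
    }
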
5390 5575
5391 5576
5392 void MacroAssembler::EnsureNotWhite( 5577 void MacroAssembler::EnsureNotWhite(
5393 Register value, 5578 Register value,
5394 Register bitmap_scratch, 5579 Register bitmap_scratch,
5395 Register mask_scratch, 5580 Register mask_scratch,
5396 Register load_scratch, 5581 Register load_scratch,
5397 Label* value_is_white_and_not_data) { 5582 Label* value_is_white_and_not_data) {
5398 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8)); 5583 ASSERT(!AreAliased(value, bitmap_scratch, mask_scratch, t8));
5399 GetMarkBits(value, bitmap_scratch, mask_scratch); 5584 GetMarkBits(value, bitmap_scratch, mask_scratch);
5400 5585
5401 // If the value is black or grey we don't need to do anything. 5586 // If the value is black or grey we don't need to do anything.
5402 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0); 5587 ASSERT(strcmp(Marking::kWhiteBitPattern, "00") == 0);
5403 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0); 5588 ASSERT(strcmp(Marking::kBlackBitPattern, "10") == 0);
5404 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0); 5589 ASSERT(strcmp(Marking::kGreyBitPattern, "11") == 0);
5405 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0); 5590 ASSERT(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
5406 5591
5407 Label done; 5592 Label done;
5408 5593
5409 // Since both black and grey have a 1 in the first position and white does 5594 // Since both black and grey have a 1 in the first position and white does
5410 // not have a 1 there we only need to check one bit. 5595 // not have a 1 there we only need to check one bit.
5411 lw(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5596 // Note that we are using a 4-byte aligned 8-byte load.
5597 Uld(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5412 And(t8, mask_scratch, load_scratch); 5598 And(t8, mask_scratch, load_scratch);
5413 Branch(&done, ne, t8, Operand(zero_reg)); 5599 Branch(&done, ne, t8, Operand(zero_reg));
5414 5600
5415 if (emit_debug_code()) { 5601 if (emit_debug_code()) {
5416 // Check for impossible bit pattern. 5602 // Check for impossible bit pattern.
5417 Label ok; 5603 Label ok;
5418 // sll may overflow, making the check conservative. 5604 // sll may overflow, making the check conservative.
5419 sll(t8, mask_scratch, 1); 5605 dsll(t8, mask_scratch, 1);
5420 And(t8, load_scratch, t8); 5606 And(t8, load_scratch, t8);
5421 Branch(&ok, eq, t8, Operand(zero_reg)); 5607 Branch(&ok, eq, t8, Operand(zero_reg));
5422 stop("Impossible marking bit pattern"); 5608 stop("Impossible marking bit pattern");
5423 bind(&ok); 5609 bind(&ok);
5424 } 5610 }
5425 5611
5426 // Value is white. We check whether it is data that doesn't need scanning. 5612 // Value is white. We check whether it is data that doesn't need scanning.
5427 // Currently only checks for HeapNumber and non-cons strings. 5613 // Currently only checks for HeapNumber and non-cons strings.
5428 Register map = load_scratch; // Holds map while checking type. 5614 Register map = load_scratch; // Holds map while checking type.
5429 Register length = load_scratch; // Holds length of object after testing type. 5615 Register length = load_scratch; // Holds length of object after testing type.
5430 Label is_data_object; 5616 Label is_data_object;
5431 5617
5432 // Check for heap-number 5618 // Check for heap-number
5433 lw(map, FieldMemOperand(value, HeapObject::kMapOffset)); 5619 ld(map, FieldMemOperand(value, HeapObject::kMapOffset));
5434 LoadRoot(t8, Heap::kHeapNumberMapRootIndex); 5620 LoadRoot(t8, Heap::kHeapNumberMapRootIndex);
5435 { 5621 {
5436 Label skip; 5622 Label skip;
5437 Branch(&skip, ne, t8, Operand(map)); 5623 Branch(&skip, ne, t8, Operand(map));
5438 li(length, HeapNumber::kSize); 5624 li(length, HeapNumber::kSize);
5439 Branch(&is_data_object); 5625 Branch(&is_data_object);
5440 bind(&skip); 5626 bind(&skip);
5441 } 5627 }
5442 5628
5443 // Check for strings. 5629 // Check for strings.
(...skipping 20 matching lines...)
5464 Branch(&is_data_object); 5650 Branch(&is_data_object);
5465 bind(&skip); 5651 bind(&skip);
5466 } 5652 }
5467 5653
5468 // Sequential string, either ASCII or UC16. 5654 // Sequential string, either ASCII or UC16.
5469 // For ASCII (char-size of 1) we shift the smi tag away to get the length. 5655 // For ASCII (char-size of 1) we shift the smi tag away to get the length.
5470 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby 5656 // For UC16 (char-size of 2) we just leave the smi tag in place, thereby
5471 // getting the length multiplied by 2. 5657 // getting the length multiplied by 2.
5472 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4); 5658 ASSERT(kOneByteStringTag == 4 && kStringEncodingMask == 4);
5473 ASSERT(kSmiTag == 0 && kSmiTagSize == 1); 5659 ASSERT(kSmiTag == 0 && kSmiTagSize == 1);
5474 lw(t9, FieldMemOperand(value, String::kLengthOffset)); 5660 lw(t9, UntagSmiFieldMemOperand(value, String::kLengthOffset));
5475 And(t8, instance_type, Operand(kStringEncodingMask)); 5661 And(t8, instance_type, Operand(kStringEncodingMask));
5476 { 5662 {
5477 Label skip; 5663 Label skip;
5478 Branch(&skip, eq, t8, Operand(zero_reg)); 5664 Branch(&skip, ne, t8, Operand(zero_reg));
5479 srl(t9, t9, 1); 5665 // Adjust length for UC16.
5666 dsll(t9, t9, 1);
5480 bind(&skip); 5667 bind(&skip);
5481 } 5668 }
5482 Addu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask)); 5669 Daddu(length, t9, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
5670 ASSERT(!length.is(t8));
5483 And(length, length, Operand(~kObjectAlignmentMask)); 5671 And(length, length, Operand(~kObjectAlignmentMask));
5484 5672
5485 bind(&is_data_object); 5673 bind(&is_data_object);
5486 // Value is a data object, and it is white. Mark it black. Since we know 5674 // Value is a data object, and it is white. Mark it black. Since we know
5487 // that the object is white we can make it black by flipping one bit. 5675 // that the object is white we can make it black by flipping one bit.
5488 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5676 Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5489 Or(t8, t8, Operand(mask_scratch)); 5677 Or(t8, t8, Operand(mask_scratch));
5490 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize)); 5678 Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
5491 5679
5492 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask)); 5680 And(bitmap_scratch, bitmap_scratch, Operand(~Page::kPageAlignmentMask));
5493 lw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); 5681 Uld(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5494 Addu(t8, t8, Operand(length)); 5682 Daddu(t8, t8, Operand(length));
5495 sw(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset)); 5683 Usd(t8, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
5496 5684
5497 bind(&done); 5685 bind(&done);
5498 } 5686 }
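
The live-bytes update at the end needs the object size; for sequential strings it is the character count (doubled for two-byte strings) plus the header, rounded up to the allocation alignment. A sketch of that size computation, with the header size and alignment mask left as parameters since their values are not shown here:

    // Sketch: size in bytes of a sequential string with 'chars' characters.
    int SeqStringSize(int chars, bool is_two_byte, int header_size, int alignment_mask) {
      int length = is_two_byte ? chars * 2 : chars;   // UC16 uses 2 bytes per character.
      return (length + header_size + alignment_mask) & ~alignment_mask;
    }
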
5499 5687
5500 5688
5501 void MacroAssembler::LoadInstanceDescriptors(Register map, 5689 void MacroAssembler::LoadInstanceDescriptors(Register map,
5502 Register descriptors) { 5690 Register descriptors) {
5503 lw(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset)); 5691 ld(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
5504 } 5692 }
5505 5693
5506 5694
5507 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) { 5695 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
5508 lw(dst, FieldMemOperand(map, Map::kBitField3Offset)); 5696 ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
5509 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst); 5697 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
5510 } 5698 }
5511 5699
5512 5700
5513 void MacroAssembler::EnumLength(Register dst, Register map) { 5701 void MacroAssembler::EnumLength(Register dst, Register map) {
5514 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); 5702 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
5515 lw(dst, FieldMemOperand(map, Map::kBitField3Offset)); 5703 ld(dst, FieldMemOperand(map, Map::kBitField3Offset));
5516 And(dst, dst, Operand(Map::EnumLengthBits::kMask)); 5704 And(dst, dst, Operand(Map::EnumLengthBits::kMask));
5517 SmiTag(dst); 5705 SmiTag(dst);
5518 } 5706 }
5519 5707
5520 5708
5521 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { 5709 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
5522 Register empty_fixed_array_value = t2; 5710 Register empty_fixed_array_value = a6;
5523 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); 5711 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
5524 Label next, start; 5712 Label next, start;
5525 mov(a2, a0); 5713 mov(a2, a0);
5526 5714
5527 // Check if the enum length field is properly initialized, indicating that 5715 // Check if the enum length field is properly initialized, indicating that
5528 // there is an enum cache. 5716 // there is an enum cache.
5529 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); 5717 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5530 5718
5531 EnumLength(a3, a1); 5719 EnumLength(a3, a1);
5532 Branch( 5720 Branch(
5533 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel))); 5721 call_runtime, eq, a3, Operand(Smi::FromInt(kInvalidEnumCacheSentinel)));
5534 5722
5535 jmp(&start); 5723 jmp(&start);
5536 5724
5537 bind(&next); 5725 bind(&next);
5538 lw(a1, FieldMemOperand(a2, HeapObject::kMapOffset)); 5726 ld(a1, FieldMemOperand(a2, HeapObject::kMapOffset));
5539 5727
5540 // For all objects but the receiver, check that the cache is empty. 5728 // For all objects but the receiver, check that the cache is empty.
5541 EnumLength(a3, a1); 5729 EnumLength(a3, a1);
5542 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0))); 5730 Branch(call_runtime, ne, a3, Operand(Smi::FromInt(0)));
5543 5731
5544 bind(&start); 5732 bind(&start);
5545 5733
5546 // Check that there are no elements. Register a2 contains the current JS 5734 // Check that there are no elements. Register a2 contains the current JS
5547 // object we've reached through the prototype chain. 5735 // object we've reached through the prototype chain.
5548 Label no_elements; 5736 Label no_elements;
5549 lw(a2, FieldMemOperand(a2, JSObject::kElementsOffset)); 5737 ld(a2, FieldMemOperand(a2, JSObject::kElementsOffset));
5550 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value)); 5738 Branch(&no_elements, eq, a2, Operand(empty_fixed_array_value));
5551 5739
5552 // Second chance, the object may be using the empty slow element dictionary. 5740 // Second chance, the object may be using the empty slow element dictionary.
5553 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex); 5741 LoadRoot(at, Heap::kEmptySlowElementDictionaryRootIndex);
5554 Branch(call_runtime, ne, a2, Operand(at)); 5742 Branch(call_runtime, ne, a2, Operand(at));
5555 5743
5556 bind(&no_elements); 5744 bind(&no_elements);
5557 lw(a2, FieldMemOperand(a1, Map::kPrototypeOffset)); 5745 ld(a2, FieldMemOperand(a1, Map::kPrototypeOffset));
5558 Branch(&next, ne, a2, Operand(null_value)); 5746 Branch(&next, ne, a2, Operand(null_value));
5559 } 5747 }
5560 5748
5561 5749
5562 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { 5750 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
5563 ASSERT(!output_reg.is(input_reg)); 5751 ASSERT(!output_reg.is(input_reg));
5564 Label done; 5752 Label done;
5565 li(output_reg, Operand(255)); 5753 li(output_reg, Operand(255));
5566 // Normal branch: nop in delay slot. 5754 // Normal branch: nop in delay slot.
5567 Branch(&done, gt, input_reg, Operand(output_reg)); 5755 Branch(&done, gt, input_reg, Operand(output_reg));
(...skipping 37 matching lines...)
5605 void MacroAssembler::TestJSArrayForAllocationMemento( 5793 void MacroAssembler::TestJSArrayForAllocationMemento(
5606 Register receiver_reg, 5794 Register receiver_reg,
5607 Register scratch_reg, 5795 Register scratch_reg,
5608 Label* no_memento_found, 5796 Label* no_memento_found,
5609 Condition cond, 5797 Condition cond,
5610 Label* allocation_memento_present) { 5798 Label* allocation_memento_present) {
5611 ExternalReference new_space_start = 5799 ExternalReference new_space_start =
5612 ExternalReference::new_space_start(isolate()); 5800 ExternalReference::new_space_start(isolate());
5613 ExternalReference new_space_allocation_top = 5801 ExternalReference new_space_allocation_top =
5614 ExternalReference::new_space_allocation_top_address(isolate()); 5802 ExternalReference::new_space_allocation_top_address(isolate());
5615 Addu(scratch_reg, receiver_reg, 5803 Daddu(scratch_reg, receiver_reg,
5616 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); 5804 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
5617 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start)); 5805 Branch(no_memento_found, lt, scratch_reg, Operand(new_space_start));
5618 li(at, Operand(new_space_allocation_top)); 5806 li(at, Operand(new_space_allocation_top));
5619 lw(at, MemOperand(at)); 5807 ld(at, MemOperand(at));
5620 Branch(no_memento_found, gt, scratch_reg, Operand(at)); 5808 Branch(no_memento_found, gt, scratch_reg, Operand(at));
5621 lw(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize)); 5809 ld(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
5622 if (allocation_memento_present) { 5810 if (allocation_memento_present) {
5623 Branch(allocation_memento_present, cond, scratch_reg, 5811 Branch(allocation_memento_present, cond, scratch_reg,
5624 Operand(isolate()->factory()->allocation_memento_map())); 5812 Operand(isolate()->factory()->allocation_memento_map()));
5625 } 5813 }
5626 } 5814 }
5627 5815
5628 5816
5629 Register GetRegisterThatIsNotOneOf(Register reg1, 5817 Register GetRegisterThatIsNotOneOf(Register reg1,
5630 Register reg2, 5818 Register reg2,
5631 Register reg3, 5819 Register reg3,
(...skipping 26 matching lines...)
5658 ASSERT(!scratch1.is(scratch0)); 5846 ASSERT(!scratch1.is(scratch0));
5659 Factory* factory = isolate()->factory(); 5847 Factory* factory = isolate()->factory();
5660 Register current = scratch0; 5848 Register current = scratch0;
5661 Label loop_again; 5849 Label loop_again;
5662 5850
5663 // Scratch contained elements pointer. 5851 // Scratch contained elements pointer.
5664 Move(current, object); 5852 Move(current, object);
5665 5853
5666 // Loop based on the map going up the prototype chain. 5854 // Loop based on the map going up the prototype chain.
5667 bind(&loop_again); 5855 bind(&loop_again);
5668 lw(current, FieldMemOperand(current, HeapObject::kMapOffset)); 5856 ld(current, FieldMemOperand(current, HeapObject::kMapOffset));
5669 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset)); 5857 lb(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
5670 DecodeField<Map::ElementsKindBits>(scratch1); 5858 DecodeField<Map::ElementsKindBits>(scratch1);
5671 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS)); 5859 Branch(found, eq, scratch1, Operand(DICTIONARY_ELEMENTS));
5672 lw(current, FieldMemOperand(current, Map::kPrototypeOffset)); 5860 ld(current, FieldMemOperand(current, Map::kPrototypeOffset));
5673 Branch(&loop_again, ne, current, Operand(factory->null_value())); 5861 Branch(&loop_again, ne, current, Operand(factory->null_value()));
5674 } 5862 }
5675 5863
5676 5864
5677 bool AreAliased(Register r1, Register r2, Register r3, Register r4) { 5865 bool AreAliased(Register r1, Register r2, Register r3, Register r4) {
5678 if (r1.is(r2)) return true; 5866 if (r1.is(r2)) return true;
5679 if (r1.is(r3)) return true; 5867 if (r1.is(r3)) return true;
5680 if (r1.is(r4)) return true; 5868 if (r1.is(r4)) return true;
5681 if (r2.is(r3)) return true; 5869 if (r2.is(r3)) return true;
5682 if (r2.is(r4)) return true; 5870 if (r2.is(r4)) return true;
(...skipping 14 matching lines...)
5697 // bytes of instructions without failing with buffer size constraints. 5885 // bytes of instructions without failing with buffer size constraints.
5698 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 5886 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5699 } 5887 }
5700 5888
5701 5889
5702 CodePatcher::~CodePatcher() { 5890 CodePatcher::~CodePatcher() {
5703 // Indicate that code has changed. 5891 // Indicate that code has changed.
5704 if (flush_cache_ == FLUSH) { 5892 if (flush_cache_ == FLUSH) {
5705 CpuFeatures::FlushICache(address_, size_); 5893 CpuFeatures::FlushICache(address_, size_);
5706 } 5894 }
5707
5708 // Check that the code was patched as expected. 5895 // Check that the code was patched as expected.
5709 ASSERT(masm_.pc_ == address_ + size_); 5896 ASSERT(masm_.pc_ == address_ + size_);
5710 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap); 5897 ASSERT(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
5711 } 5898 }
5712 5899
5713 5900
5714 void CodePatcher::Emit(Instr instr) { 5901 void CodePatcher::Emit(Instr instr) {
5715 masm()->emit(instr); 5902 masm()->emit(instr);
5716 } 5903 }
5717 5904
5718 5905
5719 void CodePatcher::Emit(Address addr) { 5906 void CodePatcher::Emit(Address addr) {
5720 masm()->emit(reinterpret_cast<Instr>(addr)); 5907 // masm()->emit(reinterpret_cast<Instr>(addr));
5721 } 5908 }
5722 5909
5723 5910
5724 void CodePatcher::ChangeBranchCondition(Condition cond) { 5911 void CodePatcher::ChangeBranchCondition(Condition cond) {
5725 Instr instr = Assembler::instr_at(masm_.pc_); 5912 Instr instr = Assembler::instr_at(masm_.pc_);
5726 ASSERT(Assembler::IsBranch(instr)); 5913 ASSERT(Assembler::IsBranch(instr));
5727 uint32_t opcode = Assembler::GetOpcodeField(instr); 5914 uint32_t opcode = Assembler::GetOpcodeField(instr);
5728 // Currently only the 'eq' and 'ne' cond values are supported and the simple 5915 // Currently only the 'eq' and 'ne' cond values are supported and the simple
5729 // branch instructions (with opcode being the branch type). 5916 // branch instructions (with opcode being the branch type).
5730 // There are some special cases (see Assembler::IsBranch()) so extending this 5917 // There are some special cases (see Assembler::IsBranch()) so extending this
(...skipping 29 matching lines...)
5760 Subu(result, result, Operand(dividend)); 5947 Subu(result, result, Operand(dividend));
5761 } 5948 }
5762 if (ms.shift() > 0) sra(result, result, ms.shift()); 5949 if (ms.shift() > 0) sra(result, result, ms.shift());
5763 srl(at, dividend, 31); 5950 srl(at, dividend, 31);
5764 Addu(result, result, Operand(at)); 5951 Addu(result, result, Operand(at));
5765 } 5952 }
5766 5953
5767 5954
5768 } } // namespace v8::internal 5955 } } // namespace v8::internal
5769 5956
5770 #endif // V8_TARGET_ARCH_MIPS 5957 #endif // V8_TARGET_ARCH_MIPS64