Chromium Code Reviews

Side by Side Diff: src/ppc/macro-assembler-ppc.cc

Issue 714093002: PowerPC specific sub-directories. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 1 month ago
1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file.
4
5 #include <assert.h> // For assert
6 #include <limits.h> // For LONG_MIN, LONG_MAX.
7
8 #include "src/v8.h"
9
10 #if V8_TARGET_ARCH_PPC
11
12 #include "src/base/bits.h"
13 #include "src/base/division-by-constant.h"
14 #include "src/bootstrapper.h"
15 #include "src/codegen.h"
16 #include "src/cpu-profiler.h"
17 #include "src/debug.h"
18 #include "src/isolate-inl.h"
19 #include "src/runtime/runtime.h"
20
21 namespace v8 {
22 namespace internal {
23
24 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
25 : Assembler(arg_isolate, buffer, size),
26 generating_stub_(false),
27 has_frame_(false) {
28 if (isolate() != NULL) {
29 code_object_ =
30 Handle<Object>(isolate()->heap()->undefined_value(), isolate());
31 }
32 }
33
34
35 void MacroAssembler::Jump(Register target) {
36 mtctr(target);
37 bctr();
38 }
39
40
41 void MacroAssembler::JumpToJSEntry(Register target) {
42 Move(ip, target);
43 Jump(ip);
44 }
45
46
47 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
48 Condition cond, CRegister cr) {
49 Label skip;
50
51 if (cond != al) b(NegateCondition(cond), &skip, cr);
52
53 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
54
55 mov(ip, Operand(target, rmode));
56 mtctr(ip);
57 bctr();
58
59 bind(&skip);
60 }
61
62
63 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
64 CRegister cr) {
65 DCHECK(!RelocInfo::IsCodeTarget(rmode));
66 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
67 }
68
69
70 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
71 Condition cond) {
72 DCHECK(RelocInfo::IsCodeTarget(rmode));
73 // 'code' is always generated ppc code, never THUMB code
74 AllowDeferredHandleDereference embedding_raw_address;
75 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
76 }
77
78
79 int MacroAssembler::CallSize(Register target) { return 2 * kInstrSize; }
80
81
82 void MacroAssembler::Call(Register target) {
83 BlockTrampolinePoolScope block_trampoline_pool(this);
84 Label start;
85 bind(&start);
86
87 // Statement positions are expected to be recorded when the target
88 // address is loaded.
89 positions_recorder()->WriteRecordedPositions();
90
91 // branch via link register and set LK bit for return point
92 mtctr(target);
93 bctrl();
94
95 DCHECK_EQ(CallSize(target), SizeOfCodeGeneratedSince(&start));
96 }
97
98
99 void MacroAssembler::CallJSEntry(Register target) {
100 DCHECK(target.is(ip));
101 Call(target);
102 }
103
104
105 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
106 Condition cond) {
107 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
108 return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize;
109 }
110
111
112 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
113 RelocInfo::Mode rmode,
114 Condition cond) {
115 return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
116 }
117
118
119 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
120 Condition cond) {
121 BlockTrampolinePoolScope block_trampoline_pool(this);
122 DCHECK(cond == al);
123
124 #ifdef DEBUG
125 // Check the expected size before generating code to ensure we assume the same
126 // constant pool availability (e.g., whether constant pool is full or not).
127 int expected_size = CallSize(target, rmode, cond);
128 Label start;
129 bind(&start);
130 #endif
131
132 // Statement positions are expected to be recorded when the target
133 // address is loaded.
134 positions_recorder()->WriteRecordedPositions();
135
136 // This can likely be optimized to make use of bc() with a 24-bit relative offset
137 //
138 // RecordRelocInfo(x.rmode_, x.imm_);
139 // bc( BA, .... offset, LKset);
140 //
141
142 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
143 mtctr(ip);
144 bctrl();
145
146 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
147 }
148
149
150 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
151 TypeFeedbackId ast_id, Condition cond) {
152 AllowDeferredHandleDereference using_raw_address;
153 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
154 }
155
156
157 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
158 TypeFeedbackId ast_id, Condition cond) {
159 BlockTrampolinePoolScope block_trampoline_pool(this);
160 DCHECK(RelocInfo::IsCodeTarget(rmode));
161
162 #ifdef DEBUG
163 // Check the expected size before generating code to ensure we assume the same
164 // constant pool availability (e.g., whether constant pool is full or not).
165 int expected_size = CallSize(code, rmode, ast_id, cond);
166 Label start;
167 bind(&start);
168 #endif
169
170 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
171 SetRecordedAstId(ast_id);
172 rmode = RelocInfo::CODE_TARGET_WITH_ID;
173 }
174 AllowDeferredHandleDereference using_raw_address;
175 Call(reinterpret_cast<Address>(code.location()), rmode, cond);
176 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
177 }
178
179
180 void MacroAssembler::Ret(Condition cond) {
181 DCHECK(cond == al);
182 blr();
183 }
184
185
186 void MacroAssembler::Drop(int count, Condition cond) {
187 DCHECK(cond == al);
188 if (count > 0) {
189 Add(sp, sp, count * kPointerSize, r0);
190 }
191 }
192
193
194 void MacroAssembler::Ret(int drop, Condition cond) {
195 Drop(drop, cond);
196 Ret(cond);
197 }
198
199
200 void MacroAssembler::Call(Label* target) { b(target, SetLK); }
201
202
203 void MacroAssembler::Push(Handle<Object> handle) {
204 mov(r0, Operand(handle));
205 push(r0);
206 }
207
208
209 void MacroAssembler::Move(Register dst, Handle<Object> value) {
210 AllowDeferredHandleDereference smi_check;
211 if (value->IsSmi()) {
212 LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
213 } else {
214 DCHECK(value->IsHeapObject());
215 if (isolate()->heap()->InNewSpace(*value)) {
216 Handle<Cell> cell = isolate()->factory()->NewCell(value);
217 mov(dst, Operand(cell));
218 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
219 } else {
220 mov(dst, Operand(value));
221 }
222 }
223 }
224
225
226 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
227 DCHECK(cond == al);
228 if (!dst.is(src)) {
229 mr(dst, src);
230 }
231 }
232
233
234 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
235 if (!dst.is(src)) {
236 fmr(dst, src);
237 }
238 }
239
240
241 void MacroAssembler::MultiPush(RegList regs) {
242 int16_t num_to_push = NumberOfBitsSet(regs);
243 int16_t stack_offset = num_to_push * kPointerSize;
244
245 subi(sp, sp, Operand(stack_offset));
246 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
247 if ((regs & (1 << i)) != 0) {
248 stack_offset -= kPointerSize;
249 StoreP(ToRegister(i), MemOperand(sp, stack_offset));
250 }
251 }
252 }
253
254
255 void MacroAssembler::MultiPop(RegList regs) {
256 int16_t stack_offset = 0;
257
258 for (int16_t i = 0; i < kNumRegisters; i++) {
259 if ((regs & (1 << i)) != 0) {
260 LoadP(ToRegister(i), MemOperand(sp, stack_offset));
261 stack_offset += kPointerSize;
262 }
263 }
264 addi(sp, sp, Operand(stack_offset));
265 }
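
Note: a RegList is just a bitmask over register codes, and MultiPush/MultiPop agree on a fixed slot order: the lowest set code ends up closest to sp. A minimal C++ sketch of that slot rule (names here are illustrative, not V8 API):

  #include <cstdint>

  // Slot (in pointer-size units above sp) that MultiPush assigns to
  // reg_code: one slot per set bit with a lower code.
  int SlotForRegister(uint32_t regs, int reg_code) {
    int slot = 0;
    for (int i = 0; i < reg_code; i++) {
      if (regs & (1u << i)) slot++;
    }
    return slot;  // i.e. MemOperand(sp, slot * kPointerSize)
  }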
266
267
268 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
269 Condition cond) {
270 DCHECK(cond == al);
271 LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
272 }
273
274
275 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
276 Condition cond) {
277 DCHECK(cond == al);
278 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
279 }
280
281
282 void MacroAssembler::InNewSpace(Register object, Register scratch,
283 Condition cond, Label* branch) {
284 // N.B. scratch may be the same register as object
285 DCHECK(cond == eq || cond == ne);
286 mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
287 and_(scratch, object, r0);
288 mov(r0, Operand(ExternalReference::new_space_start(isolate())));
289 cmp(scratch, r0);
290 b(cond, branch);
291 }
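
Note: the emitted sequence boils down to a mask-and-compare against the new-space base. A sketch of the predicate, assuming mask/start describe a power-of-two-sized, aligned semispace:

  #include <cstdint>

  bool InNewSpace(uintptr_t addr, uintptr_t new_space_mask,
                  uintptr_t new_space_start) {
    // and_(scratch, object, mask); cmp(scratch, start)
    return (addr & new_space_mask) == new_space_start;
  }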
292
293
294 void MacroAssembler::RecordWriteField(
295 Register object, int offset, Register value, Register dst,
296 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
297 RememberedSetAction remembered_set_action, SmiCheck smi_check,
298 PointersToHereCheck pointers_to_here_check_for_value) {
299 // First, check if a write barrier is even needed. The tests below
300 // catch stores of Smis.
301 Label done;
302
303 // Skip barrier if writing a smi.
304 if (smi_check == INLINE_SMI_CHECK) {
305 JumpIfSmi(value, &done);
306 }
307
308 // Although the object register is tagged, the offset is relative to the start
309 // of the object, so the offset must be a multiple of kPointerSize.
310 DCHECK(IsAligned(offset, kPointerSize));
311
312 Add(dst, object, offset - kHeapObjectTag, r0);
313 if (emit_debug_code()) {
314 Label ok;
315 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
316 beq(&ok, cr0);
317 stop("Unaligned cell in write barrier");
318 bind(&ok);
319 }
320
321 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
322 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
323
324 bind(&done);
325
326 // Clobber clobbered input registers when running with the debug-code flag
327 // turned on to provoke errors.
328 if (emit_debug_code()) {
329 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
330 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
331 }
332 }
333
334
335 // Will clobber 4 registers: object, map, dst, ip. The
336 // register 'object' contains a heap object pointer.
337 void MacroAssembler::RecordWriteForMap(Register object, Register map,
338 Register dst,
339 LinkRegisterStatus lr_status,
340 SaveFPRegsMode fp_mode) {
341 if (emit_debug_code()) {
342 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
343 Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
344 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
345 }
346
347 if (!FLAG_incremental_marking) {
348 return;
349 }
350
351 if (emit_debug_code()) {
352 LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
353 cmp(ip, map);
354 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
355 }
356
357 Label done;
358
359 // A single check of the map's page's interesting flag suffices, since it is
360 // only set during incremental collection, and then it's also guaranteed that
361 // the from object's page's interesting flag is also set. This optimization
362 // relies on the fact that maps can never be in new space.
363 CheckPageFlag(map,
364 map, // Used as scratch.
365 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
366
367 addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
368 if (emit_debug_code()) {
369 Label ok;
370 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
371 beq(&ok, cr0);
372 stop("Unaligned cell in write barrier");
373 bind(&ok);
374 }
375
376 // Record the actual write.
377 if (lr_status == kLRHasNotBeenSaved) {
378 mflr(r0);
379 push(r0);
380 }
381 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
382 fp_mode);
383 CallStub(&stub);
384 if (lr_status == kLRHasNotBeenSaved) {
385 pop(r0);
386 mtlr(r0);
387 }
388
389 bind(&done);
390
391 // Count number of write barriers in generated code.
392 isolate()->counters()->write_barriers_static()->Increment();
393 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
394
395 // Clobber clobbered registers when running with the debug-code flag
396 // turned on to provoke errors.
397 if (emit_debug_code()) {
398 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
399 mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
400 }
401 }
402
403
404 // Will clobber 4 registers: object, address, scratch, ip. The
405 // register 'object' contains a heap object pointer. The heap object
406 // tag is shifted away.
407 void MacroAssembler::RecordWrite(
408 Register object, Register address, Register value,
409 LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
410 RememberedSetAction remembered_set_action, SmiCheck smi_check,
411 PointersToHereCheck pointers_to_here_check_for_value) {
412 DCHECK(!object.is(value));
413 if (emit_debug_code()) {
414 LoadP(r0, MemOperand(address));
415 cmp(r0, value);
416 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
417 }
418
419 if (remembered_set_action == OMIT_REMEMBERED_SET &&
420 !FLAG_incremental_marking) {
421 return;
422 }
423
424 // First, check if a write barrier is even needed. The tests below
425 // catch stores of smis and stores into the young generation.
426 Label done;
427
428 if (smi_check == INLINE_SMI_CHECK) {
429 JumpIfSmi(value, &done);
430 }
431
432 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
433 CheckPageFlag(value,
434 value, // Used as scratch.
435 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
436 }
437 CheckPageFlag(object,
438 value, // Used as scratch.
439 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
440
441 // Record the actual write.
442 if (lr_status == kLRHasNotBeenSaved) {
443 mflr(r0);
444 push(r0);
445 }
446 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
447 fp_mode);
448 CallStub(&stub);
449 if (lr_status == kLRHasNotBeenSaved) {
450 pop(r0);
451 mtlr(r0);
452 }
453
454 bind(&done);
455
456 // Count number of write barriers in generated code.
457 isolate()->counters()->write_barriers_static()->Increment();
458 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
459 value);
460
461 // Clobber clobbered registers when running with the debug-code flag
462 // turned on to provoke errors.
463 if (emit_debug_code()) {
464 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
465 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
466 }
467 }
468
469
470 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
471 Register address, Register scratch,
472 SaveFPRegsMode fp_mode,
473 RememberedSetFinalAction and_then) {
474 Label done;
475 if (emit_debug_code()) {
476 Label ok;
477 JumpIfNotInNewSpace(object, scratch, &ok);
478 stop("Remembered set pointer is in new space");
479 bind(&ok);
480 }
481 // Load store buffer top.
482 ExternalReference store_buffer =
483 ExternalReference::store_buffer_top(isolate());
484 mov(ip, Operand(store_buffer));
485 LoadP(scratch, MemOperand(ip));
486 // Store pointer to buffer and increment buffer top.
487 StoreP(address, MemOperand(scratch));
488 addi(scratch, scratch, Operand(kPointerSize));
489 // Write back new top of buffer.
490 StoreP(scratch, MemOperand(ip));
491 // Call stub on end of buffer.
492 // Check for end of buffer.
493 mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
494 and_(r0, scratch, r0, SetRC);
495
496 if (and_then == kFallThroughAtEnd) {
497 beq(&done, cr0);
498 } else {
499 DCHECK(and_then == kReturnAtEnd);
500 beq(&done, cr0);
501 }
502 mflr(r0);
503 push(r0);
504 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
505 CallStub(&store_buffer_overflow);
506 pop(r0);
507 mtlr(r0);
508 bind(&done);
509 if (and_then == kReturnAtEnd) {
510 Ret();
511 }
512 }
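
Note: the fast path above is a plain append to the store buffer, with the overflow test folded into the new top pointer. Rough C++ equivalent (names assumed for illustration):

  #include <cstdint>

  void RememberedSetAppend(uintptr_t** store_buffer_top, uintptr_t* slot,
                           uintptr_t overflow_bit, void (*on_overflow)()) {
    uintptr_t* top = *store_buffer_top;
    *top++ = reinterpret_cast<uintptr_t>(slot);  // StoreP(address, ...)
    *store_buffer_top = top;                     // write back new top
    if (reinterpret_cast<uintptr_t>(top) & overflow_bit) {
      on_overflow();  // StoreBufferOverflowStub
    }
  }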
513
514
515 void MacroAssembler::PushFixedFrame(Register marker_reg) {
516 mflr(r0);
517 #if V8_OOL_CONSTANT_POOL
518 if (marker_reg.is_valid()) {
519 Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
520 } else {
521 Push(r0, fp, kConstantPoolRegister, cp);
522 }
523 #else
524 if (marker_reg.is_valid()) {
525 Push(r0, fp, cp, marker_reg);
526 } else {
527 Push(r0, fp, cp);
528 }
529 #endif
530 }
531
532
533 void MacroAssembler::PopFixedFrame(Register marker_reg) {
534 #if V8_OOL_CONSTANT_POOL
535 if (marker_reg.is_valid()) {
536 Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
537 } else {
538 Pop(r0, fp, kConstantPoolRegister, cp);
539 }
540 #else
541 if (marker_reg.is_valid()) {
542 Pop(r0, fp, cp, marker_reg);
543 } else {
544 Pop(r0, fp, cp);
545 }
546 #endif
547 mtlr(r0);
548 }
549
550
551 // Push and pop all registers that can hold pointers.
552 void MacroAssembler::PushSafepointRegisters() {
553 // Safepoints expect a block of kNumSafepointRegisters values on the
554 // stack, so adjust the stack for unsaved registers.
555 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
556 DCHECK(num_unsaved >= 0);
557 if (num_unsaved > 0) {
558 subi(sp, sp, Operand(num_unsaved * kPointerSize));
559 }
560 MultiPush(kSafepointSavedRegisters);
561 }
562
563
564 void MacroAssembler::PopSafepointRegisters() {
565 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
566 MultiPop(kSafepointSavedRegisters);
567 if (num_unsaved > 0) {
568 addi(sp, sp, Operand(num_unsaved * kPointerSize));
569 }
570 }
571
572
573 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
574 StoreP(src, SafepointRegisterSlot(dst));
575 }
576
577
578 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
579 LoadP(dst, SafepointRegisterSlot(src));
580 }
581
582
583 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
584 // The registers are pushed starting with the highest encoding,
585 // which means that lowest encodings are closest to the stack pointer.
586 RegList regs = kSafepointSavedRegisters;
587 int index = 0;
588
589 DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
590
591 for (int16_t i = 0; i < reg_code; i++) {
592 if ((regs & (1 << i)) != 0) {
593 index++;
594 }
595 }
596
597 return index;
598 }
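
Note: the loop is a population count of the saved-register bits below reg_code (the same slot rule MultiPush uses). With a compiler intrinsic, shown only as an illustration of the math, it collapses to:

  #include <cstdint>

  int SafepointRegisterStackIndex(uint32_t saved_regs, int reg_code) {
    // GCC/Clang builtin; counts set bits with a code below reg_code.
    return __builtin_popcount(saved_regs & ((1u << reg_code) - 1));
  }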
599
600
601 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
602 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
603 }
604
605
606 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
607 // General purpose registers are pushed last on the stack.
608 int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
609 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
610 return MemOperand(sp, doubles_size + register_offset);
611 }
612
613
614 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
615 const DoubleRegister src) {
616 Label done;
617
618 // Test for NaN
619 fcmpu(src, src);
620
621 if (dst.is(src)) {
622 bordered(&done);
623 } else {
624 Label is_nan;
625 bunordered(&is_nan);
626 fmr(dst, src);
627 b(&done);
628 bind(&is_nan);
629 }
630
631 // Replace with canonical NaN.
632 double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
633 LoadDoubleLiteral(dst, nan_value, r0);
634
635 bind(&done);
636 }
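
Note: fcmpu(src, src) reports "unordered" exactly when src is NaN, so the whole function is a NaN-select. Scalar sketch (the canonical NaN constant here is illustrative; V8 substitutes its own fixed bit pattern):

  #include <cmath>
  #include <limits>

  double Canonicalize(double src) {
    return std::isnan(src) ? std::numeric_limits<double>::quiet_NaN() : src;
  }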
637
638
639 void MacroAssembler::ConvertIntToDouble(Register src,
640 DoubleRegister double_dst) {
641 MovIntToDouble(double_dst, src, r0);
642 fcfid(double_dst, double_dst);
643 }
644
645
646 void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
647 DoubleRegister double_dst) {
648 MovUnsignedIntToDouble(double_dst, src, r0);
649 fcfid(double_dst, double_dst);
650 }
651
652
653 void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
654 const Register src,
655 const Register int_scratch) {
656 MovIntToDouble(dst, src, int_scratch);
657 fcfid(dst, dst);
658 frsp(dst, dst);
659 }
660
661
662 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
663 #if !V8_TARGET_ARCH_PPC64
664 const Register dst_hi,
665 #endif
666 const Register dst,
667 const DoubleRegister double_dst,
668 FPRoundingMode rounding_mode) {
669 if (rounding_mode == kRoundToZero) {
670 fctidz(double_dst, double_input);
671 } else {
672 SetRoundingMode(rounding_mode);
673 fctid(double_dst, double_input);
674 ResetRoundingMode();
675 }
676
677 MovDoubleToInt64(
678 #if !V8_TARGET_ARCH_PPC64
679 dst_hi,
680 #endif
681 dst, double_dst);
682 }
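
Note: the two flavors differ only in rounding. An approximate C++ rendering (a sketch: out-of-range inputs are undefined behavior in C++, whereas fctidz/fctid saturate):

  #include <cmath>
  #include <cstdint>

  int64_t ConvertDoubleToInt64(double input, bool round_to_zero) {
    return round_to_zero
               ? static_cast<int64_t>(input)  // fctidz: truncate toward zero
               : std::llrint(input);          // fctid: current rounding mode
  }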
683
684
685 #if V8_OOL_CONSTANT_POOL
686 void MacroAssembler::LoadConstantPoolPointerRegister(
687 CodeObjectAccessMethod access_method, int ip_code_entry_delta) {
688 Register base;
689 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize;
690 if (access_method == CAN_USE_IP) {
691 base = ip;
692 constant_pool_offset += ip_code_entry_delta;
693 } else {
694 DCHECK(access_method == CONSTRUCT_INTERNAL_REFERENCE);
695 base = kConstantPoolRegister;
696 ConstantPoolUnavailableScope constant_pool_unavailable(this);
697
698 // CheckBuffer() is called too frequently. This will pre-grow
699 // the buffer if needed to avoid splitting the relocation and instructions.
700 EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
701
702 uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
703 mov(base, Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
704 }
705 LoadP(kConstantPoolRegister, MemOperand(base, constant_pool_offset));
706 }
707 #endif
708
709
710 void MacroAssembler::StubPrologue(int prologue_offset) {
711 LoadSmiLiteral(r11, Smi::FromInt(StackFrame::STUB));
712 PushFixedFrame(r11);
713 // Adjust FP to point to saved FP.
714 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
715 #if V8_OOL_CONSTANT_POOL
716 // ip contains prologue address
717 LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
718 set_ool_constant_pool_available(true);
719 #endif
720 }
721
722
723 void MacroAssembler::Prologue(bool code_pre_aging, int prologue_offset) {
724 {
725 PredictableCodeSizeScope predictible_code_size_scope(
726 this, kNoCodeAgeSequenceLength);
727 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
728 // The following instructions must remain together and unmodified
729 // for code aging to work properly.
730 if (code_pre_aging) {
731 // Pre-age the code.
732 // This matches the code found in PatchPlatformCodeAge()
733 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
734 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
735 // Don't use Call -- we need to preserve ip and lr
736 nop(); // marker to detect sequence (see IsOld)
737 mov(r3, Operand(target));
738 Jump(r3);
739 for (int i = 0; i < kCodeAgingSequenceNops; i++) {
740 nop();
741 }
742 } else {
743 // This matches the code found in GetNoCodeAgeSequence()
744 PushFixedFrame(r4);
745 // Adjust fp to point to saved fp.
746 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
747 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
748 nop();
749 }
750 }
751 }
752 #if V8_OOL_CONSTANT_POOL
753 // ip contains prologue address
754 LoadConstantPoolPointerRegister(CAN_USE_IP, -prologue_offset);
755 set_ool_constant_pool_available(true);
756 #endif
757 }
758
759
760 void MacroAssembler::EnterFrame(StackFrame::Type type,
761 bool load_constant_pool_pointer_reg) {
762 if (FLAG_enable_ool_constant_pool && load_constant_pool_pointer_reg) {
763 PushFixedFrame();
764 #if V8_OOL_CONSTANT_POOL
765 // This path should not rely on ip containing code entry.
766 LoadConstantPoolPointerRegister(CONSTRUCT_INTERNAL_REFERENCE);
767 #endif
768 LoadSmiLiteral(ip, Smi::FromInt(type));
769 push(ip);
770 } else {
771 LoadSmiLiteral(ip, Smi::FromInt(type));
772 PushFixedFrame(ip);
773 }
774 // Adjust FP to point to saved FP.
775 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
776
777 mov(r0, Operand(CodeObject()));
778 push(r0);
779 }
780
781
782 int MacroAssembler::LeaveFrame(StackFrame::Type type, int stack_adjustment) {
783 #if V8_OOL_CONSTANT_POOL
784 ConstantPoolUnavailableScope constant_pool_unavailable(this);
785 #endif
786 // r3: preserved
787 // r4: preserved
788 // r5: preserved
789
790 // Drop the execution stack down to the frame pointer and restore
791 // the caller frame pointer, return address and constant pool pointer.
792 int frame_ends;
793 LoadP(r0, MemOperand(fp, StandardFrameConstants::kCallerPCOffset));
794 LoadP(ip, MemOperand(fp, StandardFrameConstants::kCallerFPOffset));
795 #if V8_OOL_CONSTANT_POOL
796 const int exitOffset = ExitFrameConstants::kConstantPoolOffset;
797 const int standardOffset = StandardFrameConstants::kConstantPoolOffset;
798 const int offset = ((type == StackFrame::EXIT) ? exitOffset : standardOffset);
799 LoadP(kConstantPoolRegister, MemOperand(fp, offset));
800 #endif
801 mtlr(r0);
802 frame_ends = pc_offset();
803 Add(sp, fp, StandardFrameConstants::kCallerSPOffset + stack_adjustment, r0);
804 mr(fp, ip);
805 return frame_ends;
806 }
807
808
809 // ExitFrame layout (probably wrongish; needs updating)
810 //
811 // SP -> previousSP
812 // LK reserved
813 // code
814 // sp_on_exit (for debug?)
815 // oldSP->prev SP
816 // LK
817 // <parameters on stack>
818
819 // Prior to calling EnterExitFrame, we have a number of parameters
820 // on the stack that we need to wrap a real frame around, so first
821 // we reserve a slot for LK and push the previous SP, which is captured
822 // in the fp register (r31).
823 // Then we allocate a new frame.
824
825 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
826 // Set up the frame structure on the stack.
827 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
828 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
829 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
830 DCHECK(stack_space > 0);
831
832 // This is an opportunity to build a frame to wrap
833 // all of the pushes that have happened inside of V8
834 // since we were called from C code
835
836 // replicate ARM frame - TODO make this more closely follow PPC ABI
837 mflr(r0);
838 Push(r0, fp);
839 mr(fp, sp);
840 // Reserve room for saved entry sp and code object.
841 subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));
842
843 if (emit_debug_code()) {
844 li(r8, Operand::Zero());
845 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
846 }
847 #if V8_OOL_CONSTANT_POOL
848 StoreP(kConstantPoolRegister,
849 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
850 #endif
851 mov(r8, Operand(CodeObject()));
852 StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
853
854 // Save the frame pointer and the context in top.
855 mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
856 StoreP(fp, MemOperand(r8));
857 mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
858 StoreP(cp, MemOperand(r8));
859
860 // Optionally save all volatile double registers.
861 if (save_doubles) {
862 SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
863 // Note that d0 will be accessible at
864 // fp - ExitFrameConstants::kFrameSize -
865 // kNumVolatileRegisters * kDoubleSize,
866 // since the sp slot and code slot were pushed after the fp.
867 }
868
869 addi(sp, sp, Operand(-stack_space * kPointerSize));
870
871 // Allocate and align the frame preparing for calling the runtime
872 // function.
873 const int frame_alignment = ActivationFrameAlignment();
874 if (frame_alignment > kPointerSize) {
875 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
876 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
877 }
878 li(r0, Operand::Zero());
879 StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
880
881 // Set the exit frame sp value to point just before the return address
882 // location.
883 addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
884 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
885 }
886
887
888 void MacroAssembler::InitializeNewString(Register string, Register length,
889 Heap::RootListIndex map_index,
890 Register scratch1, Register scratch2) {
891 SmiTag(scratch1, length);
892 LoadRoot(scratch2, map_index);
893 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
894 li(scratch1, Operand(String::kEmptyHashField));
895 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
896 StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
897 }
898
899
900 int MacroAssembler::ActivationFrameAlignment() {
901 #if !defined(USE_SIMULATOR)
902 // Running on the real platform. Use the alignment as mandated by the local
903 // environment.
904 // Note: This will break if we ever start generating snapshots on one PPC
905 // platform for another PPC platform with a different alignment.
906 return base::OS::ActivationFrameAlignment();
907 #else // Simulated
908 // If we are using the simulator then we should always align to the expected
909 // alignment. As the simulator is used to generate snapshots we do not know
910 // if the target platform will need alignment, so this is controlled from a
911 // flag.
912 return FLAG_sim_stack_alignment;
913 #endif
914 }
915
916
917 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
918 bool restore_context) {
919 #if V8_OOL_CONSTANT_POOL
920 ConstantPoolUnavailableScope constant_pool_unavailable(this);
921 #endif
922 // Optionally restore all double registers.
923 if (save_doubles) {
924 // Calculate the stack location of the saved doubles and restore them.
925 const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
926 const int offset =
927 (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
928 addi(r6, fp, Operand(-offset));
929 RestoreFPRegs(r6, 0, kNumRegs);
930 }
931
932 // Clear top frame.
933 li(r6, Operand::Zero());
934 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
935 StoreP(r6, MemOperand(ip));
936
937 // Restore current context from top and clear it in debug mode.
938 if (restore_context) {
939 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
940 LoadP(cp, MemOperand(ip));
941 }
942 #ifdef DEBUG
943 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
944 StoreP(r6, MemOperand(ip));
945 #endif
946
947 // Tear down the exit frame, pop the arguments, and return.
948 LeaveFrame(StackFrame::EXIT);
949
950 if (argument_count.is_valid()) {
951 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
952 add(sp, sp, argument_count);
953 }
954 }
955
956
957 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
958 Move(dst, d1);
959 }
960
961
962 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
963 Move(dst, d1);
964 }
965
966
967 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
968 const ParameterCount& actual,
969 Handle<Code> code_constant,
970 Register code_reg, Label* done,
971 bool* definitely_mismatches,
972 InvokeFlag flag,
973 const CallWrapper& call_wrapper) {
974 bool definitely_matches = false;
975 *definitely_mismatches = false;
976 Label regular_invoke;
977
978 // Check whether the expected and actual arguments count match. If not,
979 // setup registers according to contract with ArgumentsAdaptorTrampoline:
980 // r3: actual arguments count
981 // r4: function (passed through to callee)
982 // r5: expected arguments count
983
984 // The code below is made a lot easier because the calling code already sets
985 // up actual and expected registers according to the contract if values are
986 // passed in registers.
987
988 // ARM has some sanity checks as per below; consider adding them for PPC
989 // DCHECK(actual.is_immediate() || actual.reg().is(r3));
990 // DCHECK(expected.is_immediate() || expected.reg().is(r5));
991 // DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
992 // || code_reg.is(r6));
993
994 if (expected.is_immediate()) {
995 DCHECK(actual.is_immediate());
996 if (expected.immediate() == actual.immediate()) {
997 definitely_matches = true;
998 } else {
999 mov(r3, Operand(actual.immediate()));
1000 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
1001 if (expected.immediate() == sentinel) {
1002 // Don't worry about adapting arguments for builtins that
1003 // don't want that done. Skip the adaptation code by making it look
1004 // like we have a match between expected and actual number of
1005 // arguments.
1006 definitely_matches = true;
1007 } else {
1008 *definitely_mismatches = true;
1009 mov(r5, Operand(expected.immediate()));
1010 }
1011 }
1012 } else {
1013 if (actual.is_immediate()) {
1014 cmpi(expected.reg(), Operand(actual.immediate()));
1015 beq(&regular_invoke);
1016 mov(r3, Operand(actual.immediate()));
1017 } else {
1018 cmp(expected.reg(), actual.reg());
1019 beq(&regular_invoke);
1020 }
1021 }
1022
1023 if (!definitely_matches) {
1024 if (!code_constant.is_null()) {
1025 mov(r6, Operand(code_constant));
1026 addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
1027 }
1028
1029 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
1030 if (flag == CALL_FUNCTION) {
1031 call_wrapper.BeforeCall(CallSize(adaptor));
1032 Call(adaptor);
1033 call_wrapper.AfterCall();
1034 if (!*definitely_mismatches) {
1035 b(done);
1036 }
1037 } else {
1038 Jump(adaptor, RelocInfo::CODE_TARGET);
1039 }
1040 bind(&regular_invoke);
1041 }
1042 }
1043
1044
1045 void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
1046 const ParameterCount& actual, InvokeFlag flag,
1047 const CallWrapper& call_wrapper) {
1048 // You can't call a function without a valid frame.
1049 DCHECK(flag == JUMP_FUNCTION || has_frame());
1050
1051 Label done;
1052 bool definitely_mismatches = false;
1053 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
1054 &definitely_mismatches, flag, call_wrapper);
1055 if (!definitely_mismatches) {
1056 if (flag == CALL_FUNCTION) {
1057 call_wrapper.BeforeCall(CallSize(code));
1058 CallJSEntry(code);
1059 call_wrapper.AfterCall();
1060 } else {
1061 DCHECK(flag == JUMP_FUNCTION);
1062 JumpToJSEntry(code);
1063 }
1064
1065 // Continue here if InvokePrologue does handle the invocation due to
1066 // mismatched parameter counts.
1067 bind(&done);
1068 }
1069 }
1070
1071
1072 void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
1073 InvokeFlag flag,
1074 const CallWrapper& call_wrapper) {
1075 // You can't call a function without a valid frame.
1076 DCHECK(flag == JUMP_FUNCTION || has_frame());
1077
1078 // Contract with called JS functions requires that function is passed in r4.
1079 DCHECK(fun.is(r4));
1080
1081 Register expected_reg = r5;
1082 Register code_reg = ip;
1083
1084 LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1085 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1086 LoadWordArith(expected_reg,
1087 FieldMemOperand(
1088 code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1089 #if !defined(V8_TARGET_ARCH_PPC64)
1090 SmiUntag(expected_reg);
1091 #endif
1092 LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
1093
1094 ParameterCount expected(expected_reg);
1095 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
1096 }
1097
1098
1099 void MacroAssembler::InvokeFunction(Register function,
1100 const ParameterCount& expected,
1101 const ParameterCount& actual,
1102 InvokeFlag flag,
1103 const CallWrapper& call_wrapper) {
1104 // You can't call a function without a valid frame.
1105 DCHECK(flag == JUMP_FUNCTION || has_frame());
1106
1107 // Contract with called JS functions requires that function is passed in r4.
1108 DCHECK(function.is(r4));
1109
1110 // Get the function and setup the context.
1111 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1112
1113 // We call indirectly through the code field in the function to
1114 // allow recompilation to take effect without changing any of the
1115 // call sites.
1116 LoadP(ip, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
1117 InvokeCode(ip, expected, actual, flag, call_wrapper);
1118 }
1119
1120
1121 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1122 const ParameterCount& expected,
1123 const ParameterCount& actual,
1124 InvokeFlag flag,
1125 const CallWrapper& call_wrapper) {
1126 Move(r4, function);
1127 InvokeFunction(r4, expected, actual, flag, call_wrapper);
1128 }
1129
1130
1131 void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map,
1132 Register scratch, Label* fail) {
1133 LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1134 IsInstanceJSObjectType(map, scratch, fail);
1135 }
1136
1137
1138 void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch,
1139 Label* fail) {
1140 lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1141 cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1142 blt(fail);
1143 cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1144 bgt(fail);
1145 }
1146
1147
1148 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
1149 Label* fail) {
1150 DCHECK(kNotStringTag != 0);
1151
1152 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1153 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1154 andi(r0, scratch, Operand(kIsNotStringMask));
1155 bne(fail, cr0);
1156 }
1157
1158
1159 void MacroAssembler::IsObjectNameType(Register object, Register scratch,
1160 Label* fail) {
1161 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1162 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1163 cmpi(scratch, Operand(LAST_NAME_TYPE));
1164 bgt(fail);
1165 }
1166
1167
1168 void MacroAssembler::DebugBreak() {
1169 li(r3, Operand::Zero());
1170 mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1171 CEntryStub ces(isolate(), 1);
1172 DCHECK(AllowThisStubCall(&ces));
1173 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1174 }
1175
1176
1177 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1178 int handler_index) {
1179 // Adjust this code if not the case.
1180 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1181 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1182 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1183 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1184 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1185 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1186
1187 // For the JSEntry handler, we must preserve r1-r7; r0 and r8-r15 are available.
1188 // We want the stack to look like
1189 // sp -> NextOffset
1190 // CodeObject
1191 // state
1192 // context
1193 // frame pointer
1194
1195 // Link the current handler as the next handler.
1196 mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1197 LoadP(r0, MemOperand(r8));
1198 StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize));
1199 // Set this new handler as the current one.
1200 StoreP(sp, MemOperand(r8));
1201
1202 if (kind == StackHandler::JS_ENTRY) {
1203 li(r8, Operand::Zero()); // NULL frame pointer.
1204 StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
1205 LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context.
1206 StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
1207 } else {
1208 // still not sure if fp is right
1209 StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
1210 StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
1211 }
1212 unsigned state = StackHandler::IndexField::encode(handler_index) |
1213 StackHandler::KindField::encode(kind);
1214 LoadIntLiteral(r8, state);
1215 StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
1216 mov(r8, Operand(CodeObject()));
1217 StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
1218 }
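
Note: the handler chain is a singly linked list threaded through the stack, and the push above is the usual front insertion. Sketch with a simplified layout:

  struct StackHandler {
    StackHandler* next;  // kNextOffset == 0
    // code object, state, context and frame pointer follow
  };

  void PushHandler(StackHandler** handler_address, StackHandler* handler) {
    handler->next = *handler_address;  // LoadP(r0, ...); StorePU(r0, ...)
    *handler_address = handler;        // StoreP(sp, MemOperand(r8))
  }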
1219
1220
1221 void MacroAssembler::PopTryHandler() {
1222 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1223 pop(r4);
1224 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1225 addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1226 StoreP(r4, MemOperand(ip));
1227 }
1228
1229
1230 // PPC - make use of ip as a temporary register
1231 void MacroAssembler::JumpToHandlerEntry() {
1232 // Compute the handler entry address and jump to it. The handler table is
1233 // a fixed array of (smi-tagged) code offsets.
1234 // r3 = exception, r4 = code object, r5 = state.
1235 #if V8_OOL_CONSTANT_POOL
1236 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1237 LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset));
1238 #endif
1239 LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table.
1240 addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1241 srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index.
1242 slwi(ip, r5, Operand(kPointerSizeLog2));
1243 add(ip, r6, ip);
1244 LoadP(r5, MemOperand(ip)); // Smi-tagged offset.
1245 addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1246 SmiUntag(ip, r5);
1247 add(r0, r4, ip);
1248 mtctr(r0);
1249 bctr();
1250 }
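
Note: the address arithmetic above, restated in C++ (a sketch; smi_shift is 1 under 32-bit smi tagging, and field offsets are elided):

  #include <cstdint>

  uintptr_t HandlerEntry(uintptr_t code_start, const intptr_t* handler_table,
                         unsigned state, unsigned kind_width, int smi_shift) {
    unsigned index = state >> kind_width;                 // drop the kind bits
    intptr_t offset = handler_table[index] >> smi_shift;  // SmiUntag
    return code_start + offset;                           // mtctr; bctr target
  }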
1251
1252
1253 void MacroAssembler::Throw(Register value) {
1254 // Adjust this code if not the case.
1255 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1256 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1257 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1258 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1259 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1260 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1261 Label skip;
1262
1263 // The exception is expected in r3.
1264 if (!value.is(r3)) {
1265 mr(r3, value);
1266 }
1267 // Drop the stack pointer to the top of the top handler.
1268 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1269 LoadP(sp, MemOperand(r6));
1270 // Restore the next handler.
1271 pop(r5);
1272 StoreP(r5, MemOperand(r6));
1273
1274 // Get the code object (r4) and state (r5). Restore the context and frame
1275 // pointer.
1276 pop(r4);
1277 pop(r5);
1278 pop(cp);
1279 pop(fp);
1280
1281 // If the handler is a JS frame, restore the context to the frame.
1282 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1283 // or cp.
1284 cmpi(cp, Operand::Zero());
1285 beq(&skip);
1286 StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1287 bind(&skip);
1288
1289 JumpToHandlerEntry();
1290 }
1291
1292
1293 void MacroAssembler::ThrowUncatchable(Register value) {
1294 // Adjust this code if not the case.
1295 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1296 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1297 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1298 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1299 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1300 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1301
1302 // The exception is expected in r3.
1303 if (!value.is(r3)) {
1304 mr(r3, value);
1305 }
1306 // Drop the stack pointer to the top of the top stack handler.
1307 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1308 LoadP(sp, MemOperand(r6));
1309
1310 // Unwind the handlers until the ENTRY handler is found.
1311 Label fetch_next, check_kind;
1312 b(&check_kind);
1313 bind(&fetch_next);
1314 LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1315
1316 bind(&check_kind);
1317 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1318 LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
1319 andi(r0, r5, Operand(StackHandler::KindField::kMask));
1320 bne(&fetch_next, cr0);
1321
1322 // Set the top handler address to next handler past the top ENTRY handler.
1323 pop(r5);
1324 StoreP(r5, MemOperand(r6));
1325 // Get the code object (r4) and state (r5). Clear the context and frame
1326 // pointer (0 was saved in the handler).
1327 pop(r4);
1328 pop(r5);
1329 pop(cp);
1330 pop(fp);
1331
1332 JumpToHandlerEntry();
1333 }
1334
1335
1336 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1337 Register scratch, Label* miss) {
1338 Label same_contexts;
1339
1340 DCHECK(!holder_reg.is(scratch));
1341 DCHECK(!holder_reg.is(ip));
1342 DCHECK(!scratch.is(ip));
1343
1344 // Load current lexical context from the stack frame.
1345 LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1346 // In debug mode, make sure the lexical context is set.
1347 #ifdef DEBUG
1348 cmpi(scratch, Operand::Zero());
1349 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1350 #endif
1351
1352 // Load the native context of the current context.
1353 int offset =
1354 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1355 LoadP(scratch, FieldMemOperand(scratch, offset));
1356 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1357
1358 // Check the context is a native context.
1359 if (emit_debug_code()) {
1360 // Cannot use ip as a temporary in this verification code, because ip
1361 // is clobbered as part of cmp with an object Operand.
1362 push(holder_reg); // Temporarily save holder on the stack.
1363 // Read the first word and compare to the native_context_map.
1364 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1365 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1366 cmp(holder_reg, ip);
1367 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1368 pop(holder_reg); // Restore holder.
1369 }
1370
1371 // Check if both contexts are the same.
1372 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1373 cmp(scratch, ip);
1374 beq(&same_contexts);
1375
1376 // Check the context is a native context.
1377 if (emit_debug_code()) {
1378 // Cannot use ip as a temporary in this verification code, because ip
1379 // is clobbered as part of cmp with an object Operand.
1380 push(holder_reg); // Temporarily save holder on the stack.
1381 mr(holder_reg, ip); // Move ip to its holding place.
1382 LoadRoot(ip, Heap::kNullValueRootIndex);
1383 cmp(holder_reg, ip);
1384 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1385
1386 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1387 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1388 cmp(holder_reg, ip);
1389 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1390 // Restoring ip is not needed; ip is reloaded below.
1391 pop(holder_reg); // Restore holder.
1392 // Restore ip to holder's context.
1393 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1394 }
1395
1396 // Check that the security token in the calling global object is
1397 // compatible with the security token in the receiving global
1398 // object.
1399 int token_offset =
1400 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
1401
1402 LoadP(scratch, FieldMemOperand(scratch, token_offset));
1403 LoadP(ip, FieldMemOperand(ip, token_offset));
1404 cmp(scratch, ip);
1405 bne(miss);
1406
1407 bind(&same_contexts);
1408 }
1409
1410
1411 // Compute the hash code from the untagged key. This must be kept in sync with
1412 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1413 // code-stubs-hydrogen.cc.
1414 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1415 // First of all we assign the hash seed to scratch.
1416 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1417 SmiUntag(scratch);
1418
1419 // Xor original key with a seed.
1420 xor_(t0, t0, scratch);
1421
1422 // Compute the hash code from the untagged key. This must be kept in sync
1423 // with ComputeIntegerHash in utils.h.
1424 //
1425 // hash = ~hash + (hash << 15);
1426 notx(scratch, t0);
1427 slwi(t0, t0, Operand(15));
1428 add(t0, scratch, t0);
1429 // hash = hash ^ (hash >> 12);
1430 srwi(scratch, t0, Operand(12));
1431 xor_(t0, t0, scratch);
1432 // hash = hash + (hash << 2);
1433 slwi(scratch, t0, Operand(2));
1434 add(t0, t0, scratch);
1435 // hash = hash ^ (hash >> 4);
1436 srwi(scratch, t0, Operand(4));
1437 xor_(t0, t0, scratch);
1438 // hash = hash * 2057;
1439 mr(r0, t0);
1440 slwi(scratch, t0, Operand(3));
1441 add(t0, t0, scratch);
1442 slwi(scratch, r0, Operand(11));
1443 add(t0, t0, scratch);
1444 // hash = hash ^ (hash >> 16);
1445 srwi(scratch, t0, Operand(16));
1446 xor_(t0, t0, scratch);
1447 }
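
Note: this emits, instruction by instruction, the integer hash documented in the comments (the * 2057 shows up as hash + (hash << 3) + (hash << 11)). The same function in plain C++, mirroring ComputeIntegerHash in utils.h:

  #include <cstdint>

  uint32_t ComputeIntegerHash(uint32_t key, uint32_t seed) {
    uint32_t hash = key ^ seed;
    hash = ~hash + (hash << 15);
    hash = hash ^ (hash >> 12);
    hash = hash + (hash << 2);
    hash = hash ^ (hash >> 4);
    hash = hash * 2057;
    hash = hash ^ (hash >> 16);
    return hash;
  }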
1448
1449
1450 void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
1451 Register key, Register result,
1452 Register t0, Register t1,
1453 Register t2) {
1454 // Register use:
1455 //
1456 // elements - holds the slow-case elements of the receiver on entry.
1457 // Unchanged unless 'result' is the same register.
1458 //
1459 // key - holds the smi key on entry.
1460 // Unchanged unless 'result' is the same register.
1461 //
1462 // result - holds the result on exit if the load succeeded.
1463 // Allowed to be the same as 'key' or 'result'.
1464 // Unchanged on bailout so 'key' or 'result' can be used
1465 // in further computation.
1466 //
1467 // Scratch registers:
1468 //
1469 // t0 - holds the untagged key on entry and holds the hash once computed.
1470 //
1471 // t1 - used to hold the capacity mask of the dictionary
1472 //
1473 // t2 - used for the index into the dictionary.
1474 Label done;
1475
1476 GetNumberHash(t0, t1);
1477
1478 // Compute the capacity mask.
1479 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1480 SmiUntag(t1);
1481 subi(t1, t1, Operand(1));
1482
1483 // Generate an unrolled loop that performs a few probes before giving up.
1484 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1485 // Use t2 for index calculations and keep the hash intact in t0.
1486 mr(t2, t0);
1487 // Compute the masked index: (hash + i + i * i) & mask.
1488 if (i > 0) {
1489 addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1490 }
1491 and_(t2, t2, t1);
1492
1493 // Scale the index by multiplying by the element size.
1494 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1495 slwi(ip, t2, Operand(1));
1496 add(t2, t2, ip); // t2 = t2 * 3
1497
1498 // Check if the key is identical to the name.
1499 slwi(t2, t2, Operand(kPointerSizeLog2));
1500 add(t2, elements, t2);
1501 LoadP(ip,
1502 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1503 cmp(key, ip);
1504 if (i != kNumberDictionaryProbes - 1) {
1505 beq(&done);
1506 } else {
1507 bne(miss);
1508 }
1509 }
1510
1511 bind(&done);
1512 // Check that the value is a normal property.
1513 // t2: elements + (index * kPointerSize)
1514 const int kDetailsOffset =
1515 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1516 LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
1517 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
1518 and_(r0, t1, ip, SetRC);
1519 bne(miss, cr0);
1520
1521 // Get the value at the masked, scaled index and return.
1522 const int kValueOffset =
1523 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1524 LoadP(result, FieldMemOperand(t2, kValueOffset));
1525 }
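
Note: the unrolled probes implement open addressing with quadratic probing over 3-pointer entries (key, value, details). As a loop (a sketch; the exact GetProbeOffset scaling is elided in favor of the "(hash + i + i * i) & mask" comment above):

  #include <cstdint>

  int FindEntry(const intptr_t* elements_start, intptr_t key, uint32_t hash,
                uint32_t capacity_mask, int probes) {
    for (int i = 0; i < probes; i++) {
      uint32_t index = (hash + i + i * i) & capacity_mask;
      int entry = index * 3;  // kEntrySize == 3
      if (elements_start[entry] == key) return entry;
    }
    return -1;  // caller branches to 'miss'
  }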
1526
1527
1528 void MacroAssembler::Allocate(int object_size, Register result,
1529 Register scratch1, Register scratch2,
1530 Label* gc_required, AllocationFlags flags) {
1531 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1532 if (!FLAG_inline_new) {
1533 if (emit_debug_code()) {
1534 // Trash the registers to simulate an allocation failure.
1535 li(result, Operand(0x7091));
1536 li(scratch1, Operand(0x7191));
1537 li(scratch2, Operand(0x7291));
1538 }
1539 b(gc_required);
1540 return;
1541 }
1542
1543 DCHECK(!result.is(scratch1));
1544 DCHECK(!result.is(scratch2));
1545 DCHECK(!scratch1.is(scratch2));
1546 DCHECK(!scratch1.is(ip));
1547 DCHECK(!scratch2.is(ip));
1548
1549 // Make object size into bytes.
1550 if ((flags & SIZE_IN_WORDS) != 0) {
1551 object_size *= kPointerSize;
1552 }
1553 DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1554
1555 // Check relative positions of allocation top and limit addresses.
1556 ExternalReference allocation_top =
1557 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1558 ExternalReference allocation_limit =
1559 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1560
1561 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1562 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1563 DCHECK((limit - top) == kPointerSize);
1564
1565 // Set up allocation top address register.
1566 Register topaddr = scratch1;
1567 mov(topaddr, Operand(allocation_top));
1568
1569 // This code stores a temporary value in ip. This is OK, as the code below
1570 // does not need ip for implicit literal generation.
1571 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1572 // Load allocation top into result and allocation limit into ip.
1573 LoadP(result, MemOperand(topaddr));
1574 LoadP(ip, MemOperand(topaddr, kPointerSize));
1575 } else {
1576 if (emit_debug_code()) {
1577 // Assert that result actually contains top on entry. ip is used
1578 // immediately below, so this use of ip does not cause a difference
1579 // in register content between debug and release mode.
1580 LoadP(ip, MemOperand(topaddr));
1581 cmp(result, ip);
1582 Check(eq, kUnexpectedAllocationTop);
1583 }
1584 // Load allocation limit into ip. Result already contains allocation top.
1585 LoadP(ip, MemOperand(topaddr, limit - top), r0);
1586 }
1587
1588 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1589 // Align the next allocation. Storing the filler map without checking top is
1590 // safe in new-space because the limit of the heap is aligned there.
1591 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1592 #if V8_TARGET_ARCH_PPC64
1593 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1594 #else
1595 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1596 andi(scratch2, result, Operand(kDoubleAlignmentMask));
1597 Label aligned;
1598 beq(&aligned, cr0);
1599 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1600 cmpl(result, ip);
1601 bge(gc_required);
1602 }
1603 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1604 stw(scratch2, MemOperand(result));
1605 addi(result, result, Operand(kDoubleSize / 2));
1606 bind(&aligned);
1607 #endif
1608 }
1609
1610 // Calculate new top and bail out if new space is exhausted. Use result
1611 // to calculate the new top.
1612 sub(r0, ip, result);
1613 if (is_int16(object_size)) {
1614 cmpi(r0, Operand(object_size));
1615 blt(gc_required);
1616 addi(scratch2, result, Operand(object_size));
1617 } else {
1618 Cmpi(r0, Operand(object_size), scratch2);
1619 blt(gc_required);
1620 add(scratch2, result, scratch2);
1621 }
1622 StoreP(scratch2, MemOperand(topaddr));
1623
1624 // Tag object if requested.
1625 if ((flags & TAG_OBJECT) != 0) {
1626 addi(result, result, Operand(kHeapObjectTag));
1627 }
1628 }
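
Note: minus the alignment and tagging details, this is a bump-pointer allocation against adjacent top/limit words (the DCHECK above pins them one pointer apart). Sketch:

  #include <cstddef>
  #include <cstdint>

  // top_and_limit[0] = allocation top, top_and_limit[1] = allocation limit.
  void* TryAllocate(uintptr_t* top_and_limit, size_t object_size) {
    uintptr_t top = top_and_limit[0];
    uintptr_t limit = top_and_limit[1];
    if (limit - top < object_size) return nullptr;  // -> gc_required
    top_and_limit[0] = top + object_size;           // write back new top
    return reinterpret_cast<void*>(top);
  }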
1629
1630
1631 void MacroAssembler::Allocate(Register object_size, Register result,
1632 Register scratch1, Register scratch2,
1633 Label* gc_required, AllocationFlags flags) {
1634 if (!FLAG_inline_new) {
1635 if (emit_debug_code()) {
1636 // Trash the registers to simulate an allocation failure.
1637 li(result, Operand(0x7091));
1638 li(scratch1, Operand(0x7191));
1639 li(scratch2, Operand(0x7291));
1640 }
1641 b(gc_required);
1642 return;
1643 }
1644
1645 // Assert that the register arguments are different and that none of
1646 // them are ip. ip is used explicitly in the code generated below.
1647 DCHECK(!result.is(scratch1));
1648 DCHECK(!result.is(scratch2));
1649 DCHECK(!scratch1.is(scratch2));
1650 DCHECK(!object_size.is(ip));
1651 DCHECK(!result.is(ip));
1652 DCHECK(!scratch1.is(ip));
1653 DCHECK(!scratch2.is(ip));
1654
1655 // Check relative positions of allocation top and limit addresses.
1656 ExternalReference allocation_top =
1657 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1658 ExternalReference allocation_limit =
1659 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1660 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1661 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1662 DCHECK((limit - top) == kPointerSize);
1663
1664 // Set up allocation top address.
1665 Register topaddr = scratch1;
1666 mov(topaddr, Operand(allocation_top));
1667
1668 // This code stores a temporary value in ip. This is OK, as the code below
1669 // does not need ip for implicit literal generation.
1670 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1671 // Load allocation top into result and allocation limit into ip.
1672 LoadP(result, MemOperand(topaddr));
1673 LoadP(ip, MemOperand(topaddr, kPointerSize));
1674 } else {
1675 if (emit_debug_code()) {
1676 // Assert that result actually contains top on entry. ip is used
1677 // immediately below, so this use of ip does not cause a difference in
1678 // register content between debug and release mode.
1679 LoadP(ip, MemOperand(topaddr));
1680 cmp(result, ip);
1681 Check(eq, kUnexpectedAllocationTop);
1682 }
1683 // Load allocation limit into ip. Result already contains allocation top.
1684 LoadP(ip, MemOperand(topaddr, limit - top));
1685 }
1686
1687 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1688 // Align the next allocation. Storing the filler map without checking top is
1689 // safe in new-space because the limit of the heap is aligned there.
1690 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1691 #if V8_TARGET_ARCH_PPC64
1692 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1693 #else
1694 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1695 andi(scratch2, result, Operand(kDoubleAlignmentMask));
1696 Label aligned;
1697 beq(&aligned, cr0);
1698 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1699 cmpl(result, ip);
1700 bge(gc_required);
1701 }
1702 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1703 stw(scratch2, MemOperand(result));
1704 addi(result, result, Operand(kDoubleSize / 2));
1705 bind(&aligned);
1706 #endif
1707 }
1708
1709 // Calculate new top and bail out if new space is exhausted. Use result
1710 // to calculate the new top. Object size may be in words, so a shift is
1711 // required to get the number of bytes.
1712 sub(r0, ip, result);
1713 if ((flags & SIZE_IN_WORDS) != 0) {
1714 ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
1715 cmp(r0, scratch2);
1716 blt(gc_required);
1717 add(scratch2, result, scratch2);
1718 } else {
1719 cmp(r0, object_size);
1720 blt(gc_required);
1721 add(scratch2, result, object_size);
1722 }
1723
1724 // Update allocation top. scratch2 temporarily holds the new top.
1725 if (emit_debug_code()) {
1726 andi(r0, scratch2, Operand(kObjectAlignmentMask));
1727 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1728 }
1729 StoreP(scratch2, MemOperand(topaddr));
1730
1731 // Tag object if requested.
1732 if ((flags & TAG_OBJECT) != 0) {
1733 addi(result, result, Operand(kHeapObjectTag));
1734 }
1735 }
1736
1737
1738 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1739 Register scratch) {
1740 ExternalReference new_space_allocation_top =
1741 ExternalReference::new_space_allocation_top_address(isolate());
1742
1743 // Make sure the object has no tag before resetting top.
1744 mov(r0, Operand(~kHeapObjectTagMask));
1745 and_(object, object, r0);
1746 // (andi cannot encode the full-width mask, hence the mov/and_ pair above.)
1747 #ifdef DEBUG
1748 // Check that the object to un-allocate is below the current top.
1749 mov(scratch, Operand(new_space_allocation_top));
1750 LoadP(scratch, MemOperand(scratch));
1751 cmp(object, scratch);
1752 Check(lt, kUndoAllocationOfNonAllocatedMemory);
1753 #endif
1754 // Write the address of the object to un-allocate as the current top.
1755 mov(scratch, Operand(new_space_allocation_top));
1756 StoreP(object, MemOperand(scratch));
1757 }
1758
1759
1760 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
1761 Register scratch1, Register scratch2,
1762 Register scratch3,
1763 Label* gc_required) {
1764 // Calculate the number of bytes needed for the characters in the string while
1765 // observing object alignment.
1766 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1767 slwi(scratch1, length, Operand(1)); // Length in bytes, not chars.
1768 addi(scratch1, scratch1,
1769 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1770 mov(r0, Operand(~kObjectAlignmentMask));
1771 and_(scratch1, scratch1, r0);
1772
1773 // Allocate two-byte string in new space.
1774 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1775
1776 // Set the map, length and hash field.
1777 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
1778 scratch2);
1779 }
1780
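// The slwi/addi/and_ sequence above is the usual round-up-to-alignment idiom:
//   size = (2 * length + SeqTwoByteString::kHeaderSize + kObjectAlignmentMask)
//          & ~kObjectAlignmentMask;
// For example, with 8-byte alignment (mask 7) and a 16-byte header (an
// illustrative value), 5 characters give 10 + 16 = 26 bytes, rounded up to 32.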
1781
1782 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1783 Register scratch1, Register scratch2,
1784 Register scratch3,
1785 Label* gc_required) {
1786 // Calculate the number of bytes needed for the characters in the string while
1787 // observing object alignment.
1788 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1789 DCHECK(kCharSize == 1);
1790 addi(scratch1, length,
1791 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1792 li(r0, Operand(~kObjectAlignmentMask));
1793 and_(scratch1, scratch1, r0);
1794
1795 // Allocate one-byte string in new space.
1796 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1797
1798 // Set the map, length and hash field.
1799 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
1800 scratch1, scratch2);
1801 }
1802
1803
1804 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
1805 Register scratch1,
1806 Register scratch2,
1807 Label* gc_required) {
1808 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1809 TAG_OBJECT);
1810
1811 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
1812 scratch2);
1813 }
1814
1815
1816 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
1817 Register scratch1,
1818 Register scratch2,
1819 Label* gc_required) {
1820 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1821 TAG_OBJECT);
1822
1823 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
1824 scratch1, scratch2);
1825 }
1826
1827
1828 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1829 Register length,
1830 Register scratch1,
1831 Register scratch2,
1832 Label* gc_required) {
1833 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1834 TAG_OBJECT);
1835
1836 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
1837 scratch2);
1838 }
1839
1840
1841 void MacroAssembler::AllocateOneByteSlicedString(Register result,
1842 Register length,
1843 Register scratch1,
1844 Register scratch2,
1845 Label* gc_required) {
1846 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1847 TAG_OBJECT);
1848
1849 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
1850 scratch1, scratch2);
1851 }
1852
1853
1854 void MacroAssembler::CompareObjectType(Register object, Register map,
1855 Register type_reg, InstanceType type) {
1856 const Register temp = type_reg.is(no_reg) ? r0 : type_reg;
1857
1858 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1859 CompareInstanceType(map, temp, type);
1860 }
1861
1862
1863 void MacroAssembler::CheckObjectTypeRange(Register object, Register map,
1864 InstanceType min_type,
1865 InstanceType max_type,
1866 Label* false_label) {
1867 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1868 STATIC_ASSERT(LAST_TYPE < 256);
1869 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1870 lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
1871 subi(ip, ip, Operand(min_type));
1872 cmpli(ip, Operand(max_type - min_type));
1873 bgt(false_label);
1874 }
1875
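// The subi/cmpli pair above is the classic single-comparison range check: in
// unsigned arithmetic, min <= x && x <= max holds exactly when
// (x - min) <= (max - min), because any x below min wraps around to a huge
// unsigned value. A host-C++ sketch (illustrative only):
static inline bool InRange(unsigned x, unsigned min, unsigned max) {
  return x - min <= max - min;  // one unsigned compare instead of two
}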
1876
1877 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1878 InstanceType type) {
1879 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1880 STATIC_ASSERT(LAST_TYPE < 256);
1881 lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1882 cmpi(type_reg, Operand(type));
1883 }
1884
1885
1886 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
1887 DCHECK(!obj.is(r0));
1888 LoadRoot(r0, index);
1889 cmp(obj, r0);
1890 }
1891
1892
1893 void MacroAssembler::CheckFastElements(Register map, Register scratch,
1894 Label* fail) {
1895 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1896 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1897 STATIC_ASSERT(FAST_ELEMENTS == 2);
1898 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1899 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1900 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
1901 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1902 bgt(fail);
1903 }
1904
1905
1906 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
1907 Label* fail) {
1908 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1909 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1910 STATIC_ASSERT(FAST_ELEMENTS == 2);
1911 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1912 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1913 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1914 ble(fail);
1915 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1916 bgt(fail);
1917 }
1918
1919
1920 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
1921 Label* fail) {
1922 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1923 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1924 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1925 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1926 bgt(fail);
1927 }
1928
1929
1930 void MacroAssembler::StoreNumberToDoubleElements(
1931 Register value_reg, Register key_reg, Register elements_reg,
1932 Register scratch1, DoubleRegister double_scratch, Label* fail,
1933 int elements_offset) {
1934 Label smi_value, store;
1935
1936 // Handle smi values specially.
1937 JumpIfSmi(value_reg, &smi_value);
1938
1939 // Ensure that the object is a heap number.
1940 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
1941 DONT_DO_SMI_CHECK);
1942
1943 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
1944 // Force a canonical NaN.
1945 CanonicalizeNaN(double_scratch);
1946 b(&store);
1947
1948 bind(&smi_value);
1949 SmiToDouble(double_scratch, value_reg);
1950
1951 bind(&store);
1952 SmiToDoubleArrayOffset(scratch1, key_reg);
1953 add(scratch1, elements_reg, scratch1);
1954 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
1955 elements_offset));
1956 }
1957
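// Canonicalizing the NaN above matters because FixedDoubleArray reserves a
// specific NaN bit pattern as its "hole" sentinel; collapsing every incoming
// NaN to the canonical quiet NaN guarantees a stored value can never alias
// the hole. A host-C++ sketch of the canonical pattern (illustrative only;
// assumes <cstdint> and <cstring>):
static inline double CanonicalNaN() {
  const uint64_t kBits = 0x7FF8000000000000ULL;  // canonical quiet NaN
  double result;
  memcpy(&result, &kBits, sizeof(result));
  return result;
}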
1958
1959 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1960 Register right,
1961 Register overflow_dst,
1962 Register scratch) {
1963 DCHECK(!dst.is(overflow_dst));
1964 DCHECK(!dst.is(scratch));
1965 DCHECK(!overflow_dst.is(scratch));
1966 DCHECK(!overflow_dst.is(left));
1967 DCHECK(!overflow_dst.is(right));
1968
1969 // C = A + B; C overflows if A and B have the same sign and C's sign differs.
1970 if (dst.is(left)) {
1971 mr(scratch, left); // Preserve left.
1972 add(dst, left, right); // Left is overwritten.
1973 xor_(scratch, dst, scratch); // Original left.
1974 xor_(overflow_dst, dst, right);
1975 } else if (dst.is(right)) {
1976 mr(scratch, right); // Preserve right.
1977 add(dst, left, right); // Right is overwritten.
1978 xor_(scratch, dst, scratch); // Original right.
1979 xor_(overflow_dst, dst, left);
1980 } else {
1981 add(dst, left, right);
1982 xor_(overflow_dst, dst, left);
1983 xor_(scratch, dst, right);
1984 }
1985 and_(overflow_dst, scratch, overflow_dst, SetRC);
1986 }
1987
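// The xor/xor/and sequences above compute the textbook signed-add overflow
// predicate: for s = a + b, overflow occurred exactly when a and b share a
// sign that s does not, i.e. when ((a ^ s) & (b ^ s)) has its sign bit set.
// A host-C++ sketch (illustrative only; assumes <cstdint>):
static inline bool AddOverflows(intptr_t a, intptr_t b) {
  // Add via unsigned to get well-defined wraparound, then inspect sign bits.
  intptr_t s = static_cast<intptr_t>(static_cast<uintptr_t>(a) +
                                     static_cast<uintptr_t>(b));
  return ((a ^ s) & (b ^ s)) < 0;  // sign bit set => overflow
}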
1988
1989 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1990 intptr_t right,
1991 Register overflow_dst,
1992 Register scratch) {
1993 Register original_left = left;
1994 DCHECK(!dst.is(overflow_dst));
1995 DCHECK(!dst.is(scratch));
1996 DCHECK(!overflow_dst.is(scratch));
1997 DCHECK(!overflow_dst.is(left));
1998
1999 // C = A + B; C overflows if A and B have the same sign and C's sign differs.
2000 if (dst.is(left)) {
2001 // Preserve left.
2002 original_left = overflow_dst;
2003 mr(original_left, left);
2004 }
2005 Add(dst, left, right, scratch);
2006 xor_(overflow_dst, dst, original_left);
2007 if (right >= 0) {
2008 and_(overflow_dst, overflow_dst, dst, SetRC);
2009 } else {
2010 andc(overflow_dst, overflow_dst, dst, SetRC);
2011 }
2012 }
2013
2014
2015 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
2016 Register right,
2017 Register overflow_dst,
2018 Register scratch) {
2019 DCHECK(!dst.is(overflow_dst));
2020 DCHECK(!dst.is(scratch));
2021 DCHECK(!overflow_dst.is(scratch));
2022 DCHECK(!overflow_dst.is(left));
2023 DCHECK(!overflow_dst.is(right));
2024
2025 // C = A - B; C overflows if A and B differ in sign and C's sign differs from A's.
2026 if (dst.is(left)) {
2027 mr(scratch, left); // Preserve left.
2028 sub(dst, left, right); // Left is overwritten.
2029 xor_(overflow_dst, dst, scratch);
2030 xor_(scratch, scratch, right);
2031 and_(overflow_dst, overflow_dst, scratch, SetRC);
2032 } else if (dst.is(right)) {
2033 mr(scratch, right); // Preserve right.
2034 sub(dst, left, right); // Right is overwritten.
2035 xor_(overflow_dst, dst, left);
2036 xor_(scratch, left, scratch);
2037 and_(overflow_dst, overflow_dst, scratch, SetRC);
2038 } else {
2039 sub(dst, left, right);
2040 xor_(overflow_dst, dst, left);
2041 xor_(scratch, left, right);
2042 and_(overflow_dst, scratch, overflow_dst, SetRC);
2043 }
2044 }
2045
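// The matching predicate for subtraction: for d = a - b, overflow occurred
// exactly when a and b differ in sign and d's sign differs from a's, i.e.
// when ((a ^ b) & (a ^ d)) has its sign bit set -- which is what each of the
// register-aliasing cases above computes.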
2046
2047 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
2048 Label* early_success) {
2049 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2050 CompareMap(scratch, map, early_success);
2051 }
2052
2053
2054 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
2055 Label* early_success) {
2056 mov(r0, Operand(map));
2057 cmp(obj_map, r0);
2058 }
2059
2060
2061 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
2062 Label* fail, SmiCheckType smi_check_type) {
2063 if (smi_check_type == DO_SMI_CHECK) {
2064 JumpIfSmi(obj, fail);
2065 }
2066
2067 Label success;
2068 CompareMap(obj, scratch, map, &success);
2069 bne(fail);
2070 bind(&success);
2071 }
2072
2073
2074 void MacroAssembler::CheckMap(Register obj, Register scratch,
2075 Heap::RootListIndex index, Label* fail,
2076 SmiCheckType smi_check_type) {
2077 if (smi_check_type == DO_SMI_CHECK) {
2078 JumpIfSmi(obj, fail);
2079 }
2080 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2081 LoadRoot(r0, index);
2082 cmp(scratch, r0);
2083 bne(fail);
2084 }
2085
2086
2087 void MacroAssembler::DispatchMap(Register obj, Register scratch,
2088 Handle<Map> map, Handle<Code> success,
2089 SmiCheckType smi_check_type) {
2090 Label fail;
2091 if (smi_check_type == DO_SMI_CHECK) {
2092 JumpIfSmi(obj, &fail);
2093 }
2094 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2095 mov(r0, Operand(map));
2096 cmp(scratch, r0);
2097 bne(&fail);
2098 Jump(success, RelocInfo::CODE_TARGET, al);
2099 bind(&fail);
2100 }
2101
2102
2103 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2104 Register scratch, Label* miss,
2105 bool miss_on_bound_function) {
2106 Label non_instance;
2107 if (miss_on_bound_function) {
2108 // Check that the receiver isn't a smi.
2109 JumpIfSmi(function, miss);
2110
2111 // Check that the function really is a function. Load map into result reg.
2112 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2113 bne(miss);
2114
2115 LoadP(scratch,
2116 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2117 lwz(scratch,
2118 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2119 TestBit(scratch,
2120 #if V8_TARGET_ARCH_PPC64
2121 SharedFunctionInfo::kBoundFunction,
2122 #else
2123 SharedFunctionInfo::kBoundFunction + kSmiTagSize,
2124 #endif
2125 r0);
2126 bne(miss, cr0);
2127
2128 // Make sure that the function has an instance prototype.
2129 lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2130 andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2131 bne(&non_instance, cr0);
2132 }
2133
2134 // Get the prototype or initial map from the function.
2135 LoadP(result,
2136 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2137
2138 // If the prototype or initial map is the hole, don't return it and
2139 // simply miss the cache instead. This will allow us to allocate a
2140 // prototype object on-demand in the runtime system.
2141 LoadRoot(r0, Heap::kTheHoleValueRootIndex);
2142 cmp(result, r0);
2143 beq(miss);
2144
2145 // If the function does not have an initial map, we're done.
2146 Label done;
2147 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2148 bne(&done);
2149
2150 // Get the prototype from the initial map.
2151 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2152
2153 if (miss_on_bound_function) {
2154 b(&done);
2155
2156 // Non-instance prototype: Fetch prototype from constructor field
2157 // in initial map.
2158 bind(&non_instance);
2159 LoadP(result, FieldMemOperand(result, Map::kConstructorOffset));
2160 }
2161
2162 // All done.
2163 bind(&done);
2164 }
2165
2166
2167 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2168 Condition cond) {
2169 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2170 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2171 }
2172
2173
2174 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2175 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2176 }
2177
2178
2179 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2180 return ref0.address() - ref1.address();
2181 }
2182
2183
2184 void MacroAssembler::CallApiFunctionAndReturn(
2185 Register function_address, ExternalReference thunk_ref, int stack_space,
2186 MemOperand return_value_operand, MemOperand* context_restore_operand) {
2187 ExternalReference next_address =
2188 ExternalReference::handle_scope_next_address(isolate());
2189 const int kNextOffset = 0;
2190 const int kLimitOffset = AddressOffset(
2191 ExternalReference::handle_scope_limit_address(isolate()), next_address);
2192 const int kLevelOffset = AddressOffset(
2193 ExternalReference::handle_scope_level_address(isolate()), next_address);
2194
2195 DCHECK(function_address.is(r4) || function_address.is(r5));
2196 Register scratch = r6;
2197
2198 Label profiler_disabled;
2199 Label end_profiler_check;
2200 mov(scratch, Operand(ExternalReference::is_profiling_address(isolate())));
2201 lbz(scratch, MemOperand(scratch, 0));
2202 cmpi(scratch, Operand::Zero());
2203 beq(&profiler_disabled);
2204
2205 // Additional parameter is the address of the actual callback.
2206 mov(scratch, Operand(thunk_ref));
2207 jmp(&end_profiler_check);
2208
2209 bind(&profiler_disabled);
2210 mr(scratch, function_address);
2211 bind(&end_profiler_check);
2212
2213 // Allocate HandleScope in callee-save registers.
2214 // r17 - next_address
2215 // r14 - next_address->kNextOffset
2216 // r15 - next_address->kLimitOffset
2217 // r16 - next_address->kLevelOffset
2218 mov(r17, Operand(next_address));
2219 LoadP(r14, MemOperand(r17, kNextOffset));
2220 LoadP(r15, MemOperand(r17, kLimitOffset));
2221 lwz(r16, MemOperand(r17, kLevelOffset));
2222 addi(r16, r16, Operand(1));
2223 stw(r16, MemOperand(r17, kLevelOffset));
2224
2225 if (FLAG_log_timer_events) {
2226 FrameScope frame(this, StackFrame::MANUAL);
2227 PushSafepointRegisters();
2228 PrepareCallCFunction(1, r3);
2229 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2230 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2231 PopSafepointRegisters();
2232 }
2233
2234 // The native call returns to the DirectCEntry stub, which redirects to
2235 // the return address pushed on the stack (it could have moved after GC).
2236 // The DirectCEntry stub itself is generated early and never moves.
2237 DirectCEntryStub stub(isolate());
2238 stub.GenerateCall(this, scratch);
2239
2240 if (FLAG_log_timer_events) {
2241 FrameScope frame(this, StackFrame::MANUAL);
2242 PushSafepointRegisters();
2243 PrepareCallCFunction(1, r3);
2244 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2245 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2246 PopSafepointRegisters();
2247 }
2248
2249 Label promote_scheduled_exception;
2250 Label exception_handled;
2251 Label delete_allocated_handles;
2252 Label leave_exit_frame;
2253 Label return_value_loaded;
2254
2255 // load value from ReturnValue
2256 LoadP(r3, return_value_operand);
2257 bind(&return_value_loaded);
2258 // No more valid handles (the result handle was the last one). Restore
2259 // previous handle scope.
2260 StoreP(r14, MemOperand(r17, kNextOffset));
2261 if (emit_debug_code()) {
2262 lwz(r4, MemOperand(r17, kLevelOffset));
2263 cmp(r4, r16);
2264 Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2265 }
2266 subi(r16, r16, Operand(1));
2267 stw(r16, MemOperand(r17, kLevelOffset));
2268 LoadP(r0, MemOperand(r17, kLimitOffset));
2269 cmp(r15, r0);
2270 bne(&delete_allocated_handles);
2271
2272 // Check if the function scheduled an exception.
2273 bind(&leave_exit_frame);
2274 LoadRoot(r14, Heap::kTheHoleValueRootIndex);
2275 mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate())));
2276 LoadP(r15, MemOperand(r15));
2277 cmp(r14, r15);
2278 bne(&promote_scheduled_exception);
2279 bind(&exception_handled);
2280
2281 bool restore_context = context_restore_operand != NULL;
2282 if (restore_context) {
2283 LoadP(cp, *context_restore_operand);
2284 }
2285 // LeaveExitFrame expects unwind space to be in a register.
2286 mov(r14, Operand(stack_space));
2287 LeaveExitFrame(false, r14, !restore_context);
2288 blr();
2289
2290 bind(&promote_scheduled_exception);
2291 {
2292 FrameScope frame(this, StackFrame::INTERNAL);
2293 CallExternalReference(
2294 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
2295 }
2296 jmp(&exception_handled);
2297
2298 // HandleScope limit has changed. Delete allocated extensions.
2299 bind(&delete_allocated_handles);
2300 StoreP(r15, MemOperand(r17, kLimitOffset));
2301 mr(r14, r3);
2302 PrepareCallCFunction(1, r15);
2303 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2304 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
2305 1);
2306 mr(r3, r14);
2307 b(&leave_exit_frame);
2308 }
2309
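// The r14/r15/r16 bookkeeping above follows the HandleScope save/restore
// protocol around the native call, roughly (pseudo-code; the field names are
// descriptive, not the real API):
//
//   saved_next = scope.next; saved_limit = scope.limit; ++scope.level;
//   result = call_native();
//   scope.next = saved_next; --scope.level;
//   if (scope.limit != saved_limit) DeleteExtensions();  // slow path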
2310
2311 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2312 return has_frame_ || !stub->SometimesSetsUpAFrame();
2313 }
2314
2315
2316 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2317 // If the hash field contains an array index, pick it out. The assert checks
2318 // that the constants for the maximum number of digits for an array index
2319 // cached in the hash field and the number of bits reserved for it do not
2320 // conflict.
2321 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2322 (1 << String::kArrayIndexValueBits));
2323 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2324 }
2325
2326
2327 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
2328 SmiUntag(ip, smi);
2329 ConvertIntToDouble(ip, value);
2330 }
2331
2332
2333 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2334 Register scratch1, Register scratch2,
2335 DoubleRegister double_scratch) {
2336 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2337 }
2338
2339
2340 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2341 DoubleRegister double_input,
2342 Register scratch,
2343 DoubleRegister double_scratch) {
2344 Label done;
2345 DCHECK(!double_input.is(double_scratch));
2346
2347 ConvertDoubleToInt64(double_input,
2348 #if !V8_TARGET_ARCH_PPC64
2349 scratch,
2350 #endif
2351 result, double_scratch);
2352
2353 #if V8_TARGET_ARCH_PPC64
2354 TestIfInt32(result, scratch, r0);
2355 #else
2356 TestIfInt32(scratch, result, r0);
2357 #endif
2358 bne(&done);
2359
2360 // convert back and compare
2361 fcfid(double_scratch, double_scratch);
2362 fcmpu(double_scratch, double_input);
2363 bind(&done);
2364 }
2365
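// The fcfid/fcmpu pair above is the standard exactness test: truncate to an
// integer, convert back, and compare with the original. For example, 2.5
// truncates to 2, which converts back to 2.0 != 2.5 (inexact), while
// 2.0 -> 2 -> 2.0 compares equal (exact).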
2366
2367 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2368 Register input_high, Register scratch,
2369 DoubleRegister double_scratch, Label* done,
2370 Label* exact) {
2371 DCHECK(!result.is(input_high));
2372 DCHECK(!double_input.is(double_scratch));
2373 Label exception;
2374
2375 MovDoubleHighToInt(input_high, double_input);
2376
2377 // Test for NaN/Inf
2378 ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2379 cmpli(result, Operand(0x7ff));
2380 beq(&exception);
2381
2382 // Convert (rounding to -Inf)
2383 ConvertDoubleToInt64(double_input,
2384 #if !V8_TARGET_ARCH_PPC64
2385 scratch,
2386 #endif
2387 result, double_scratch, kRoundToMinusInf);
2388
2389 // Test for overflow
2390 #if V8_TARGET_ARCH_PPC64
2391 TestIfInt32(result, scratch, r0);
2392 #else
2393 TestIfInt32(scratch, result, r0);
2394 #endif
2395 bne(&exception);
2396
2397 // Test for exactness
2398 fcfid(double_scratch, double_scratch);
2399 fcmpu(double_scratch, double_input);
2400 beq(exact);
2401 b(done);
2402
2403 bind(&exception);
2404 }
2405
2406
2407 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2408 DoubleRegister double_input,
2409 Label* done) {
2410 DoubleRegister double_scratch = kScratchDoubleReg;
2411 Register scratch = ip;
2412
2413 ConvertDoubleToInt64(double_input,
2414 #if !V8_TARGET_ARCH_PPC64
2415 scratch,
2416 #endif
2417 result, double_scratch);
2418
2419 // Test for overflow
2420 #if V8_TARGET_ARCH_PPC64
2421 TestIfInt32(result, scratch, r0);
2422 #else
2423 TestIfInt32(scratch, result, r0);
2424 #endif
2425 beq(done);
2426 }
2427
2428
2429 void MacroAssembler::TruncateDoubleToI(Register result,
2430 DoubleRegister double_input) {
2431 Label done;
2432
2433 TryInlineTruncateDoubleToI(result, double_input, &done);
2434
2435 // If we fell through, the inline version didn't succeed; call the stub instead.
2436 mflr(r0);
2437 push(r0);
2438 // Put input on stack.
2439 stfdu(double_input, MemOperand(sp, -kDoubleSize));
2440
2441 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2442 CallStub(&stub);
2443
2444 addi(sp, sp, Operand(kDoubleSize));
2445 pop(r0);
2446 mtlr(r0);
2447
2448 bind(&done);
2449 }
2450
2451
2452 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2453 Label done;
2454 DoubleRegister double_scratch = kScratchDoubleReg;
2455 DCHECK(!result.is(object));
2456
2457 lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2458 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2459
2460 // If we fell through, the inline version didn't succeed; call the stub instead.
2461 mflr(r0);
2462 push(r0);
2463 DoubleToIStub stub(isolate(), object, result,
2464 HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2465 CallStub(&stub);
2466 pop(r0);
2467 mtlr(r0);
2468
2469 bind(&done);
2470 }
2471
2472
2473 void MacroAssembler::TruncateNumberToI(Register object, Register result,
2474 Register heap_number_map,
2475 Register scratch1, Label* not_number) {
2476 Label done;
2477 DCHECK(!result.is(object));
2478
2479 UntagAndJumpIfSmi(result, object, &done);
2480 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2481 TruncateHeapNumberToI(result, object);
2482
2483 bind(&done);
2484 }
2485
2486
2487 void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2488 int num_least_bits) {
2489 #if V8_TARGET_ARCH_PPC64
2490 rldicl(dst, src, kBitsPerPointer - kSmiShift,
2491 kBitsPerPointer - num_least_bits);
2492 #else
2493 rlwinm(dst, src, kBitsPerPointer - kSmiShift,
2494 kBitsPerPointer - num_least_bits, 31);
2495 #endif
2496 }
2497
2498
2499 void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2500 int num_least_bits) {
2501 rlwinm(dst, src, 0, 32 - num_least_bits, 31);
2502 }
2503
2504
2505 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2506 SaveFPRegsMode save_doubles) {
2507 // All parameters are on the stack. r3 has the return value after call.
2508
2509 // If the expected number of arguments of the runtime function is
2510 // constant, we check that the actual number of arguments match the
2511 // expectation.
2512 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2513
2514 // TODO(1236192): Most runtime routines don't need the number of
2515 // arguments passed in because it is constant. At some point we
2516 // should remove this need and make the runtime routine entry code
2517 // smarter.
2518 mov(r3, Operand(num_arguments));
2519 mov(r4, Operand(ExternalReference(f, isolate())));
2520 CEntryStub stub(isolate(),
2521 #if V8_TARGET_ARCH_PPC64
2522 f->result_size,
2523 #else
2524 1,
2525 #endif
2526 save_doubles);
2527 CallStub(&stub);
2528 }
2529
2530
2531 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2532 int num_arguments) {
2533 mov(r3, Operand(num_arguments));
2534 mov(r4, Operand(ext));
2535
2536 CEntryStub stub(isolate(), 1);
2537 CallStub(&stub);
2538 }
2539
2540
2541 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2542 int num_arguments,
2543 int result_size) {
2544 // TODO(1236192): Most runtime routines don't need the number of
2545 // arguments passed in because it is constant. At some point we
2546 // should remove this need and make the runtime routine entry code
2547 // smarter.
2548 mov(r3, Operand(num_arguments));
2549 JumpToExternalReference(ext);
2550 }
2551
2552
2553 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
2554 int result_size) {
2555 TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
2556 result_size);
2557 }
2558
2559
2560 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2561 mov(r4, Operand(builtin));
2562 CEntryStub stub(isolate(), 1);
2563 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2564 }
2565
2566
2567 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
2568 const CallWrapper& call_wrapper) {
2569 // You can't call a builtin without a valid frame.
2570 DCHECK(flag == JUMP_FUNCTION || has_frame());
2571
2572 GetBuiltinEntry(ip, id);
2573 if (flag == CALL_FUNCTION) {
2574 call_wrapper.BeforeCall(CallSize(ip));
2575 CallJSEntry(ip);
2576 call_wrapper.AfterCall();
2577 } else {
2578 DCHECK(flag == JUMP_FUNCTION);
2579 JumpToJSEntry(ip);
2580 }
2581 }
2582
2583
2584 void MacroAssembler::GetBuiltinFunction(Register target,
2585 Builtins::JavaScript id) {
2586 // Load the builtins object into target register.
2587 LoadP(target,
2588 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2589 LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2590 // Load the JavaScript builtin function from the builtins object.
2591 LoadP(target,
2592 FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)),
2593 r0);
2594 }
2595
2596
2597 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2598 DCHECK(!target.is(r4));
2599 GetBuiltinFunction(r4, id);
2600 // Load the code entry point from the builtins object.
2601 LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
2602 }
2603
2604
2605 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2606 Register scratch1, Register scratch2) {
2607 if (FLAG_native_code_counters && counter->Enabled()) {
2608 mov(scratch1, Operand(value));
2609 mov(scratch2, Operand(ExternalReference(counter)));
2610 stw(scratch1, MemOperand(scratch2));
2611 }
2612 }
2613
2614
2615 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2616 Register scratch1, Register scratch2) {
2617 DCHECK(value > 0);
2618 if (FLAG_native_code_counters && counter->Enabled()) {
2619 mov(scratch2, Operand(ExternalReference(counter)));
2620 lwz(scratch1, MemOperand(scratch2));
2621 addi(scratch1, scratch1, Operand(value));
2622 stw(scratch1, MemOperand(scratch2));
2623 }
2624 }
2625
2626
2627 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2628 Register scratch1, Register scratch2) {
2629 DCHECK(value > 0);
2630 if (FLAG_native_code_counters && counter->Enabled()) {
2631 mov(scratch2, Operand(ExternalReference(counter)));
2632 lwz(scratch1, MemOperand(scratch2));
2633 subi(scratch1, scratch1, Operand(value));
2634 stw(scratch1, MemOperand(scratch2));
2635 }
2636 }
2637
2638
2639 void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2640 CRegister cr) {
2641 if (emit_debug_code()) Check(cond, reason, cr);
2642 }
2643
2644
2645 void MacroAssembler::AssertFastElements(Register elements) {
2646 if (emit_debug_code()) {
2647 DCHECK(!elements.is(r0));
2648 Label ok;
2649 push(elements);
2650 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2651 LoadRoot(r0, Heap::kFixedArrayMapRootIndex);
2652 cmp(elements, r0);
2653 beq(&ok);
2654 LoadRoot(r0, Heap::kFixedDoubleArrayMapRootIndex);
2655 cmp(elements, r0);
2656 beq(&ok);
2657 LoadRoot(r0, Heap::kFixedCOWArrayMapRootIndex);
2658 cmp(elements, r0);
2659 beq(&ok);
2660 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2661 bind(&ok);
2662 pop(elements);
2663 }
2664 }
2665
2666
2667 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2668 Label L;
2669 b(cond, &L, cr);
2670 Abort(reason);
2671 // will not return here
2672 bind(&L);
2673 }
2674
2675
2676 void MacroAssembler::Abort(BailoutReason reason) {
2677 Label abort_start;
2678 bind(&abort_start);
2679 #ifdef DEBUG
2680 const char* msg = GetBailoutReason(reason);
2681 if (msg != NULL) {
2682 RecordComment("Abort message: ");
2683 RecordComment(msg);
2684 }
2685
2686 if (FLAG_trap_on_abort) {
2687 stop(msg);
2688 return;
2689 }
2690 #endif
2691
2692 LoadSmiLiteral(r0, Smi::FromInt(reason));
2693 push(r0);
2694 // Disable stub call restrictions to always allow calls to abort.
2695 if (!has_frame_) {
2696 // We don't actually want to generate a pile of code for this, so just
2697 // claim there is a stack frame, without generating one.
2698 FrameScope scope(this, StackFrame::NONE);
2699 CallRuntime(Runtime::kAbort, 1);
2700 } else {
2701 CallRuntime(Runtime::kAbort, 1);
2702 }
2703 // will not return here
2704 }
2705
2706
2707 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2708 if (context_chain_length > 0) {
2709 // Move up the chain of contexts to the context containing the slot.
2710 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2711 for (int i = 1; i < context_chain_length; i++) {
2712 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2713 }
2714 } else {
2715 // Slot is in the current function context. Move it into the
2716 // destination register in case we store into it (the write barrier
2717 // cannot be allowed to destroy the context in cp).
2718 mr(dst, cp);
2719 }
2720 }
2721
2722
2723 void MacroAssembler::LoadTransitionedArrayMapConditional(
2724 ElementsKind expected_kind, ElementsKind transitioned_kind,
2725 Register map_in_out, Register scratch, Label* no_map_match) {
2726 // Load the global or builtins object from the current context.
2727 LoadP(scratch,
2728 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2729 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2730
2731 // Check that the function's map is the same as the expected cached map.
2732 LoadP(scratch,
2733 MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2734 size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
2735 LoadP(scratch, FieldMemOperand(scratch, offset));
2736 cmp(map_in_out, scratch);
2737 bne(no_map_match);
2738
2739 // Use the transitioned cached map.
2740 offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
2741 LoadP(map_in_out, FieldMemOperand(scratch, offset));
2742 }
2743
2744
2745 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2746 // Load the global or builtins object from the current context.
2747 LoadP(function,
2748 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2749 // Load the native context from the global or builtins object.
2750 LoadP(function,
2751 FieldMemOperand(function, GlobalObject::kNativeContextOffset));
2752 // Load the function from the native context.
2753 LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
2754 }
2755
2756
2757 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2758 Register map,
2759 Register scratch) {
2760 // Load the initial map. The global functions all have initial maps.
2761 LoadP(map,
2762 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2763 if (emit_debug_code()) {
2764 Label ok, fail;
2765 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2766 b(&ok);
2767 bind(&fail);
2768 Abort(kGlobalFunctionsMustHaveInitialMap);
2769 bind(&ok);
2770 }
2771 }
2772
2773
2774 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2775 Register reg, Register scratch, Label* not_power_of_two_or_zero) {
2776 subi(scratch, reg, Operand(1));
2777 cmpi(scratch, Operand::Zero());
2778 blt(not_power_of_two_or_zero);
2779 and_(r0, scratch, reg, SetRC);
2780 bne(not_power_of_two_or_zero, cr0);
2781 }
2782
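// The and_ of reg with reg - 1 above is the classic power-of-two test: a
// power of two has exactly one set bit, so x & (x - 1) == 0; the preceding
// blt on reg - 1 screens out zero (and negative values) first. A host-C++
// sketch (illustrative only; assumes <cstdint>):
static inline bool IsPowerOfTwo(intptr_t x) {
  return x > 0 && (x & (x - 1)) == 0;  // exactly one bit set
}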
2783
2784 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2785 Register scratch,
2786 Label* zero_and_neg,
2787 Label* not_power_of_two) {
2788 subi(scratch, reg, Operand(1));
2789 cmpi(scratch, Operand::Zero());
2790 blt(zero_and_neg);
2791 and_(r0, scratch, reg, SetRC);
2792 bne(not_power_of_two, cr0);
2793 }
2794
2795 #if !V8_TARGET_ARCH_PPC64
2796 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2797 DCHECK(!reg.is(overflow));
2798 mr(overflow, reg); // Save original value.
2799 SmiTag(reg);
2800 xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0.
2801 }
2802
2803
2804 void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2805 Register overflow) {
2806 if (dst.is(src)) {
2807 // Fall back to slower case.
2808 SmiTagCheckOverflow(dst, overflow);
2809 } else {
2810 DCHECK(!dst.is(src));
2811 DCHECK(!dst.is(overflow));
2812 DCHECK(!src.is(overflow));
2813 SmiTag(dst, src);
2814 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0.
2815 }
2816 }
2817 #endif
2818
2819 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2820 Label* on_not_both_smi) {
2821 STATIC_ASSERT(kSmiTag == 0);
2822 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2823 orx(r0, reg1, reg2, LeaveRC);
2824 JumpIfNotSmi(r0, on_not_both_smi);
2825 }
2826
2827
2828 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2829 Label* smi_case) {
2830 STATIC_ASSERT(kSmiTag == 0);
2831 STATIC_ASSERT(kSmiTagSize == 1);
2832 TestBit(src, 0, r0);
2833 SmiUntag(dst, src);
2834 beq(smi_case, cr0);
2835 }
2836
2837
2838 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2839 Label* non_smi_case) {
2840 STATIC_ASSERT(kSmiTag == 0);
2841 STATIC_ASSERT(kSmiTagSize == 1);
2842 TestBit(src, 0, r0);
2843 SmiUntag(dst, src);
2844 bne(non_smi_case, cr0);
2845 }
2846
2847
2848 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2849 Label* on_either_smi) {
2850 STATIC_ASSERT(kSmiTag == 0);
2851 JumpIfSmi(reg1, on_either_smi);
2852 JumpIfSmi(reg2, on_either_smi);
2853 }
2854
2855
2856 void MacroAssembler::AssertNotSmi(Register object) {
2857 if (emit_debug_code()) {
2858 STATIC_ASSERT(kSmiTag == 0);
2859 TestIfSmi(object, r0);
2860 Check(ne, kOperandIsASmi, cr0);
2861 }
2862 }
2863
2864
2865 void MacroAssembler::AssertSmi(Register object) {
2866 if (emit_debug_code()) {
2867 STATIC_ASSERT(kSmiTag == 0);
2868 TestIfSmi(object, r0);
2869 Check(eq, kOperandIsNotSmi, cr0);
2870 }
2871 }
2872
2873
2874 void MacroAssembler::AssertString(Register object) {
2875 if (emit_debug_code()) {
2876 STATIC_ASSERT(kSmiTag == 0);
2877 TestIfSmi(object, r0);
2878 Check(ne, kOperandIsASmiAndNotAString, cr0);
2879 push(object);
2880 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2881 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2882 pop(object);
2883 Check(lt, kOperandIsNotAString);
2884 }
2885 }
2886
2887
2888 void MacroAssembler::AssertName(Register object) {
2889 if (emit_debug_code()) {
2890 STATIC_ASSERT(kSmiTag == 0);
2891 TestIfSmi(object, r0);
2892 Check(ne, kOperandIsASmiAndNotAName, cr0);
2893 push(object);
2894 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2895 CompareInstanceType(object, object, LAST_NAME_TYPE);
2896 pop(object);
2897 Check(le, kOperandIsNotAName);
2898 }
2899 }
2900
2901
2902 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2903 Register scratch) {
2904 if (emit_debug_code()) {
2905 Label done_checking;
2906 AssertNotSmi(object);
2907 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2908 beq(&done_checking);
2909 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2910 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2911 Assert(eq, kExpectedUndefinedOrCell);
2912 bind(&done_checking);
2913 }
2914 }
2915
2916
2917 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2918 if (emit_debug_code()) {
2919 CompareRoot(reg, index);
2920 Check(eq, kHeapNumberMapRegisterClobbered);
2921 }
2922 }
2923
2924
2925 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2926 Register heap_number_map,
2927 Register scratch,
2928 Label* on_not_heap_number) {
2929 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2930 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2931 cmp(scratch, heap_number_map);
2932 bne(on_not_heap_number);
2933 }
2934
2935
2936 void MacroAssembler::LookupNumberStringCache(Register object, Register result,
2937 Register scratch1,
2938 Register scratch2,
2939 Register scratch3,
2940 Label* not_found) {
2941 // Use of registers. Register result is used as a temporary.
2942 Register number_string_cache = result;
2943 Register mask = scratch3;
2944
2945 // Load the number string cache.
2946 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2947
2948 // Make the hash mask from the length of the number string cache. It
2949 // contains two elements (number and string) for each cache entry.
2950 LoadP(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
2951 // Divide length by two (length is a smi).
2952 ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1);
2953 subi(mask, mask, Operand(1)); // Make mask.
2954
2955 // Calculate the entry in the number string cache. The hash value in the
2956 // number string cache for smis is just the smi value, and the hash for
2957 // doubles is the xor of the upper and lower words. See
2958 // Heap::GetNumberStringCache.
2959 Label is_smi;
2960 Label load_result_from_cache;
2961 JumpIfSmi(object, &is_smi);
2962 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
2963 DONT_DO_SMI_CHECK);
2964
2965 STATIC_ASSERT(8 == kDoubleSize);
2966 lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
2967 lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
2968 xor_(scratch1, scratch1, scratch2);
2969 and_(scratch1, scratch1, mask);
2970
2971 // Calculate address of entry in string cache: each entry consists
2972 // of two pointer-sized fields.
2973 ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
2974 add(scratch1, number_string_cache, scratch1);
2975
2976 Register probe = mask;
2977 LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2978 JumpIfSmi(probe, not_found);
2979 lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
2980 lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
2981 fcmpu(d0, d1);
2982 bne(not_found); // The cache did not contain this value.
2983 b(&load_result_from_cache);
2984
2985 bind(&is_smi);
2986 Register scratch = scratch1;
2987 SmiUntag(scratch, object);
2988 and_(scratch, mask, scratch);
2989 // Calculate address of entry in string cache: each entry consists
2990 // of two pointer-sized fields.
2991 ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1));
2992 add(scratch, number_string_cache, scratch);
2993
2994 // Check if the entry is the smi we are looking for.
2995 LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2996 cmp(object, probe);
2997 bne(not_found);
2998
2999 // Get the result from the cache.
3000 bind(&load_result_from_cache);
3001 LoadP(result,
3002 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
3003 IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
3004 scratch1, scratch2);
3005 }
3006
3007
3008 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
3009 Register first, Register second, Register scratch1, Register scratch2,
3010 Label* failure) {
3011 // Test that both first and second are sequential one-byte strings.
3012 // Assume that they are non-smis.
3013 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
3014 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
3015 lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
3016 lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
3017
3018 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
3019 scratch2, failure);
3020 }
3021
3022 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
3023 Register second,
3024 Register scratch1,
3025 Register scratch2,
3026 Label* failure) {
3027 // Check that neither is a smi.
3028 and_(scratch1, first, second);
3029 JumpIfSmi(scratch1, failure);
3030 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
3031 scratch2, failure);
3032 }
3033
3034
3035 void MacroAssembler::JumpIfNotUniqueNameInstanceType(Register reg,
3036 Label* not_unique_name) {
3037 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
3038 Label succeed;
3039 andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
3040 beq(&succeed, cr0);
3041 cmpi(reg, Operand(SYMBOL_TYPE));
3042 bne(not_unique_name);
3043
3044 bind(&succeed);
3045 }
3046
3047
3048 // Allocates a heap number or jumps to the gc_required label if the young
3049 // space is full and a scavenge is needed.
3050 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
3051 Register scratch2,
3052 Register heap_number_map,
3053 Label* gc_required,
3054 TaggingMode tagging_mode,
3055 MutableMode mode) {
3056 // Allocate an object in the heap for the heap number and tag it as a heap
3057 // object.
3058 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3059 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3060
3061 Heap::RootListIndex map_index = mode == MUTABLE
3062 ? Heap::kMutableHeapNumberMapRootIndex
3063 : Heap::kHeapNumberMapRootIndex;
3064 AssertIsRoot(heap_number_map, map_index);
3065
3066 // Store heap number map in the allocated object.
3067 if (tagging_mode == TAG_RESULT) {
3068 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
3069 r0);
3070 } else {
3071 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3072 }
3073 }
3074
3075
3076 void MacroAssembler::AllocateHeapNumberWithValue(
3077 Register result, DoubleRegister value, Register scratch1, Register scratch2,
3078 Register heap_number_map, Label* gc_required) {
3079 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3080 stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3081 }
3082
3083
3084 // Copies a fixed number of fields of heap objects from src to dst.
3085 void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
3086 int field_count) {
3087 // At least one bit set in the first 15 registers.
3088 DCHECK((temps & ((1 << 15) - 1)) != 0);
3089 DCHECK((temps & dst.bit()) == 0);
3090 DCHECK((temps & src.bit()) == 0);
3091 // Primitive implementation using only one temporary register.
3092
3093 Register tmp = no_reg;
3094 // Find a temp register in temps list.
3095 for (int i = 0; i < 15; i++) {
3096 if ((temps & (1 << i)) != 0) {
3097 tmp.set_code(i);
3098 break;
3099 }
3100 }
3101 DCHECK(!tmp.is(no_reg));
3102
3103 for (int i = 0; i < field_count; i++) {
3104 LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
3105 StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
3106 }
3107 }
3108
3109
3110 void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
3111 Register scratch) {
3112 Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
3113
3114 DCHECK(!scratch.is(r0));
3115
3116 cmpi(length, Operand::Zero());
3117 beq(&done);
3118
3119 // Check src alignment and length to see whether word_loop is possible
3120 andi(scratch, src, Operand(kPointerSize - 1));
3121 beq(&aligned, cr0);
3122 subfic(scratch, scratch, Operand(kPointerSize * 2));
3123 cmp(length, scratch);
3124 blt(&byte_loop);
3125
3126 // Align src before copying in word-size chunks.
3127 subi(scratch, scratch, Operand(kPointerSize));
3128 mtctr(scratch);
3129 bind(&align_loop);
3130 lbz(scratch, MemOperand(src));
3131 addi(src, src, Operand(1));
3132 subi(length, length, Operand(1));
3133 stb(scratch, MemOperand(dst));
3134 addi(dst, dst, Operand(1));
3135 bdnz(&align_loop);
3136
3137 bind(&aligned);
3138
3139 // Copy bytes in word-size chunks.
3140 if (emit_debug_code()) {
3141 andi(r0, src, Operand(kPointerSize - 1));
3142 Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
3143 }
3144
3145 ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
3146 cmpi(scratch, Operand::Zero());
3147 beq(&byte_loop);
3148
3149 mtctr(scratch);
3150 bind(&word_loop);
3151 LoadP(scratch, MemOperand(src));
3152 addi(src, src, Operand(kPointerSize));
3153 subi(length, length, Operand(kPointerSize));
3154 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3155 // Currently false for PPC, but a possible future optimization.
3156 StoreP(scratch, MemOperand(dst));
3157 addi(dst, dst, Operand(kPointerSize));
3158 } else {
3159 #if V8_TARGET_LITTLE_ENDIAN
3160 stb(scratch, MemOperand(dst, 0));
3161 ShiftRightImm(scratch, scratch, Operand(8));
3162 stb(scratch, MemOperand(dst, 1));
3163 ShiftRightImm(scratch, scratch, Operand(8));
3164 stb(scratch, MemOperand(dst, 2));
3165 ShiftRightImm(scratch, scratch, Operand(8));
3166 stb(scratch, MemOperand(dst, 3));
3167 #if V8_TARGET_ARCH_PPC64
3168 ShiftRightImm(scratch, scratch, Operand(8));
3169 stb(scratch, MemOperand(dst, 4));
3170 ShiftRightImm(scratch, scratch, Operand(8));
3171 stb(scratch, MemOperand(dst, 5));
3172 ShiftRightImm(scratch, scratch, Operand(8));
3173 stb(scratch, MemOperand(dst, 6));
3174 ShiftRightImm(scratch, scratch, Operand(8));
3175 stb(scratch, MemOperand(dst, 7));
3176 #endif
3177 #else
3178 #if V8_TARGET_ARCH_PPC64
3179 stb(scratch, MemOperand(dst, 7));
3180 ShiftRightImm(scratch, scratch, Operand(8));
3181 stb(scratch, MemOperand(dst, 6));
3182 ShiftRightImm(scratch, scratch, Operand(8));
3183 stb(scratch, MemOperand(dst, 5));
3184 ShiftRightImm(scratch, scratch, Operand(8));
3185 stb(scratch, MemOperand(dst, 4));
3186 ShiftRightImm(scratch, scratch, Operand(8));
3187 #endif
3188 stb(scratch, MemOperand(dst, 3));
3189 ShiftRightImm(scratch, scratch, Operand(8));
3190 stb(scratch, MemOperand(dst, 2));
3191 ShiftRightImm(scratch, scratch, Operand(8));
3192 stb(scratch, MemOperand(dst, 1));
3193 ShiftRightImm(scratch, scratch, Operand(8));
3194 stb(scratch, MemOperand(dst, 0));
3195 #endif
3196 addi(dst, dst, Operand(kPointerSize));
3197 }
3198 bdnz(&word_loop);
3199
3200 // Copy the last bytes, if any are left.
3201 cmpi(length, Operand::Zero());
3202 beq(&done);
3203
3204 bind(&byte_loop);
3205 mtctr(length);
3206 bind(&byte_loop_1);
3207 lbz(scratch, MemOperand(src));
3208 addi(src, src, Operand(1));
3209 stb(scratch, MemOperand(dst));
3210 addi(dst, dst, Operand(1));
3211 bdnz(&byte_loop_1);
3212
3213 bind(&done);
3214 }
3215
3216
3217 void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
3218 Register count,
3219 Register filler) {
3220 Label loop;
3221 mtctr(count);
3222 bind(&loop);
3223 StoreP(filler, MemOperand(start_offset));
3224 addi(start_offset, start_offset, Operand(kPointerSize));
3225 bdnz(&loop);
3226 }
3227
3228 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3229 Register end_offset,
3230 Register filler) {
3231 Label done;
3232 sub(r0, end_offset, start_offset, LeaveOE, SetRC);
3233 beq(&done, cr0);
3234 ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
3235 InitializeNFieldsWithFiller(start_offset, r0, filler);
3236 bind(&done);
3237 }
3238
3239
3240 void MacroAssembler::SaveFPRegs(Register location, int first, int count) {
3241 DCHECK(count > 0);
3242 int cur = first;
3243 subi(location, location, Operand(count * kDoubleSize));
3244 for (int i = 0; i < count; i++) {
3245 DoubleRegister reg = DoubleRegister::from_code(cur++);
3246 stfd(reg, MemOperand(location, i * kDoubleSize));
3247 }
3248 }
3249
3250
3251 void MacroAssembler::RestoreFPRegs(Register location, int first, int count) {
3252 DCHECK(count > 0);
3253 int cur = first + count - 1;
3254 for (int i = count - 1; i >= 0; i--) {
3255 DoubleRegister reg = DoubleRegister::from_code(cur--);
3256 lfd(reg, MemOperand(location, i * kDoubleSize));
3257 }
3258 addi(location, location, Operand(count * kDoubleSize));
3259 }
3260
3261
3262 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3263 Register first, Register second, Register scratch1, Register scratch2,
3264 Label* failure) {
3265 const int kFlatOneByteStringMask =
3266 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3267 const int kFlatOneByteStringTag =
3268 kStringTag | kOneByteStringTag | kSeqStringTag;
3269 andi(scratch1, first, Operand(kFlatOneByteStringMask));
3270 andi(scratch2, second, Operand(kFlatOneByteStringMask));
3271 cmpi(scratch1, Operand(kFlatOneByteStringTag));
3272 bne(failure);
3273 cmpi(scratch2, Operand(kFlatOneByteStringTag));
3274 bne(failure);
3275 }
3276
3277
3278 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3279 Register scratch,
3280 Label* failure) {
3281 const int kFlatOneByteStringMask =
3282 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3283 const int kFlatOneByteStringTag =
3284 kStringTag | kOneByteStringTag | kSeqStringTag;
3285 andi(scratch, type, Operand(kFlatOneByteStringMask));
3286 cmpi(scratch, Operand(kFlatOneByteStringTag));
3287 bne(failure);
3288 }
3289
3290 static const int kRegisterPassedArguments = 8;
3291
3292
3293 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3294 int num_double_arguments) {
3295 int stack_passed_words = 0;
3296 if (num_double_arguments > DoubleRegister::kNumRegisters) {
3297 stack_passed_words +=
3298 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
3299 }
3300 // Up to 8 simple arguments are passed in registers r3..r10.
3301 if (num_reg_arguments > kRegisterPassedArguments) {
3302 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3303 }
3304 return stack_passed_words;
3305 }
3306
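// Worked example: with 8 GPR argument registers, a call with 10 integer
// arguments and no overflowing doubles needs 10 - 8 = 2 stack words, while
// 5 integer arguments and 2 doubles need none.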
3307
3308 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
3309 Register value,
3310 uint32_t encoding_mask) {
3311 Label is_object;
3312 TestIfSmi(string, r0);
3313 Check(ne, kNonObject, cr0);
3314
3315 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3316 lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3317
3318 andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3319 cmpi(ip, Operand(encoding_mask));
3320 Check(eq, kUnexpectedStringType);
3321
3322 // The index is assumed to be untagged coming in; tag it to compare with the
3323 // string length without using a temp register. It is restored at the end of
3324 // this function.
3325 #if !V8_TARGET_ARCH_PPC64
3326 Label index_tag_ok, index_tag_bad;
3327 JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
3328 #endif
3329 SmiTag(index, index);
3330 #if !V8_TARGET_ARCH_PPC64
3331 b(&index_tag_ok);
3332 bind(&index_tag_bad);
3333 Abort(kIndexIsTooLarge);
3334 bind(&index_tag_ok);
3335 #endif
3336
3337 LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
3338 cmp(index, ip);
3339 Check(lt, kIndexIsTooLarge);
3340
3341 DCHECK(Smi::FromInt(0) == 0);
3342 cmpi(index, Operand::Zero());
3343 Check(ge, kIndexIsNegative);
3344
3345 SmiUntag(index, index);
3346 }
3347
3348
3349 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3350 int num_double_arguments,
3351 Register scratch) {
3352 int frame_alignment = ActivationFrameAlignment();
3353 int stack_passed_arguments =
3354 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3355 int stack_space = kNumRequiredStackFrameSlots;
3356
3357 if (frame_alignment > kPointerSize) {
3358 // Make the stack end at the alignment boundary and make room for stack
3359 // arguments -- preserving the original value of sp.
3360 mr(scratch, sp);
3361 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
3362 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3363 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
3364 StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3365 } else {
3366 // Make room for stack arguments
3367 stack_space += stack_passed_arguments;
3368 }
3369
3370 // Allocate frame with required slots to make ABI work.
3371 li(r0, Operand::Zero());
3372 StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
3373 }
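// Net effect of the prologue above: sp is realigned when the ABI demands
// more than pointer alignment, slots are reserved for any stack-passed
// arguments plus the kNumRequiredStackFrameSlots frame header, and StorePU
// both bumps sp and writes a zero into the back-chain slot at the new
// frame base. CallCFunctionHelper below tears down exactly this frame.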
3374
3375
3376 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3377 Register scratch) {
3378 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3379 }
3380
3381
3382 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
3383
3384
3385 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
3386
3387
3388 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3389 DoubleRegister src2) {
3390 if (src2.is(d1)) {
3391 DCHECK(!src1.is(d2));
3392 Move(d2, src2);
3393 Move(d1, src1);
3394 } else {
3395 Move(d1, src1);
3396 Move(d2, src2);
3397 }
3398 }
3399
3400
3401 void MacroAssembler::CallCFunction(ExternalReference function,
3402 int num_reg_arguments,
3403 int num_double_arguments) {
3404 mov(ip, Operand(function));
3405 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3406 }
3407
3408
3409 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3410 int num_double_arguments) {
3411 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3412 }
3413
3414
3415 void MacroAssembler::CallCFunction(ExternalReference function,
3416 int num_arguments) {
3417 CallCFunction(function, num_arguments, 0);
3418 }
3419
3420
3421 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3422 CallCFunction(function, num_arguments, 0);
3423 }
3424
3425
3426 void MacroAssembler::CallCFunctionHelper(Register function,
3427 int num_reg_arguments,
3428 int num_double_arguments) {
3429 DCHECK(has_frame());
3430 // Just call directly. The function called cannot cause a GC, or
3431 // allow preemption, so the return address in the link register
3432 // stays correct.
3433 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
3434 // AIX uses a function descriptor. When calling C code, be aware of this
3435 // descriptor and pick up values from it.
3436 LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
3437 LoadP(ip, MemOperand(function, 0));
3438 Register dest = ip;
3439 #elif ABI_TOC_ADDRESSABILITY_VIA_IP
3440 Move(ip, function);
3441 Register dest = ip;
3442 #else
3443 Register dest = function;
3444 #endif
3445
3446 Call(dest);
3447
3448 // Remove the frame built in PrepareCallCFunction.
3449 int stack_passed_arguments =
3450 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3451 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3452 if (ActivationFrameAlignment() > kPointerSize) {
3453 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3454 } else {
3455 addi(sp, sp, Operand(stack_space * kPointerSize));
3456 }
3457 }
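// A minimal usage sketch (hypothetical call site; the argument values,
// scratch register, and external reference are illustrative assumptions):
//   PrepareCallCFunction(2, 0, r11);
//   mov(r3, Operand(first_arg));   // integer args go in r3..r10
//   mov(r4, Operand(second_arg));
//   CallCFunction(ExternalReference::some_c_function(isolate()), 2);
// Because PrepareCallCFunction and CallCFunctionHelper compute the same
// frame size, the epilogue either reloads the saved sp (aligned case) or
// pops the fixed-size frame directly.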
3458
3459
3460 void MacroAssembler::FlushICache(Register address, size_t size,
3461 Register scratch) {
3462 if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
3463 sync();
3464 icbi(r0, address);
3465 isync();
3466 return;
3467 }
3468
3469 Label done;
3470
3471 dcbf(r0, address);
3472 sync();
3473 icbi(r0, address);
3474 isync();
3475
3476 // This code handles ranges which cross at most one cache-line boundary.
3477 // scratch holds the last cache line that intersects the range.
3478 const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
3479
3480 DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
3481 addi(scratch, address, Operand(size - 1));
3482 ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
3483 cmpl(scratch, address);
3484 ble(&done);
3485
3486 dcbf(r0, scratch);
3487 sync();
3488 icbi(r0, scratch);
3489 isync();
3490
3491 bind(&done);
3492 }
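// The sequence above is the standard PowerPC protocol for making patched
// code visible: dcbf flushes the modified data-cache line, sync makes the
// store globally visible, icbi invalidates the stale instruction-cache
// line, and isync discards any prefetched stale instructions. The two-line
// variant covers ranges that straddle at most one cache-line boundary,
// which is all the fixed patch sequences in this file require.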
3493
3494
3495 void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
3496 Register new_value) {
3497 lwz(scratch, MemOperand(location));
3498
3499 #if V8_OOL_CONSTANT_POOL
3500 if (emit_debug_code()) {
3501 // Check that the instruction sequence is a load from the constant pool
3502 #if V8_TARGET_ARCH_PPC64
3503 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
3504 Cmpi(scratch, Operand(ADDI), r0);
3505 Check(eq, kTheInstructionShouldBeALi);
3506 lwz(scratch, MemOperand(location, kInstrSize));
3507 #endif
3508 ExtractBitMask(scratch, scratch, 0x1f * B16);
3509 cmpi(scratch, Operand(kConstantPoolRegister.code()));
3510 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3511 // Scratch was clobbered. Restore it.
3512 lwz(scratch, MemOperand(location));
3513 }
3514 // Get the address of the constant and patch it.
3515 andi(scratch, scratch, Operand(kImm16Mask));
3516 StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
3517 #else
3518 // This code assumes a FIXED_SEQUENCE for lis/ori
3519
3520 // At this point scratch is a lis instruction.
3521 if (emit_debug_code()) {
3522 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
3523 Cmpi(scratch, Operand(ADDIS), r0);
3524 Check(eq, kTheInstructionToPatchShouldBeALis);
3525 lwz(scratch, MemOperand(location));
3526 }
3527
3528 // insert new high word into lis instruction
3529 #if V8_TARGET_ARCH_PPC64
3530 srdi(ip, new_value, Operand(32));
3531 rlwimi(scratch, ip, 16, 16, 31);
3532 #else
3533 rlwimi(scratch, new_value, 16, 16, 31);
3534 #endif
3535
3536 stw(scratch, MemOperand(location));
3537
3538 lwz(scratch, MemOperand(location, kInstrSize));
3539 // scratch is now ori.
3540 if (emit_debug_code()) {
3541 And(scratch, scratch, Operand(kOpcodeMask));
3542 Cmpi(scratch, Operand(ORI), r0);
3543 Check(eq, kTheInstructionShouldBeAnOri);
3544 lwz(scratch, MemOperand(location, kInstrSize));
3545 }
3546
3547 // insert new low word into ori instruction
3548 #if V8_TARGET_ARCH_PPC64
3549 rlwimi(scratch, ip, 0, 16, 31);
3550 #else
3551 rlwimi(scratch, new_value, 0, 16, 31);
3552 #endif
3553 stw(scratch, MemOperand(location, kInstrSize));
3554
3555 #if V8_TARGET_ARCH_PPC64
3556 if (emit_debug_code()) {
3557 lwz(scratch, MemOperand(location, 2 * kInstrSize));
3558 // scratch is now sldi.
3559 And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
3560 Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
3561 Check(eq, kTheInstructionShouldBeASldi);
3562 }
3563
3564 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3565 // scratch is now ori.
3566 if (emit_debug_code()) {
3567 And(scratch, scratch, Operand(kOpcodeMask));
3568 Cmpi(scratch, Operand(ORIS), r0);
3569 Check(eq, kTheInstructionShouldBeAnOris);
3570 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3571 }
3572
3573 rlwimi(scratch, new_value, 16, 16, 31);
3574 stw(scratch, MemOperand(location, 3 * kInstrSize));
3575
3576 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3577 // scratch is now ori.
3578 if (emit_debug_code()) {
3579 And(scratch, scratch, Operand(kOpcodeMask));
3580 Cmpi(scratch, Operand(ORI), r0);
3581 Check(eq, kTheInstructionShouldBeAnOri);
3582 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3583 }
3584 rlwimi(scratch, new_value, 0, 16, 31);
3585 stw(scratch, MemOperand(location, 4 * kInstrSize));
3586 #endif
3587
3588 // Update the I-cache so the patched lis/ori sequence can be executed.
3589 #if V8_TARGET_ARCH_PPC64
3590 FlushICache(location, 5 * kInstrSize, scratch);
3591 #else
3592 FlushICache(location, 2 * kInstrSize, scratch);
3593 #endif
3594 #endif
3595 }
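// For reference, the FIXED_SEQUENCE patched above is what mov() emits for
// a full-width constant. On 32-bit targets a value such as 0x12345678 is
// materialized as
//   lis rT, 0x1234      // high 16 bits
//   ori rT, rT, 0x5678  // low 16 bits
// and on 64-bit targets the five-instruction form is
//   lis (bits 63..48), ori (bits 47..32), sldi rT, rT, 32,
//   oris (bits 31..16), ori (bits 15..0).
// SetRelocatedValue rewrites only the 16-bit immediate fields in place and
// then flushes the I-cache so the rewritten instructions are fetched.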
3596
3597
3598 void MacroAssembler::GetRelocatedValue(Register location, Register result,
3599 Register scratch) {
3600 lwz(result, MemOperand(location));
3601
3602 #if V8_OOL_CONSTANT_POOL
3603 if (emit_debug_code()) {
3604 // Check that the instruction sequence is a load from the constant pool
3605 #if V8_TARGET_ARCH_PPC64
3606 And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
3607 Cmpi(result, Operand(ADDI), r0);
3608 Check(eq, kTheInstructionShouldBeALi);
3609 lwz(result, MemOperand(location, kInstrSize));
3610 #endif
3611 ExtractBitMask(result, result, 0x1f * B16);
3612 cmpi(result, Operand(kConstantPoolRegister.code()));
3613 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3614 lwz(result, MemOperand(location));
3615 }
3616 // Get the address of the constant and retrieve it.
3617 andi(result, result, Operand(kImm16Mask));
3618 LoadPX(result, MemOperand(kConstantPoolRegister, result));
3619 #else
3620 // This code assumes a FIXED_SEQUENCE for lis/ori
3621 if (emit_debug_code()) {
3622 And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
3623 Cmpi(result, Operand(ADDIS), r0);
3624 Check(eq, kTheInstructionShouldBeALis);
3625 lwz(result, MemOperand(location));
3626 }
3627
3628 // result now holds a lis instruction. Extract the immediate.
3629 slwi(result, result, Operand(16));
3630
3631 lwz(scratch, MemOperand(location, kInstrSize));
3632 if (emit_debug_code()) {
3633 And(scratch, scratch, Operand(kOpcodeMask));
3634 Cmpi(scratch, Operand(ORI), r0);
3635 Check(eq, kTheInstructionShouldBeAnOri);
3636 lwz(scratch, MemOperand(location, kInstrSize));
3637 }
3638 // Copy the low 16 bits from the ori instruction into result.
3639 rlwimi(result, scratch, 0, 16, 31);
3640
3641 #if V8_TARGET_ARCH_PPC64
3642 if (emit_debug_code()) {
3643 lwz(scratch, MemOperand(location, 2 * kInstrSize));
3644 // scratch is now sldi.
3645 And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
3646 Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
3647 Check(eq, kTheInstructionShouldBeASldi);
3648 }
3649
3650 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3651 // scratch is now ori.
3652 if (emit_debug_code()) {
3653 And(scratch, scratch, Operand(kOpcodeMask));
3654 Cmpi(scratch, Operand(ORIS), r0);
3655 Check(eq, kTheInstructionShouldBeAnOris);
3656 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3657 }
3658 sldi(result, result, Operand(16));
3659 rldimi(result, scratch, 0, 48);
3660
3661 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3662 // scratch is now ori.
3663 if (emit_debug_code()) {
3664 And(scratch, scratch, Operand(kOpcodeMask));
3665 Cmpi(scratch, Operand(ORI), r0);
3666 Check(eq, kTheInstructionShouldBeAnOri);
3667 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3668 }
3669 sldi(result, result, Operand(16));
3670 rldimi(result, scratch, 0, 48);
3671 #endif
3672 #endif
3673 }
3674
3675
3676 void MacroAssembler::CheckPageFlag(
3677 Register object,
3678 Register scratch, // scratch may be same register as object
3679 int mask, Condition cc, Label* condition_met) {
3680 DCHECK(cc == ne || cc == eq);
3681 ClearRightImm(scratch, object, Operand(kPageSizeBits));
3682 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3683
3684 And(r0, scratch, Operand(mask), SetRC);
3685
3686 if (cc == ne) {
3687 bne(condition_met, cr0);
3688 }
3689 if (cc == eq) {
3690 beq(condition_met, cr0);
3691 }
3692 }
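// Example of the address arithmetic above: heap pages are aligned to their
// 2^kPageSizeBits size, so clearing the low kPageSizeBits of any object
// address yields the MemoryChunk header of its page; the flag word then
// sits at the fixed MemoryChunk::kFlagsOffset inside that header.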
3693
3694
3695 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Register scratch,
3696 Label* if_deprecated) {
3697 if (map->CanBeDeprecated()) {
3698 mov(scratch, Operand(map));
3699 lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3700 ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC);
3701 bne(if_deprecated, cr0);
3702 }
3703 }
3704
3705
3706 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3707 Register scratch1, Label* on_black) {
3708 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3709 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3710 }
3711
3712
3713 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3714 Register mask_scratch, Label* has_color,
3715 int first_bit, int second_bit) {
3716 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3717
3718 GetMarkBits(object, bitmap_scratch, mask_scratch);
3719
3720 Label other_color, word_boundary;
3721 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3722 // Test the first bit
3723 and_(r0, ip, mask_scratch, SetRC);
3724 b(first_bit == 1 ? eq : ne, &other_color, cr0);
3725 // Shift left 1
3726 // May need to load the next cell
3727 slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
3728 beq(&word_boundary, cr0);
3729 // Test the second bit
3730 and_(r0, ip, mask_scratch, SetRC);
3731 b(second_bit == 1 ? ne : eq, has_color, cr0);
3732 b(&other_color);
3733
3734 bind(&word_boundary);
3735 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3736 andi(r0, ip, Operand(1));
3737 b(second_bit == 1 ? ne : eq, has_color, cr0);
3738 bind(&other_color);
3739 }
3740
3741
3742 // Detect some, but not all, common pointer-free objects. This is used by the
3743 // incremental write barrier which doesn't care about oddballs (they are always
3744 // marked black immediately so this code is not hit).
3745 void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
3746 Label* not_data_object) {
3747 Label is_data_object;
3748 LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3749 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3750 beq(&is_data_object);
3751 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3752 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3753 // If it's a string and it's not a cons string then it's an object containing
3754 // no GC pointers.
3755 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3756 STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
3757 andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3758 bne(not_data_object, cr0);
3759 bind(&is_data_object);
3760 }
3761
3762
3763 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3764 Register mask_reg) {
3765 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3766 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
3767 lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
3768 and_(bitmap_reg, addr_reg, r0);
3769 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3770 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3771 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3772 ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3773 add(bitmap_reg, bitmap_reg, ip);
3774 li(ip, Operand(1));
3775 slw(mask_reg, ip, mask_reg);
3776 }
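// The marking bitmap keeps one bit per pointer-sized word of the page, so
// an address decomposes as follows: bits [kPageSizeBits-1 : kLowBits]
// select the bitmap cell (scaled by kBytesPerCellLog2 and added to the
// page start already in bitmap_reg), while bits
// [kLowBits-1 : kPointerSizeLog2] select the bit within that cell,
// materialized as a one-hot mask in mask_reg.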
3777
3778
3779 void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
3780 Register mask_scratch,
3781 Register load_scratch,
3782 Label* value_is_white_and_not_data) {
3783 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3784 GetMarkBits(value, bitmap_scratch, mask_scratch);
3785
3786 // If the value is black or grey we don't need to do anything.
3787 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3788 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3789 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
3790 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3791
3792 Label done;
3793
3794 // Since both black and grey have a 1 in the first position and white does
3795 // not have a 1 there, we only need to check one bit.
3796 lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3797 and_(r0, mask_scratch, load_scratch, SetRC);
3798 bne(&done, cr0);
3799
3800 if (emit_debug_code()) {
3801 // Check for impossible bit pattern.
3802 Label ok;
3803 // The left shift may overflow, making the check conservative.
3804 slwi(r0, mask_scratch, Operand(1));
3805 and_(r0, load_scratch, r0, SetRC);
3806 beq(&ok, cr0);
3807 stop("Impossible marking bit pattern");
3808 bind(&ok);
3809 }
3810
3811 // Value is white. We check whether it is data that doesn't need scanning.
3812 // Currently only checks for HeapNumber and non-cons strings.
3813 Register map = load_scratch; // Holds map while checking type.
3814 Register length = load_scratch; // Holds length of object after testing type.
3815 Label is_data_object, maybe_string_object, is_string_object, is_encoded;
3816 #if V8_TARGET_ARCH_PPC64
3817 Label length_computed;
3818 #endif
3819
3820
3821 // Check for heap-number
3822 LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
3823 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3824 bne(&maybe_string_object);
3825 li(length, Operand(HeapNumber::kSize));
3826 b(&is_data_object);
3827 bind(&maybe_string_object);
3828
3829 // Check for strings.
3830 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3831 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3832 // If it's a string and it's not a cons string then it's an object containing
3833 // no GC pointers.
3834 Register instance_type = load_scratch;
3835 lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3836 andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3837 bne(value_is_white_and_not_data, cr0);
3838 // It's a non-indirect (non-cons and non-slice) string.
3839 // If it's external, the length is just ExternalString::kSize.
3840 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3841 // External strings are the only ones with the kExternalStringTag bit
3842 // set.
3843 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
3844 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
3845 andi(r0, instance_type, Operand(kExternalStringTag));
3846 beq(&is_string_object, cr0);
3847 li(length, Operand(ExternalString::kSize));
3848 b(&is_data_object);
3849 bind(&is_string_object);
3850
3851 // Sequential string, either Latin1 or UC16.
3852 // For Latin1 (char-size of 1) we untag the smi to get the length.
3853 // For UC16 (char-size of 2):
3854 // - (32-bit) we just leave the smi tag in place, thereby getting
3855 // the length multiplied by 2.
3856 // - (64-bit) we compute the offset in the 2-byte array
3857 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3858 LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
3859 andi(r0, instance_type, Operand(kStringEncodingMask));
3860 beq(&is_encoded, cr0);
3861 SmiUntag(ip);
3862 #if V8_TARGET_ARCH_PPC64
3863 b(&length_computed);
3864 #endif
3865 bind(&is_encoded);
3866 #if V8_TARGET_ARCH_PPC64
3867 SmiToShortArrayOffset(ip, ip);
3868 bind(&length_computed);
3869 #else
3870 DCHECK(kSmiShift == 1);
3871 #endif
3872 addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3873 li(r0, Operand(~kObjectAlignmentMask));
3874 and_(length, length, r0);
3875
3876 bind(&is_data_object);
3877 // Value is a data object, and it is white. Mark it black. Since we know
3878 // that the object is white we can make it black by flipping one bit.
3879 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3880 orx(ip, ip, mask_scratch);
3881 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3882
3883 mov(ip, Operand(~Page::kPageAlignmentMask));
3884 and_(bitmap_scratch, bitmap_scratch, ip);
3885 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3886 add(ip, ip, length);
3887 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3888
3889 bind(&done);
3890 }
3891
3892
3893 // Saturate a value into an 8-bit unsigned integer:
3894 //   if input_value < 0, output_value is 0;
3895 //   if input_value > 255, output_value is 255;
3896 //   otherwise output_value is input_value.
3897 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3898 Label done, negative_label, overflow_label;
3899 int satval = (1 << 8) - 1;
3900
3901 cmpi(input_reg, Operand::Zero());
3902 blt(&negative_label);
3903
3904 cmpi(input_reg, Operand(satval));
3905 bgt(&overflow_label);
3906 if (!output_reg.is(input_reg)) {
3907 mr(output_reg, input_reg);
3908 }
3909 b(&done);
3910
3911 bind(&negative_label);
3912 li(output_reg, Operand::Zero()); // set to 0 if negative
3913 b(&done);
3914
3915
3916 bind(&overflow_label); // set to satval if > satval
3917 li(output_reg, Operand(satval));
3918
3919 bind(&done);
3920 }
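// Example behaviour of the clamp above: -5 produces 0, 300 produces 255,
// and 128 passes through unchanged.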
3921
3922
3923 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
3924
3925
3926 void MacroAssembler::ResetRoundingMode() {
3927 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
3928 }
3929
3930
3931 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3932 DoubleRegister input_reg,
3933 DoubleRegister double_scratch) {
3934 Label above_zero;
3935 Label done;
3936 Label in_bounds;
3937
3938 LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3939 fcmpu(input_reg, double_scratch);
3940 bgt(&above_zero);
3941
3942 // Double value is <= 0 or NaN: return 0.
3943 LoadIntLiteral(result_reg, 0);
3944 b(&done);
3945
3946 // Double value is > 255: return 255.
3947 bind(&above_zero);
3948 LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3949 fcmpu(input_reg, double_scratch);
3950 ble(&in_bounds);
3951 LoadIntLiteral(result_reg, 255);
3952 b(&done);
3953
3954 // In 0-255 range, round and truncate.
3955 bind(&in_bounds);
3956
3957 // round to nearest (default rounding mode)
3958 fctiw(double_scratch, input_reg);
3959 MovDoubleLowToInt(result_reg, double_scratch);
3960 bind(&done);
3961 }
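// Note on rounding: fctiw honours the current FPSCR rounding mode, which
// this code leaves at the default round-to-nearest (ties to even), so for
// example 0.5 converts to 0 and 254.5 converts to 254 before the low word
// is moved into result_reg.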
3962
3963
3964 void MacroAssembler::LoadInstanceDescriptors(Register map,
3965 Register descriptors) {
3966 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3967 }
3968
3969
3970 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3971 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3972 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3973 }
3974
3975
3976 void MacroAssembler::EnumLength(Register dst, Register map) {
3977 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3978 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3979 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
3980 SmiTag(dst);
3981 }
3982
3983
3984 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3985 Register empty_fixed_array_value = r9;
3986 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3987 Label next, start;
3988 mr(r5, r3);
3989
3990 // Check if the enum length field is properly initialized, indicating that
3991 // there is an enum cache.
3992 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3993
3994 EnumLength(r6, r4);
3995 CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3996 beq(call_runtime);
3997
3998 b(&start);
3999
4000 bind(&next);
4001 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
4002
4003 // For all objects but the receiver, check that the cache is empty.
4004 EnumLength(r6, r4);
4005 CmpSmiLiteral(r6, Smi::FromInt(0), r0);
4006 bne(call_runtime);
4007
4008 bind(&start);
4009
4010 // Check that there are no elements. Register r5 contains the current JS
4011 // object we've reached through the prototype chain.
4012 Label no_elements;
4013 LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
4014 cmp(r5, empty_fixed_array_value);
4015 beq(&no_elements);
4016
4017 // Second chance, the object may be using the empty slow element dictionary.
4018 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
4019 bne(call_runtime);
4020
4021 bind(&no_elements);
4022 LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
4023 cmp(r5, null_value);
4024 bne(&next);
4025 }
4026
4027
4028 ////////////////////////////////////////////////////////////////////////////////
4029 //
4030 // New MacroAssembler Interfaces added for PPC
4031 //
4032 ////////////////////////////////////////////////////////////////////////////////
4033 void MacroAssembler::LoadIntLiteral(Register dst, int value) {
4034 mov(dst, Operand(value));
4035 }
4036
4037
4038 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
4039 mov(dst, Operand(smi));
4040 }
4041
4042
4043 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
4044 Register scratch) {
4045 #if V8_OOL_CONSTANT_POOL
4046 // TODO(mbrandy): enable extended constant pool usage for doubles.
4047 // See ARM commit e27ab337 for a reference.
4048 if (is_ool_constant_pool_available() && !is_constant_pool_full()) {
4049 RelocInfo rinfo(pc_, value);
4050 ConstantPoolAddEntry(rinfo);
4051 #if V8_TARGET_ARCH_PPC64
4052 // We use a 2-instruction sequence here for consistency with mov.
4053 li(scratch, Operand::Zero());
4054 lfdx(result, MemOperand(kConstantPoolRegister, scratch));
4055 #else
4056 lfd(result, MemOperand(kConstantPoolRegister, 0));
4057 #endif
4058 return;
4059 }
4060 #endif
4061
4062 // Avoid a gcc strict-aliasing error by using a union cast.
4063 union {
4064 double dval;
4065 #if V8_TARGET_ARCH_PPC64
4066 intptr_t ival;
4067 #else
4068 intptr_t ival[2];
4069 #endif
4070 } litVal;
4071
4072 litVal.dval = value;
4073
4074 #if V8_TARGET_ARCH_PPC64
4075 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4076 mov(scratch, Operand(litVal.ival));
4077 mtfprd(result, scratch);
4078 return;
4079 }
4080 #endif
4081
4082 addi(sp, sp, Operand(-kDoubleSize));
4083 #if V8_TARGET_ARCH_PPC64
4084 mov(scratch, Operand(litVal.ival));
4085 std(scratch, MemOperand(sp));
4086 #else
4087 LoadIntLiteral(scratch, litVal.ival[0]);
4088 stw(scratch, MemOperand(sp, 0));
4089 LoadIntLiteral(scratch, litVal.ival[1]);
4090 stw(scratch, MemOperand(sp, 4));
4091 #endif
4092 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4093 lfd(result, MemOperand(sp, 0));
4094 addi(sp, sp, Operand(kDoubleSize));
4095 }
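// The union cast above reinterprets the IEEE-754 bit pattern directly; for
// example the literal 1.0 becomes 0x3FF0000000000000, loaded either
// through a GPR when FPR<->GPR moves are available or by spilling to a
// stack slot and reloading with lfd.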
4096
4097
4098 void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
4099 Register scratch) {
4100 // sign-extend src to 64-bit
4101 #if V8_TARGET_ARCH_PPC64
4102 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4103 mtfprwa(dst, src);
4104 return;
4105 }
4106 #endif
4107
4108 DCHECK(!src.is(scratch));
4109 subi(sp, sp, Operand(kDoubleSize));
4110 #if V8_TARGET_ARCH_PPC64
4111 extsw(scratch, src);
4112 std(scratch, MemOperand(sp, 0));
4113 #else
4114 srawi(scratch, src, 31);
4115 stw(scratch, MemOperand(sp, Register::kExponentOffset));
4116 stw(src, MemOperand(sp, Register::kMantissaOffset));
4117 #endif
4118 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4119 lfd(dst, MemOperand(sp, 0));
4120 addi(sp, sp, Operand(kDoubleSize));
4121 }
4122
4123
4124 void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
4125 Register scratch) {
4126 // zero-extend src to 64-bit
4127 #if V8_TARGET_ARCH_PPC64
4128 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4129 mtfprwz(dst, src);
4130 return;
4131 }
4132 #endif
4133
4134 DCHECK(!src.is(scratch));
4135 subi(sp, sp, Operand(kDoubleSize));
4136 #if V8_TARGET_ARCH_PPC64
4137 clrldi(scratch, src, Operand(32));
4138 std(scratch, MemOperand(sp, 0));
4139 #else
4140 li(scratch, Operand::Zero());
4141 stw(scratch, MemOperand(sp, Register::kExponentOffset));
4142 stw(src, MemOperand(sp, Register::kMantissaOffset));
4143 #endif
4144 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4145 lfd(dst, MemOperand(sp, 0));
4146 addi(sp, sp, Operand(kDoubleSize));
4147 }
4148
4149
4150 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
4151 #if !V8_TARGET_ARCH_PPC64
4152 Register src_hi,
4153 #endif
4154 Register src) {
4155 #if V8_TARGET_ARCH_PPC64
4156 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4157 mtfprd(dst, src);
4158 return;
4159 }
4160 #endif
4161
4162 subi(sp, sp, Operand(kDoubleSize));
4163 #if V8_TARGET_ARCH_PPC64
4164 std(src, MemOperand(sp, 0));
4165 #else
4166 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
4167 stw(src, MemOperand(sp, Register::kMantissaOffset));
4168 #endif
4169 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4170 lfd(dst, MemOperand(sp, 0));
4171 addi(sp, sp, Operand(kDoubleSize));
4172 }
4173
4174
4175 #if V8_TARGET_ARCH_PPC64
4176 void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
4177 Register src_hi,
4178 Register src_lo,
4179 Register scratch) {
4180 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4181 sldi(scratch, src_hi, Operand(32));
4182 rldimi(scratch, src_lo, 0, 32);
4183 mtfprd(dst, scratch);
4184 return;
4185 }
4186
4187 subi(sp, sp, Operand(kDoubleSize));
4188 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
4189 stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
4190 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4191 lfd(dst, MemOperand(sp));
4192 addi(sp, sp, Operand(kDoubleSize));
4193 }
4194 #endif
4195
4196
4197 void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
4198 #if V8_TARGET_ARCH_PPC64
4199 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4200 mffprwz(dst, src);
4201 return;
4202 }
4203 #endif
4204
4205 subi(sp, sp, Operand(kDoubleSize));
4206 stfd(src, MemOperand(sp));
4207 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4208 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
4209 addi(sp, sp, Operand(kDoubleSize));
4210 }
4211
4212
4213 void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
4214 #if V8_TARGET_ARCH_PPC64
4215 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4216 mffprd(dst, src);
4217 srdi(dst, dst, Operand(32));
4218 return;
4219 }
4220 #endif
4221
4222 subi(sp, sp, Operand(kDoubleSize));
4223 stfd(src, MemOperand(sp));
4224 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4225 lwz(dst, MemOperand(sp, Register::kExponentOffset));
4226 addi(sp, sp, Operand(kDoubleSize));
4227 }
4228
4229
4230 void MacroAssembler::MovDoubleToInt64(
4231 #if !V8_TARGET_ARCH_PPC64
4232 Register dst_hi,
4233 #endif
4234 Register dst, DoubleRegister src) {
4235 #if V8_TARGET_ARCH_PPC64
4236 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4237 mffprd(dst, src);
4238 return;
4239 }
4240 #endif
4241
4242 subi(sp, sp, Operand(kDoubleSize));
4243 stfd(src, MemOperand(sp));
4244 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4245 #if V8_TARGET_ARCH_PPC64
4246 ld(dst, MemOperand(sp, 0));
4247 #else
4248 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
4249 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
4250 #endif
4251 addi(sp, sp, Operand(kDoubleSize));
4252 }
4253
4254
4255 void MacroAssembler::Add(Register dst, Register src, intptr_t value,
4256 Register scratch) {
4257 if (is_int16(value)) {
4258 addi(dst, src, Operand(value));
4259 } else {
4260 mov(scratch, Operand(value));
4261 add(dst, src, scratch);
4262 }
4263 }
4264
4265
4266 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
4267 CRegister cr) {
4268 intptr_t value = src2.immediate();
4269 if (is_int16(value)) {
4270 cmpi(src1, src2, cr);
4271 } else {
4272 mov(scratch, src2);
4273 cmp(src1, scratch, cr);
4274 }
4275 }
4276
4277
4278 void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
4279 CRegister cr) {
4280 intptr_t value = src2.immediate();
4281 if (is_uint16(value)) {
4282 cmpli(src1, src2, cr);
4283 } else {
4284 mov(scratch, src2);
4285 cmpl(src1, scratch, cr);
4286 }
4287 }
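// The split between these helpers mirrors the PPC immediate encodings:
// signed compares (cmpi/cmpwi) take a 16-bit sign-extended immediate, so
// values in -32768..32767 avoid the scratch register, while unsigned
// compares (cmpli/cmplwi) take 0..65535. For example,
// Cmpi(r3, Operand(-1), r0) encodes directly, but
// Cmpli(r3, Operand(-1), r0) must materialize the value through the
// scratch register first.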
4288
4289
4290 void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
4291 CRegister cr) {
4292 intptr_t value = src2.immediate();
4293 if (is_int16(value)) {
4294 cmpwi(src1, src2, cr);
4295 } else {
4296 mov(scratch, src2);
4297 cmpw(src1, scratch, cr);
4298 }
4299 }
4300
4301
4302 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
4303 Register scratch, CRegister cr) {
4304 intptr_t value = src2.immediate();
4305 if (is_uint16(value)) {
4306 cmplwi(src1, src2, cr);
4307 } else {
4308 mov(scratch, src2);
4309 cmplw(src1, scratch, cr);
4310 }
4311 }
4312
4313
4314 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
4315 RCBit rc) {
4316 if (rb.is_reg()) {
4317 and_(ra, rs, rb.rm(), rc);
4318 } else {
4319 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
4320 andi(ra, rs, rb);
4321 } else {
4322 // mov handles the relocation.
4323 DCHECK(!rs.is(r0));
4324 mov(r0, rb);
4325 and_(ra, rs, r0, rc);
4326 }
4327 }
4328 }
4329
4330
4331 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
4332 if (rb.is_reg()) {
4333 orx(ra, rs, rb.rm(), rc);
4334 } else {
4335 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
4336 ori(ra, rs, rb);
4337 } else {
4338 // mov handles the relocation.
4339 DCHECK(!rs.is(r0));
4340 mov(r0, rb);
4341 orx(ra, rs, r0, rc);
4342 }
4343 }
4344 }
4345
4346
4347 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
4348 RCBit rc) {
4349 if (rb.is_reg()) {
4350 xor_(ra, rs, rb.rm(), rc);
4351 } else {
4352 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
4353 xori(ra, rs, rb);
4354 } else {
4355 // mov handles the relocation.
4356 DCHECK(!rs.is(r0));
4357 mov(r0, rb);
4358 xor_(ra, rs, r0, rc);
4359 }
4360 }
4361 }
4362
4363
4364 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
4365 CRegister cr) {
4366 #if V8_TARGET_ARCH_PPC64
4367 LoadSmiLiteral(scratch, smi);
4368 cmp(src1, scratch, cr);
4369 #else
4370 Cmpi(src1, Operand(smi), scratch, cr);
4371 #endif
4372 }
4373
4374
4375 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
4376 CRegister cr) {
4377 #if V8_TARGET_ARCH_PPC64
4378 LoadSmiLiteral(scratch, smi);
4379 cmpl(src1, scratch, cr);
4380 #else
4381 Cmpli(src1, Operand(smi), scratch, cr);
4382 #endif
4383 }
4384
4385
4386 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
4387 Register scratch) {
4388 #if V8_TARGET_ARCH_PPC64
4389 LoadSmiLiteral(scratch, smi);
4390 add(dst, src, scratch);
4391 #else
4392 Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
4393 #endif
4394 }
4395
4396
4397 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
4398 Register scratch) {
4399 #if V8_TARGET_ARCH_PPC64
4400 LoadSmiLiteral(scratch, smi);
4401 sub(dst, src, scratch);
4402 #else
4403 Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
4404 #endif
4405 }
4406
4407
4408 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
4409 Register scratch, RCBit rc) {
4410 #if V8_TARGET_ARCH_PPC64
4411 LoadSmiLiteral(scratch, smi);
4412 and_(dst, src, scratch, rc);
4413 #else
4414 And(dst, src, Operand(smi), rc);
4415 #endif
4416 }
4417
4418
4419 // Load a pointer-sized value from the memory location.
4420 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
4421 Register scratch) {
4422 int offset = mem.offset();
4423
4424 if (!scratch.is(no_reg) && !is_int16(offset)) {
4425 /* cannot use d-form */
4426 LoadIntLiteral(scratch, offset);
4427 #if V8_TARGET_ARCH_PPC64
4428 ldx(dst, MemOperand(mem.ra(), scratch));
4429 #else
4430 lwzx(dst, MemOperand(mem.ra(), scratch));
4431 #endif
4432 } else {
4433 #if V8_TARGET_ARCH_PPC64
4434 int misaligned = (offset & 3);
4435 if (misaligned) {
4436 // Adjust the base to conform to the offset alignment requirements.
4437 // TODO: enhance to use scratch if dst is unsuitable.
4438 DCHECK(!dst.is(r0));
4439 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4440 ld(dst, MemOperand(dst, (offset & ~3) + 4));
4441 } else {
4442 ld(dst, mem);
4443 }
4444 #else
4445 lwz(dst, mem);
4446 #endif
4447 }
4448 }
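// Worked example of the misaligned path above: ld is a DS-form instruction
// whose displacement must be a multiple of 4. For offset == 6 the code
// computes addi(dst, base, (6 & 3) - 4), i.e. base - 2, then loads at
// displacement (6 & ~3) + 4 == 8; (base - 2) + 8 == base + 6, the intended
// address, now expressed with a 4-byte-aligned displacement.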
4449
4450
4451 // Store a pointer-sized value to the memory location.
4452 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
4453 Register scratch) {
4454 int offset = mem.offset();
4455
4456 if (!scratch.is(no_reg) && !is_int16(offset)) {
4457 /* cannot use d-form */
4458 LoadIntLiteral(scratch, offset);
4459 #if V8_TARGET_ARCH_PPC64
4460 stdx(src, MemOperand(mem.ra(), scratch));
4461 #else
4462 stwx(src, MemOperand(mem.ra(), scratch));
4463 #endif
4464 } else {
4465 #if V8_TARGET_ARCH_PPC64
4466 int misaligned = (offset & 3);
4467 if (misaligned) {
4468 // Adjust the base to conform to the offset alignment requirements;
4469 // a suitable scratch register is required here.
4470 DCHECK(!scratch.is(no_reg));
4471 if (scratch.is(r0)) {
4472 LoadIntLiteral(scratch, offset);
4473 stdx(src, MemOperand(mem.ra(), scratch));
4474 } else {
4475 addi(scratch, mem.ra(), Operand((offset & 3) - 4));
4476 std(src, MemOperand(scratch, (offset & ~3) + 4));
4477 }
4478 } else {
4479 std(src, mem);
4480 }
4481 #else
4482 stw(src, mem);
4483 #endif
4484 }
4485 }
4486
4487 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
4488 Register scratch) {
4489 int offset = mem.offset();
4490
4491 if (!scratch.is(no_reg) && !is_int16(offset)) {
4492 /* cannot use d-form */
4493 LoadIntLiteral(scratch, offset);
4494 #if V8_TARGET_ARCH_PPC64
4495 // lwax(dst, MemOperand(mem.ra(), scratch));
4496 DCHECK(0); // lwax not yet implemented
4497 #else
4498 lwzx(dst, MemOperand(mem.ra(), scratch));
4499 #endif
4500 } else {
4501 #if V8_TARGET_ARCH_PPC64
4502 int misaligned = (offset & 3);
4503 if (misaligned) {
4504 // Adjust the base to conform to the offset alignment requirements.
4505 // TODO: enhance to use scratch if dst is unsuitable.
4506 DCHECK(!dst.is(r0));
4507 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4508 lwa(dst, MemOperand(dst, (offset & ~3) + 4));
4509 } else {
4510 lwa(dst, mem);
4511 }
4512 #else
4513 lwz(dst, mem);
4514 #endif
4515 }
4516 }
4517
4518
4519 // Variable length depending on whether offset fits into immediate field
4520 // MemOperand currently only supports d-form
4521 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
4522 Register scratch) {
4523 Register base = mem.ra();
4524 int offset = mem.offset();
4525
4526 if (!is_int16(offset)) {
4527 LoadIntLiteral(scratch, offset);
4528 lwzx(dst, MemOperand(base, scratch));
4529 } else {
4530 lwz(dst, mem);
4531 }
4532 }
4533
4534
4535 // Variable length depending on whether offset fits into immediate field
4536 // MemOperand currently only supports d-form.
4537 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
4538 Register scratch) {
4539 Register base = mem.ra();
4540 int offset = mem.offset();
4541
4542 if (!is_int16(offset)) {
4543 LoadIntLiteral(scratch, offset);
4544 stwx(src, MemOperand(base, scratch));
4545 } else {
4546 stw(src, mem);
4547 }
4548 }
4549
4550
4551 // Variable length depending on whether offset fits into immediate field
4552 // MemOperand currently only supports d-form
4553 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
4554 Register scratch) {
4555 Register base = mem.ra();
4556 int offset = mem.offset();
4557
4558 if (!is_int16(offset)) {
4559 LoadIntLiteral(scratch, offset);
4560 lhzx(dst, MemOperand(base, scratch));
4561 } else {
4562 lhz(dst, mem);
4563 }
4564 }
4565
4566
4567 // Variable length depending on whether offset fits into immediate field
4568 // MemOperand currently only supports d-form.
4569 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
4570 Register scratch) {
4571 Register base = mem.ra();
4572 int offset = mem.offset();
4573
4574 if (!is_int16(offset)) {
4575 LoadIntLiteral(scratch, offset);
4576 sthx(src, MemOperand(base, scratch));
4577 } else {
4578 sth(src, mem);
4579 }
4580 }
4581
4582
4583 // Variable length depending on whether offset fits into immediate field
4584 // MemOperand currently only supports d-form
4585 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
4586 Register scratch) {
4587 Register base = mem.ra();
4588 int offset = mem.offset();
4589
4590 if (!is_int16(offset)) {
4591 LoadIntLiteral(scratch, offset);
4592 lbzx(dst, MemOperand(base, scratch));
4593 } else {
4594 lbz(dst, mem);
4595 }
4596 }
4597
4598
4599 // Variable length depending on whether offset fits into immediate field
4600 // MemOperand currently only supports d-form.
4601 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
4602 Register scratch) {
4603 Register base = mem.ra();
4604 int offset = mem.offset();
4605
4606 if (!is_int16(offset)) {
4607 LoadIntLiteral(scratch, offset);
4608 stbx(src, MemOperand(base, scratch));
4609 } else {
4610 stb(src, mem);
4611 }
4612 }
4613
4614
4615 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
4616 Representation r, Register scratch) {
4617 DCHECK(!r.IsDouble());
4618 if (r.IsInteger8()) {
4619 LoadByte(dst, mem, scratch);
4620 extsb(dst, dst);
4621 } else if (r.IsUInteger8()) {
4622 LoadByte(dst, mem, scratch);
4623 } else if (r.IsInteger16()) {
4624 LoadHalfWord(dst, mem, scratch);
4625 extsh(dst, dst);
4626 } else if (r.IsUInteger16()) {
4627 LoadHalfWord(dst, mem, scratch);
4628 #if V8_TARGET_ARCH_PPC64
4629 } else if (r.IsInteger32()) {
4630 LoadWord(dst, mem, scratch);
4631 #endif
4632 } else {
4633 LoadP(dst, mem, scratch);
4634 }
4635 }
4636
4637
4638 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
4639 Representation r, Register scratch) {
4640 DCHECK(!r.IsDouble());
4641 if (r.IsInteger8() || r.IsUInteger8()) {
4642 StoreByte(src, mem, scratch);
4643 } else if (r.IsInteger16() || r.IsUInteger16()) {
4644 StoreHalfWord(src, mem, scratch);
4645 #if V8_TARGET_ARCH_PPC64
4646 } else if (r.IsInteger32()) {
4647 StoreWord(src, mem, scratch);
4648 #endif
4649 } else {
4650 if (r.IsHeapObject()) {
4651 AssertNotSmi(src);
4652 } else if (r.IsSmi()) {
4653 AssertSmi(src);
4654 }
4655 StoreP(src, mem, scratch);
4656 }
4657 }
4658
4659
4660 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
4661 Register scratch_reg,
4662 Label* no_memento_found) {
4663 ExternalReference new_space_start =
4664 ExternalReference::new_space_start(isolate());
4665 ExternalReference new_space_allocation_top =
4666 ExternalReference::new_space_allocation_top_address(isolate());
4667 addi(scratch_reg, receiver_reg,
4668 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
4669 Cmpi(scratch_reg, Operand(new_space_start), r0);
4670 blt(no_memento_found);
4671 mov(ip, Operand(new_space_allocation_top));
4672 LoadP(ip, MemOperand(ip));
4673 cmp(scratch_reg, ip);
4674 bgt(no_memento_found);
4675 LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
4676 Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
4677 r0);
4678 }
4679
4680
4681 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
4682 Register reg4, Register reg5,
4683 Register reg6) {
4684 RegList regs = 0;
4685 if (reg1.is_valid()) regs |= reg1.bit();
4686 if (reg2.is_valid()) regs |= reg2.bit();
4687 if (reg3.is_valid()) regs |= reg3.bit();
4688 if (reg4.is_valid()) regs |= reg4.bit();
4689 if (reg5.is_valid()) regs |= reg5.bit();
4690 if (reg6.is_valid()) regs |= reg6.bit();
4691
4692 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
4693 Register candidate = Register::FromAllocationIndex(i);
4694 if (regs & candidate.bit()) continue;
4695 return candidate;
4696 }
4697 UNREACHABLE();
4698 return no_reg;
4699 }
4700
4701
4702 void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
4703 Register scratch0,
4704 Register scratch1,
4705 Label* found) {
4706 DCHECK(!scratch1.is(scratch0));
4707 Factory* factory = isolate()->factory();
4708 Register current = scratch0;
4709 Label loop_again;
4710
4711 // Walk the prototype chain starting from the object itself.
4712 mr(current, object);
4713
4714 // Loop based on the map going up the prototype chain.
4715 bind(&loop_again);
4716 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
4717 lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4718 DecodeField<Map::ElementsKindBits>(scratch1);
4719 cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
4720 beq(found);
4721 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
4722 Cmpi(current, Operand(factory->null_value()), r0);
4723 bne(&loop_again);
4724 }
4725
4726
4727 #ifdef DEBUG
4728 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
4729 Register reg5, Register reg6, Register reg7, Register reg8) {
4730 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
4731 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4732 reg7.is_valid() + reg8.is_valid();
4733
4734 RegList regs = 0;
4735 if (reg1.is_valid()) regs |= reg1.bit();
4736 if (reg2.is_valid()) regs |= reg2.bit();
4737 if (reg3.is_valid()) regs |= reg3.bit();
4738 if (reg4.is_valid()) regs |= reg4.bit();
4739 if (reg5.is_valid()) regs |= reg5.bit();
4740 if (reg6.is_valid()) regs |= reg6.bit();
4741 if (reg7.is_valid()) regs |= reg7.bit();
4742 if (reg8.is_valid()) regs |= reg8.bit();
4743 int n_of_non_aliasing_regs = NumRegs(regs);
4744
4745 return n_of_valid_regs != n_of_non_aliasing_regs;
4746 }
4747 #endif
4748
4749
4750 CodePatcher::CodePatcher(byte* address, int instructions,
4751 FlushICache flush_cache)
4752 : address_(address),
4753 size_(instructions * Assembler::kInstrSize),
4754 masm_(NULL, address, size_ + Assembler::kGap),
4755 flush_cache_(flush_cache) {
4756 // Create a new macro assembler pointing to the address of the code to patch.
4757 // The size is adjusted with kGap in order for the assembler to generate
4758 // size bytes of instructions without failing with buffer size constraints.
4759 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4760 }
4761
4762
4763 CodePatcher::~CodePatcher() {
4764 // Indicate that code has changed.
4765 if (flush_cache_ == FLUSH) {
4766 CpuFeatures::FlushICache(address_, size_);
4767 }
4768
4769 // Check that the code was patched as expected.
4770 DCHECK(masm_.pc_ == address_ + size_);
4771 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4772 }
4773
4774
4775 void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
4776
4777
4778 void CodePatcher::EmitCondition(Condition cond) {
4779 Instr instr = Assembler::instr_at(masm_.pc_);
4780 switch (cond) {
4781 case eq:
4782 instr = (instr & ~kCondMask) | BT;
4783 break;
4784 case ne:
4785 instr = (instr & ~kCondMask) | BF;
4786 break;
4787 default:
4788 UNIMPLEMENTED();
4789 }
4790 masm_.emit(instr);
4791 }
4792
4793
4794 void MacroAssembler::TruncatingDiv(Register result, Register dividend,
4795 int32_t divisor) {
4796 DCHECK(!dividend.is(result));
4797 DCHECK(!dividend.is(r0));
4798 DCHECK(!result.is(r0));
4799 base::MagicNumbersForDivision<uint32_t> mag =
4800 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
4801 mov(r0, Operand(mag.multiplier));
4802 mulhw(result, dividend, r0);
4803 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
4804 if (divisor > 0 && neg) {
4805 add(result, result, dividend);
4806 }
4807 if (divisor < 0 && !neg && mag.multiplier > 0) {
4808 sub(result, result, dividend);
4809 }
4810 if (mag.shift > 0) srawi(result, result, mag.shift);
4811 ExtractBit(r0, dividend, 31);
4812 add(result, result, r0);
4813 }
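// A sketch of the arithmetic above, using the standard magic numbers that
// base::SignedDivisionByConstant computes (Hacker's Delight): for
// divisor == 3 the multiplier is 0x55555556 with shift 0, so mulhw yields
// floor(dividend * 0x55555556 / 2^32), which is dividend / 3 for
// non-negative dividends; the final ExtractBit/add of the dividend's sign
// bit then corrects the quotient toward zero for negative dividends.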
4814
4815 } // namespace internal
4816 } // namespace v8
4817
4818 #endif // V8_TARGET_ARCH_PPC