Chromium Code Reviews

Side by Side Diff: src/ppc/macro-assembler-ppc.cc

Issue 422063005: Contribution of PowerPC port. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: re-upload - catch up to 8/19 level (Created 6 years, 3 months ago)
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 //
3 // Copyright IBM Corp. 2012, 2013. All rights reserved.
4 //
5 // Use of this source code is governed by a BSD-style license that can be
6 // found in the LICENSE file.
7
8 #include <assert.h> // For assert
9 #include <limits.h> // For LONG_MIN, LONG_MAX.
10
11 #include "src/v8.h"
12
13 #if V8_TARGET_ARCH_PPC
14
15 #include "src/bootstrapper.h"
16 #include "src/codegen.h"
17 #include "src/cpu-profiler.h"
18 #include "src/debug.h"
19 #include "src/isolate-inl.h"
20 #include "src/runtime.h"
21
22 namespace v8 {
23 namespace internal {
24
25 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
26 : Assembler(arg_isolate, buffer, size),
27 generating_stub_(false),
28 has_frame_(false) {
29 if (isolate() != NULL) {
30 code_object_ =
31 Handle<Object>(isolate()->heap()->undefined_value(), isolate());
32 }
33 }
34
35
36 void MacroAssembler::Jump(Register target, Condition cond) {
37 DCHECK(cond == al);
38 mtctr(target);
39 bctr();
40 }
41
42
43 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
44 Condition cond, CRegister cr) {
45 Label skip;
46
47 if (cond != al) b(NegateCondition(cond), &skip, cr);
48
49 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
50
51 mov(r0, Operand(target, rmode));
52 mtctr(r0);
53 bctr();
54
55 bind(&skip);
56 // mov(pc, Operand(target, rmode), LeaveCC, cond);
57 }
58
59
60 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
61 CRegister cr) {
62 DCHECK(!RelocInfo::IsCodeTarget(rmode));
63 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
64 }
65
66
67 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
68 Condition cond) {
69 DCHECK(RelocInfo::IsCodeTarget(rmode));
70 // 'code' is always generated ppc code, never THUMB code
71 AllowDeferredHandleDereference embedding_raw_address;
72 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
73 }
74
75
76 int MacroAssembler::CallSize(Register target, Condition cond) {
77 return 2 * kInstrSize;
78 }
79
80
81 void MacroAssembler::Call(Register target, Condition cond) {
82 BlockTrampolinePoolScope block_trampoline_pool(this);
83 Label start;
84 bind(&start);
85 DCHECK(cond == al); // in preparation for removing the condition argument
86
87 // Statement positions are expected to be recorded when the target
88 // address is loaded.
89 positions_recorder()->WriteRecordedPositions();
90
91 // branch via link register and set LK bit for return point
92 mtlr(target);
93 bclr(BA, SetLK);
94
95 DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
96 }
97
98
99 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
100 Condition cond) {
101 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
102 return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize;
103 }
104
105
106 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
107 RelocInfo::Mode rmode,
108 Condition cond) {
109 return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
110 }
111
112
113 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
114 Condition cond) {
115 BlockTrampolinePoolScope block_trampoline_pool(this);
116 DCHECK(cond == al);
117
118 #ifdef DEBUG
119 // Check the expected size before generating code to ensure we assume the same
120 // constant pool availability (e.g., whether the constant pool is full or not).
121 int expected_size = CallSize(target, rmode, cond);
122 Label start;
123 bind(&start);
124 #endif
125
126 // Statement positions are expected to be recorded when the target
127 // address is loaded.
128 positions_recorder()->WriteRecordedPositions();
129
130 // This can likely be optimized to make use of bc() with a 24-bit relative offset:
131 //
132 // RecordRelocInfo(x.rmode_, x.imm_);
133 // bc( BA, .... offset, LKset);
134 //
135
136 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
137 mtlr(ip);
138 bclr(BA, SetLK);
139
140 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
141 }
142
143
144 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
145 TypeFeedbackId ast_id, Condition cond) {
146 AllowDeferredHandleDereference using_raw_address;
147 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
148 }
149
150
151 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
152 TypeFeedbackId ast_id, Condition cond) {
153 BlockTrampolinePoolScope block_trampoline_pool(this);
154 DCHECK(RelocInfo::IsCodeTarget(rmode));
155
156 #ifdef DEBUG
157 // Check the expected size before generating code to ensure we assume the same
158 // constant pool availability (e.g., whether the constant pool is full or not).
159 int expected_size = CallSize(code, rmode, ast_id, cond);
160 Label start;
161 bind(&start);
162 #endif
163
164 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
165 SetRecordedAstId(ast_id);
166 rmode = RelocInfo::CODE_TARGET_WITH_ID;
167 }
168 AllowDeferredHandleDereference using_raw_address;
169 Call(reinterpret_cast<Address>(code.location()), rmode, cond);
170 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
171 }
172
173
174 void MacroAssembler::Ret(Condition cond) {
175 DCHECK(cond == al);
176 blr();
177 }
178
179
180 void MacroAssembler::Drop(int count, Condition cond) {
181 DCHECK(cond == al);
182 if (count > 0) {
183 Add(sp, sp, count * kPointerSize, r0);
184 }
185 }
186
187
188 void MacroAssembler::Ret(int drop, Condition cond) {
189 Drop(drop, cond);
190 Ret(cond);
191 }
192
193
194 void MacroAssembler::Call(Label* target) { b(target, SetLK); }
195
196
197 void MacroAssembler::Push(Handle<Object> handle) {
198 mov(ip, Operand(handle));
199 push(ip);
200 }
201
202
203 void MacroAssembler::Move(Register dst, Handle<Object> value) {
204 AllowDeferredHandleDereference smi_check;
205 if (value->IsSmi()) {
206 LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
207 } else {
208 DCHECK(value->IsHeapObject());
209 if (isolate()->heap()->InNewSpace(*value)) {
210 Handle<Cell> cell = isolate()->factory()->NewCell(value);
211 mov(dst, Operand(cell));
212 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
213 } else {
214 mov(dst, Operand(value));
215 }
216 }
217 }
218
219
220 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
221 DCHECK(cond == al);
222 if (!dst.is(src)) {
223 mr(dst, src);
224 }
225 }
226
227
228 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
229 if (!dst.is(src)) {
230 fmr(dst, src);
231 }
232 }
233
234
235 void MacroAssembler::MultiPush(RegList regs) {
236 int16_t num_to_push = NumberOfBitsSet(regs);
237 int16_t stack_offset = num_to_push * kPointerSize;
238
239 subi(sp, sp, Operand(stack_offset));
240 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
241 if ((regs & (1 << i)) != 0) {
242 stack_offset -= kPointerSize;
243 StoreP(ToRegister(i), MemOperand(sp, stack_offset));
244 }
245 }
246 }
247
248
249 void MacroAssembler::MultiPop(RegList regs) {
250 int16_t stack_offset = 0;
251
252 for (int16_t i = 0; i < kNumRegisters; i++) {
253 if ((regs & (1 << i)) != 0) {
254 LoadP(ToRegister(i), MemOperand(sp, stack_offset));
255 stack_offset += kPointerSize;
256 }
257 }
258 addi(sp, sp, Operand(stack_offset));
259 }
260
261
262 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
263 Condition cond) {
264 DCHECK(cond == al);
265 LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
266 }
267
268
269 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
270 Condition cond) {
271 DCHECK(cond == al);
272 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
273 }
274
275
276 void MacroAssembler::InNewSpace(Register object, Register scratch,
277 Condition cond, Label* branch) {
278 // N.B. scratch may be the same register as object
279 DCHECK(cond == eq || cond == ne);
280 mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
281 and_(scratch, object, r0);
282 mov(r0, Operand(ExternalReference::new_space_start(isolate())));
283 cmp(scratch, r0);
284 b(cond, branch);
285 }
286
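
For reference, the masked compare generated above amounts to the following check (a minimal sketch; the mask and start values come from the ExternalReferences loaded into r0):

#include <cstdint>

// Sketch only: an address is in new space when its bits selected by
// new_space_mask equal the new-space start address.
static inline bool AddressInNewSpace(uintptr_t addr, uintptr_t new_space_mask,
                                     uintptr_t new_space_start) {
  return (addr & new_space_mask) == new_space_start;
}
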
287
288 void MacroAssembler::RecordWriteField(
289 Register object, int offset, Register value, Register dst,
290 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
291 RememberedSetAction remembered_set_action, SmiCheck smi_check,
292 PointersToHereCheck pointers_to_here_check_for_value) {
293 // First, check if a write barrier is even needed. The tests below
294 // catch stores of Smis.
295 Label done;
296
297 // Skip barrier if writing a smi.
298 if (smi_check == INLINE_SMI_CHECK) {
299 JumpIfSmi(value, &done);
300 }
301
302 // Although the object register is tagged, the offset is relative to the start
303 // of the object, so the offset must be a multiple of kPointerSize.
304 DCHECK(IsAligned(offset, kPointerSize));
305
306 Add(dst, object, offset - kHeapObjectTag, r0);
307 if (emit_debug_code()) {
308 Label ok;
309 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
310 beq(&ok, cr0);
311 stop("Unaligned cell in write barrier");
312 bind(&ok);
313 }
314
315 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
316 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
317
318 bind(&done);
319
320 // Clobber clobbered input registers when running with the debug-code flag
321 // turned on to provoke errors.
322 if (emit_debug_code()) {
323 mov(value, Operand(BitCast<intptr_t>(kZapValue + 4)));
324 mov(dst, Operand(BitCast<intptr_t>(kZapValue + 8)));
325 }
326 }
327
328
329 // Will clobber 4 registers: object, map, dst, ip. The
330 // register 'object' contains a heap object pointer.
331 void MacroAssembler::RecordWriteForMap(Register object, Register map,
332 Register dst,
333 LinkRegisterStatus lr_status,
334 SaveFPRegsMode fp_mode) {
335 if (emit_debug_code()) {
336 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
337 Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
338 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
339 }
340
341 if (!FLAG_incremental_marking) {
342 return;
343 }
344
345 if (emit_debug_code()) {
346 LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
347 cmp(ip, map);
348 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
349 }
350
351 Label done;
352
353 // A single check of the map's page's interesting flag suffices, since it is
354 // only set during incremental collection, and then it's also guaranteed that
355 // the from object's page's interesting flag is also set. This optimization
356 // relies on the fact that maps can never be in new space.
357 CheckPageFlag(map,
358 map, // Used as scratch.
359 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
360
361 addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
362 if (emit_debug_code()) {
363 Label ok;
364 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
365 beq(&ok, cr0);
366 stop("Unaligned cell in write barrier");
367 bind(&ok);
368 }
369
370 // Record the actual write.
371 if (lr_status == kLRHasNotBeenSaved) {
372 mflr(r0);
373 push(r0);
374 }
375 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
376 fp_mode);
377 CallStub(&stub);
378 if (lr_status == kLRHasNotBeenSaved) {
379 pop(r0);
380 mtlr(r0);
381 }
382
383 bind(&done);
384
385 // Count number of write barriers in generated code.
386 isolate()->counters()->write_barriers_static()->Increment();
387 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
388
389 // Clobber clobbered registers when running with the debug-code flag
390 // turned on to provoke errors.
391 if (emit_debug_code()) {
392 mov(dst, Operand(BitCast<intptr_t>(kZapValue + 12)));
393 mov(map, Operand(BitCast<intptr_t>(kZapValue + 16)));
394 }
395 }
396
397
398 // Will clobber 4 registers: object, address, scratch, ip. The
399 // register 'object' contains a heap object pointer. The heap object
400 // tag is shifted away.
401 void MacroAssembler::RecordWrite(
402 Register object, Register address, Register value,
403 LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
404 RememberedSetAction remembered_set_action, SmiCheck smi_check,
405 PointersToHereCheck pointers_to_here_check_for_value) {
406 DCHECK(!object.is(value));
407 if (emit_debug_code()) {
408 LoadP(ip, MemOperand(address));
409 cmp(ip, value);
410 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
411 }
412
413 if (remembered_set_action == OMIT_REMEMBERED_SET &&
414 !FLAG_incremental_marking) {
415 return;
416 }
417
418 // First, check if a write barrier is even needed. The tests below
419 // catch stores of smis and stores into the young generation.
420 Label done;
421
422 if (smi_check == INLINE_SMI_CHECK) {
423 JumpIfSmi(value, &done);
424 }
425
426 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
427 CheckPageFlag(value,
428 value, // Used as scratch.
429 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
430 }
431 CheckPageFlag(object,
432 value, // Used as scratch.
433 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
434
435 // Record the actual write.
436 if (lr_status == kLRHasNotBeenSaved) {
437 mflr(r0);
438 push(r0);
439 }
440 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
441 fp_mode);
442 CallStub(&stub);
443 if (lr_status == kLRHasNotBeenSaved) {
444 pop(r0);
445 mtlr(r0);
446 }
447
448 bind(&done);
449
450 // Count number of write barriers in generated code.
451 isolate()->counters()->write_barriers_static()->Increment();
452 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
453 value);
454
455 // Clobber clobbered registers when running with the debug-code flag
456 // turned on to provoke errors.
457 if (emit_debug_code()) {
458 mov(address, Operand(BitCast<intptr_t>(kZapValue + 12)));
459 mov(value, Operand(BitCast<intptr_t>(kZapValue + 16)));
460 }
461 }
462
463
464 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
465 Register address, Register scratch,
466 SaveFPRegsMode fp_mode,
467 RememberedSetFinalAction and_then) {
468 Label done;
469 if (emit_debug_code()) {
470 Label ok;
471 JumpIfNotInNewSpace(object, scratch, &ok);
472 stop("Remembered set pointer is in new space");
473 bind(&ok);
474 }
475 // Load store buffer top.
476 ExternalReference store_buffer =
477 ExternalReference::store_buffer_top(isolate());
478 mov(ip, Operand(store_buffer));
479 LoadP(scratch, MemOperand(ip));
480 // Store pointer to buffer and increment buffer top.
481 StoreP(address, MemOperand(scratch));
482 addi(scratch, scratch, Operand(kPointerSize));
483 // Write back new top of buffer.
484 StoreP(scratch, MemOperand(ip));
485 // Call stub on end of buffer.
486 // Check for end of buffer.
487 mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
488 and_(r0, scratch, r0, SetRC);
489
490 if (and_then == kFallThroughAtEnd) {
491 beq(&done, cr0);
492 } else {
493 DCHECK(and_then == kReturnAtEnd);
494 beq(&done, cr0);
495 }
496 mflr(r0);
497 push(r0);
498 StoreBufferOverflowStub store_buffer_overflow =
499 StoreBufferOverflowStub(isolate(), fp_mode);
500 CallStub(&store_buffer_overflow);
501 pop(r0);
502 mtlr(r0);
503 bind(&done);
504 if (and_then == kReturnAtEnd) {
505 Ret();
506 }
507 }
508
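
As a rough C++ model of the store-buffer bookkeeping emitted above (a sketch under assumed names; this is not the actual V8 StoreBuffer implementation):

#include <cstdint>

// Hypothetical model: record 'slot_address' in the store buffer and report
// whether the new top crosses the overflow boundary, in which case the
// generated code calls StoreBufferOverflowStub.
static inline bool RecordInStoreBuffer(uintptr_t** store_buffer_top,
                                       uintptr_t* slot_address,
                                       uintptr_t overflow_bit_mask) {
  uintptr_t* top = *store_buffer_top;
  *top++ = reinterpret_cast<uintptr_t>(slot_address);  // store pointer to buffer
  *store_buffer_top = top;                             // write back new top
  // Mirrors the 'and_(r0, scratch, r0, SetRC)' test against the overflow bit.
  return (reinterpret_cast<uintptr_t>(top) & overflow_bit_mask) != 0;
}
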
509
510 void MacroAssembler::PushFixedFrame(Register marker_reg) {
511 mflr(r0);
512 #if V8_OOL_CONSTANT_POOL
513 if (marker_reg.is_valid()) {
514 Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
515 } else {
516 Push(r0, fp, kConstantPoolRegister, cp);
517 }
518 #else
519 if (marker_reg.is_valid()) {
520 Push(r0, fp, cp, marker_reg);
521 } else {
522 Push(r0, fp, cp);
523 }
524 #endif
525 }
526
527
528 void MacroAssembler::PopFixedFrame(Register marker_reg) {
529 #if V8_OOL_CONSTANT_POOL
530 if (marker_reg.is_valid()) {
531 Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
532 } else {
533 Pop(r0, fp, kConstantPoolRegister, cp);
534 }
535 #else
536 if (marker_reg.is_valid()) {
537 Pop(r0, fp, cp, marker_reg);
538 } else {
539 Pop(r0, fp, cp);
540 }
541 #endif
542 mtlr(r0);
543 }
544
545
546 // Push and pop all registers that can hold pointers.
547 void MacroAssembler::PushSafepointRegisters() {
548 // Safepoints expect a block of kNumSafepointRegisters values on the
549 // stack, so adjust the stack for unsaved registers.
550 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
551 DCHECK(num_unsaved >= 0);
552 if (num_unsaved > 0) {
553 subi(sp, sp, Operand(num_unsaved * kPointerSize));
554 }
555 MultiPush(kSafepointSavedRegisters);
556 }
557
558
559 void MacroAssembler::PopSafepointRegisters() {
560 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
561 MultiPop(kSafepointSavedRegisters);
562 if (num_unsaved > 0) {
563 addi(sp, sp, Operand(num_unsaved * kPointerSize));
564 }
565 }
566
567
568 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
569 StoreP(src, SafepointRegisterSlot(dst));
570 }
571
572
573 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
574 LoadP(dst, SafepointRegisterSlot(src));
575 }
576
577
578 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
579 // The registers are pushed starting with the highest encoding,
580 // which means that lowest encodings are closest to the stack pointer.
581 RegList regs = kSafepointSavedRegisters;
582 int index = 0;
583
584 DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
585
586 for (int16_t i = 0; i < reg_code; i++) {
587 if ((regs & (1 << i)) != 0) {
588 index++;
589 }
590 }
591
592 return index;
593 }
594
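
The index computation above can be restated as a standalone helper (illustrative sketch only; the slot index of a register is the number of saved registers with a lower encoding, because MultiPush stores low encodings nearest to sp):

#include <cstdint>

static inline int SafepointSlotIndex(uint32_t saved_regs_bits, int reg_code) {
  int index = 0;
  for (int i = 0; i < reg_code; i++) {
    if (saved_regs_bits & (1u << i)) index++;  // count lower-encoded saved regs
  }
  return index;
}
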
595
596 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
597 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
598 }
599
600
601 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
602 // General purpose registers are pushed last on the stack.
603 int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
604 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
605 return MemOperand(sp, doubles_size + register_offset);
606 }
607
608
609 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
610 const DoubleRegister src) {
611 Label done;
612
613 // Test for NaN
614 fcmpu(src, src);
615
616 if (dst.is(src)) {
617 bordered(&done);
618 } else {
619 Label is_nan;
620 bunordered(&is_nan);
621 fmr(dst, src);
622 b(&done);
623 bind(&is_nan);
624 }
625
626 // Replace with canonical NaN.
627 double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
628 LoadDoubleLiteral(dst, nan_value, r0);
629
630 bind(&done);
631 }
632
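
In scalar terms the sequence above does the following (a sketch; kCanonicalNaN stands in for the constant loaded by LoadDoubleLiteral):

#include <cmath>

// fcmpu(src, src) sets the "unordered" condition exactly when src is NaN, so
// the generated code is equivalent to this select.
static inline double CanonicalizeNaNValue(double value, double kCanonicalNaN) {
  return std::isnan(value) ? kCanonicalNaN : value;
}
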
633
634 void MacroAssembler::ConvertIntToDouble(Register src,
635 DoubleRegister double_dst) {
636 MovIntToDouble(double_dst, src, r0);
637 fcfid(double_dst, double_dst);
638 }
639
640
641 void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
642 DoubleRegister double_dst) {
643 MovUnsignedIntToDouble(double_dst, src, r0);
644 fcfid(double_dst, double_dst);
645 }
646
647
648 void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
649 const Register src,
650 const Register int_scratch) {
651 MovIntToDouble(dst, src, int_scratch);
652 fcfid(dst, dst);
653 frsp(dst, dst);
654 }
655
656
657 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
658 #if !V8_TARGET_ARCH_PPC64
659 const Register dst_hi,
660 #endif
661 const Register dst,
662 const DoubleRegister double_dst,
663 FPRoundingMode rounding_mode) {
664 if (rounding_mode == kRoundToZero) {
665 fctidz(double_dst, double_input);
666 } else {
667 SetRoundingMode(rounding_mode);
668 fctid(double_dst, double_input);
669 ResetRoundingMode();
670 }
671
672 MovDoubleToInt64(
673 #if !V8_TARGET_ARCH_PPC64
674 dst_hi,
675 #endif
676 dst, double_dst);
677 }
678
679
680 #if V8_OOL_CONSTANT_POOL
681 void MacroAssembler::LoadConstantPoolPointerRegister() {
682 ConstantPoolUnavailableScope constant_pool_unavailable(this);
683
684 // CheckBuffer() is called too frequently. This will pre-grow
685 // the buffer if needed to avoid splitting the relocation and instructions.
686 EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
687
688 uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
689 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize;
690 mov(kConstantPoolRegister,
691 Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
692 LoadP(kConstantPoolRegister,
693 MemOperand(kConstantPoolRegister, constant_pool_offset));
694 }
695 #endif
696
697
698 void MacroAssembler::StubPrologue() {
699 PushFixedFrame();
700 Push(Smi::FromInt(StackFrame::STUB));
701 // Adjust FP to point to saved FP.
702 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
703 #if V8_OOL_CONSTANT_POOL
704 LoadConstantPoolPointerRegister();
705 set_constant_pool_available(true);
706 #endif
707 }
708
709
710 void MacroAssembler::Prologue(bool code_pre_aging) {
711 {
712 PredictableCodeSizeScope predictible_code_size_scope(
713 this, kNoCodeAgeSequenceLength);
714 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
715 // The following instructions must remain together and unmodified
716 // for code aging to work properly.
717 if (code_pre_aging) {
718 // Pre-age the code.
719 // This matches the code found in PatchPlatformCodeAge()
720 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
721 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
722 mflr(ip);
723 mov(r3, Operand(target));
724 Call(r3);
725 for (int i = 0; i < kCodeAgingSequenceNops; i++) {
726 nop();
727 }
728 } else {
729 // This matches the code found in GetNoCodeAgeSequence()
730 PushFixedFrame(r4);
731 // Adjust fp to point to saved fp.
732 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
733 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
734 nop();
735 }
736 }
737 }
738 #if V8_OOL_CONSTANT_POOL
739 LoadConstantPoolPointerRegister();
740 set_constant_pool_available(true);
741 #endif
742 }
743
744
745 void MacroAssembler::EnterFrame(StackFrame::Type type,
746 bool load_constant_pool) {
747 PushFixedFrame();
748 #if V8_OOL_CONSTANT_POOL
749 if (load_constant_pool) {
750 LoadConstantPoolPointerRegister();
751 }
752 #endif
753 LoadSmiLiteral(r0, Smi::FromInt(type));
754 push(r0);
755 mov(r0, Operand(CodeObject()));
756 push(r0);
757 // Adjust FP to point to saved FP.
758 addi(fp, sp,
759 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
760 }
761
762
763 int MacroAssembler::LeaveFrame(StackFrame::Type type) {
764 // r3: preserved
765 // r4: preserved
766 // r5: preserved
767
768 // Drop the execution stack down to the frame pointer and restore
769 // the caller frame pointer, return address and constant pool pointer.
770 int frame_ends;
771 #if V8_OOL_CONSTANT_POOL
772 addi(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
773 frame_ends = pc_offset();
774 Pop(r0, fp, kConstantPoolRegister);
775 #else
776 mr(sp, fp);
777 frame_ends = pc_offset();
778 Pop(r0, fp);
779 #endif
780 mtlr(r0);
781 return frame_ends;
782 }
783
784
785 // ExitFrame layout (probably wrongish.. needs updating)
786 //
787 // SP -> previousSP
788 // LK reserved
789 // code
790 // sp_on_exit (for debug?)
791 // oldSP->prev SP
792 // LK
793 // <parameters on stack>
794
795 // Prior to calling EnterExitFrame, we've got a bunch of parameters
796 // on the stack that we need to wrap a real frame around, so first
797 // we reserve a slot for LK and push the previous SP, which is captured
798 // in the fp register (r31).
799 // Then we buy a new frame.
800
801 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
802 // Set up the frame structure on the stack.
803 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
804 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
805 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
806 DCHECK(stack_space > 0);
807
808 // This is an opportunity to build a frame to wrap
809 // all of the pushes that have happened inside of V8
810 // since we were called from C code
811
812 // replicate ARM frame - TODO make this more closely follow PPC ABI
813 mflr(r0);
814 Push(r0, fp);
815 mr(fp, sp);
816 // Reserve room for saved entry sp and code object.
817 subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));
818
819 if (emit_debug_code()) {
820 li(r8, Operand::Zero());
821 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
822 }
823 #if V8_OOL_CONSTANT_POOL
824 StoreP(kConstantPoolRegister,
825 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
826 #endif
827 mov(r8, Operand(CodeObject()));
828 StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
829
830 // Save the frame pointer and the context in top.
831 mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
832 StoreP(fp, MemOperand(r8));
833 mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
834 StoreP(cp, MemOperand(r8));
835
836 // Optionally save all volatile double registers.
837 if (save_doubles) {
838 SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
839 // Note that d0 will be accessible at
840 // fp - ExitFrameConstants::kFrameSize -
841 // kNumVolatileRegisters * kDoubleSize,
842 // since the sp slot and code slot were pushed after the fp.
843 }
844
845 addi(sp, sp, Operand(-stack_space * kPointerSize));
846
847 // Allocate and align the frame preparing for calling the runtime
848 // function.
849 const int frame_alignment = ActivationFrameAlignment();
850 if (frame_alignment > kPointerSize) {
851 DCHECK(IsPowerOf2(frame_alignment));
852 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
853 }
854 li(r0, Operand::Zero());
855 StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
856
857 // Set the exit frame sp value to point just before the return address
858 // location.
859 addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
860 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
861 }
862
863
864 void MacroAssembler::InitializeNewString(Register string, Register length,
865 Heap::RootListIndex map_index,
866 Register scratch1, Register scratch2) {
867 SmiTag(scratch1, length);
868 LoadRoot(scratch2, map_index);
869 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
870 li(scratch1, Operand(String::kEmptyHashField));
871 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
872 StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
873 }
874
875
876 int MacroAssembler::ActivationFrameAlignment() {
877 #if !defined(USE_SIMULATOR)
878 // Running on the real platform. Use the alignment as mandated by the local
879 // environment.
880 // Note: This will break if we ever start generating snapshots on one PPC
881 // platform for another PPC platform with a different alignment.
882 return base::OS::ActivationFrameAlignment();
883 #else // Simulated
884 // If we are using the simulator then we should always align to the expected
885 // alignment. As the simulator is used to generate snapshots we do not know
886 // if the target platform will need alignment, so this is controlled from a
887 // flag.
888 return FLAG_sim_stack_alignment;
889 #endif
890 }
891
892
893 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
894 bool restore_context) {
895 #if V8_OOL_CONSTANT_POOL
896 ConstantPoolUnavailableScope constant_pool_unavailable(this);
897 #endif
898 // Optionally restore all double registers.
899 if (save_doubles) {
900 // Calculate the stack location of the saved doubles and restore them.
901 const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
902 const int offset =
903 (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
904 addi(r6, fp, Operand(-offset));
905 RestoreFPRegs(r6, 0, kNumRegs);
906 }
907
908 // Clear top frame.
909 li(r6, Operand::Zero());
910 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
911 StoreP(r6, MemOperand(ip));
912
913 // Restore current context from top and clear it in debug mode.
914 if (restore_context) {
915 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
916 LoadP(cp, MemOperand(ip));
917 }
918 #ifdef DEBUG
919 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
920 StoreP(r6, MemOperand(ip));
921 #endif
922
923 // Tear down the exit frame, pop the arguments, and return.
924 #if V8_OOL_CONSTANT_POOL
925 LoadP(kConstantPoolRegister,
926 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
927 #endif
928 mr(sp, fp);
929 pop(fp);
930 pop(r0);
931 mtlr(r0);
932
933 if (argument_count.is_valid()) {
934 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
935 add(sp, sp, argument_count);
936 }
937 }
938
939
940 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
941 Move(dst, d1);
942 }
943
944
945 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
946 Move(dst, d1);
947 }
948
949
950 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
951 const ParameterCount& actual,
952 Handle<Code> code_constant,
953 Register code_reg, Label* done,
954 bool* definitely_mismatches,
955 InvokeFlag flag,
956 const CallWrapper& call_wrapper) {
957 bool definitely_matches = false;
958 *definitely_mismatches = false;
959 Label regular_invoke;
960
961 // Check whether the expected and actual arguments count match. If not,
962 // setup registers according to contract with ArgumentsAdaptorTrampoline:
963 // r3: actual arguments count
964 // r4: function (passed through to callee)
965 // r5: expected arguments count
966
967 // The code below is made a lot easier because the calling code already sets
968 // up actual and expected registers according to the contract if values are
969 // passed in registers.
970
971 // roohack - remove these 3 checks temporarily
972 // DCHECK(actual.is_immediate() || actual.reg().is(r3));
973 // DCHECK(expected.is_immediate() || expected.reg().is(r5));
974 // DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
975 // || code_reg.is(r6));
976
977 if (expected.is_immediate()) {
978 DCHECK(actual.is_immediate());
979 if (expected.immediate() == actual.immediate()) {
980 definitely_matches = true;
981 } else {
982 mov(r3, Operand(actual.immediate()));
983 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
984 if (expected.immediate() == sentinel) {
985 // Don't worry about adapting arguments for builtins that
986 // don't want that done. Skip the adaptation code by making it look
987 // like we have a match between expected and actual number of
988 // arguments.
989 definitely_matches = true;
990 } else {
991 *definitely_mismatches = true;
992 mov(r5, Operand(expected.immediate()));
993 }
994 }
995 } else {
996 if (actual.is_immediate()) {
997 cmpi(expected.reg(), Operand(actual.immediate()));
998 beq(&regular_invoke);
999 mov(r3, Operand(actual.immediate()));
1000 } else {
1001 cmp(expected.reg(), actual.reg());
1002 beq(&regular_invoke);
1003 }
1004 }
1005
1006 if (!definitely_matches) {
1007 if (!code_constant.is_null()) {
1008 mov(r6, Operand(code_constant));
1009 addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
1010 }
1011
1012 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
1013 if (flag == CALL_FUNCTION) {
1014 call_wrapper.BeforeCall(CallSize(adaptor));
1015 Call(adaptor);
1016 call_wrapper.AfterCall();
1017 if (!*definitely_mismatches) {
1018 b(done);
1019 }
1020 } else {
1021 Jump(adaptor, RelocInfo::CODE_TARGET);
1022 }
1023 bind(&regular_invoke);
1024 }
1025 }
1026
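
The decision the prologue encodes can be summarized by this hedged sketch (names are illustrative; the sentinel is SharedFunctionInfo::kDontAdaptArgumentsSentinel):

// Sketch of the classification done by InvokePrologue above.
enum class ArgCountCheck { kMatch, kMismatch };

static inline ArgCountCheck ClassifyArgumentCounts(int expected, int actual,
                                                   int dont_adapt_sentinel) {
  if (expected == actual) return ArgCountCheck::kMatch;
  if (expected == dont_adapt_sentinel) return ArgCountCheck::kMatch;  // builtin
  return ArgCountCheck::kMismatch;  // goes through ArgumentsAdaptorTrampoline
}
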
1027
1028 void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
1029 const ParameterCount& actual, InvokeFlag flag,
1030 const CallWrapper& call_wrapper) {
1031 // You can't call a function without a valid frame.
1032 DCHECK(flag == JUMP_FUNCTION || has_frame());
1033
1034 Label done;
1035 bool definitely_mismatches = false;
1036 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
1037 &definitely_mismatches, flag, call_wrapper);
1038 if (!definitely_mismatches) {
1039 if (flag == CALL_FUNCTION) {
1040 call_wrapper.BeforeCall(CallSize(code));
1041 Call(code);
1042 call_wrapper.AfterCall();
1043 } else {
1044 DCHECK(flag == JUMP_FUNCTION);
1045 Jump(code);
1046 }
1047
1048 // Continue here if InvokePrologue does handle the invocation due to
1049 // mismatched parameter counts.
1050 bind(&done);
1051 }
1052 }
1053
1054
1055 void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
1056 InvokeFlag flag,
1057 const CallWrapper& call_wrapper) {
1058 // You can't call a function without a valid frame.
1059 DCHECK(flag == JUMP_FUNCTION || has_frame());
1060
1061 // Contract with called JS functions requires that function is passed in r4.
1062 DCHECK(fun.is(r4));
1063
1064 Register expected_reg = r5;
1065 Register code_reg = r6;
1066
1067 LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1068 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1069 LoadWordArith(expected_reg,
1070 FieldMemOperand(
1071 code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1072 #if !defined(V8_TARGET_ARCH_PPC64)
1073 SmiUntag(expected_reg);
1074 #endif
1075 LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
1076
1077 ParameterCount expected(expected_reg);
1078 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
1079 }
1080
1081
1082 void MacroAssembler::InvokeFunction(Register function,
1083 const ParameterCount& expected,
1084 const ParameterCount& actual,
1085 InvokeFlag flag,
1086 const CallWrapper& call_wrapper) {
1087 // You can't call a function without a valid frame.
1088 DCHECK(flag == JUMP_FUNCTION || has_frame());
1089
1090 // Contract with called JS functions requires that function is passed in r4.
1091 DCHECK(function.is(r4));
1092
1093 // Get the function and setup the context.
1094 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1095
1096 // We call indirectly through the code field in the function to
1097 // allow recompilation to take effect without changing any of the
1098 // call sites.
1099 LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
1100 InvokeCode(r6, expected, actual, flag, call_wrapper);
1101 }
1102
1103
1104 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1105 const ParameterCount& expected,
1106 const ParameterCount& actual,
1107 InvokeFlag flag,
1108 const CallWrapper& call_wrapper) {
1109 Move(r4, function);
1110 InvokeFunction(r4, expected, actual, flag, call_wrapper);
1111 }
1112
1113
1114 void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map,
1115 Register scratch, Label* fail) {
1116 LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1117 IsInstanceJSObjectType(map, scratch, fail);
1118 }
1119
1120
1121 void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch,
1122 Label* fail) {
1123 lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1124 cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1125 blt(fail);
1126 cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1127 bgt(fail);
1128 }
1129
1130
1131 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
1132 Label* fail) {
1133 DCHECK(kNotStringTag != 0);
1134
1135 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1136 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1137 andi(r0, scratch, Operand(kIsNotStringMask));
1138 bne(fail, cr0);
1139 }
1140
1141
1142 void MacroAssembler::IsObjectNameType(Register object, Register scratch,
1143 Label* fail) {
1144 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1145 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1146 cmpi(scratch, Operand(LAST_NAME_TYPE));
1147 bgt(fail);
1148 }
1149
1150
1151 void MacroAssembler::DebugBreak() {
1152 li(r3, Operand::Zero());
1153 mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1154 CEntryStub ces(isolate(), 1);
1155 DCHECK(AllowThisStubCall(&ces));
1156 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1157 }
1158
1159
1160 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1161 int handler_index) {
1162 // Adjust this code if not the case.
1163 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1164 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1165 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1166 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1167 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1168 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1169
1170 // For the JSEntry handler, we must preserve r1-r7; r0 and r8-r15 are available.
1171 // We want the stack to look like
1172 // sp -> NextOffset
1173 // CodeObject
1174 // state
1175 // context
1176 // frame pointer
1177
1178 // Link the current handler as the next handler.
1179 mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1180 LoadP(r0, MemOperand(r8));
1181 StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize));
1182 // Set this new handler as the current one.
1183 StoreP(sp, MemOperand(r8));
1184
1185 if (kind == StackHandler::JS_ENTRY) {
1186 li(r8, Operand::Zero()); // NULL frame pointer.
1187 StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
1188 LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context.
1189 StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
1190 } else {
1191 // still not sure if fp is right
1192 StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
1193 StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
1194 }
1195 unsigned state = StackHandler::IndexField::encode(handler_index) |
1196 StackHandler::KindField::encode(kind);
1197 LoadIntLiteral(r8, state);
1198 StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
1199 mov(r8, Operand(CodeObject()));
1200 StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
1201 }
1202
1203
1204 void MacroAssembler::PopTryHandler() {
1205 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1206 pop(r4);
1207 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1208 addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1209 StoreP(r4, MemOperand(ip));
1210 }
1211
1212
1213 // PPC - make use of ip as a temporary register
1214 void MacroAssembler::JumpToHandlerEntry() {
1215 // Compute the handler entry address and jump to it. The handler table is
1216 // a fixed array of (smi-tagged) code offsets.
1217 // r3 = exception, r4 = code object, r5 = state.
1218 #if V8_OOL_CONSTANT_POOL
1219 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1220 LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset));
1221 #endif
1222 LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table.
1223 addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1224 srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index.
1225 slwi(ip, r5, Operand(kPointerSizeLog2));
1226 add(ip, r6, ip);
1227 LoadP(r5, MemOperand(ip)); // Smi-tagged offset.
1228 addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1229 SmiUntag(ip, r5);
1230 add(r0, r4, ip);
1231 mtctr(r0);
1232 bctr();
1233 }
1234
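
The jump target computed above corresponds to the following pointer arithmetic (a sketch; the >> 1 untag assumes 31-bit smis, and 'handler_table' models the fixed array of smi-tagged code offsets):

#include <cstdint>

static inline uintptr_t HandlerEntryAddress(uintptr_t code_start,
                                            const intptr_t* handler_table,
                                            unsigned state,
                                            unsigned kind_width) {
  unsigned handler_index = state >> kind_width;          // drop the kind bits
  intptr_t offset = handler_table[handler_index] >> 1;   // SmiUntag
  return code_start + offset;
}
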
1235
1236 void MacroAssembler::Throw(Register value) {
1237 // Adjust this code if not the case.
1238 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1239 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1240 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1241 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1242 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1243 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1244 Label skip;
1245
1246 // The exception is expected in r3.
1247 if (!value.is(r3)) {
1248 mr(r3, value);
1249 }
1250 // Drop the stack pointer to the top of the top handler.
1251 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1252 LoadP(sp, MemOperand(r6));
1253 // Restore the next handler.
1254 pop(r5);
1255 StoreP(r5, MemOperand(r6));
1256
1257 // Get the code object (r4) and state (r5). Restore the context and frame
1258 // pointer.
1259 pop(r4);
1260 pop(r5);
1261 pop(cp);
1262 pop(fp);
1263
1264 // If the handler is a JS frame, restore the context to the frame.
1265 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1266 // or cp.
1267 cmpi(cp, Operand::Zero());
1268 beq(&skip);
1269 StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1270 bind(&skip);
1271
1272 JumpToHandlerEntry();
1273 }
1274
1275
1276 void MacroAssembler::ThrowUncatchable(Register value) {
1277 // Adjust this code if not the case.
1278 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1279 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1280 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1281 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1282 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1283 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1284
1285 // The exception is expected in r3.
1286 if (!value.is(r3)) {
1287 mr(r3, value);
1288 }
1289 // Drop the stack pointer to the top of the top stack handler.
1290 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1291 LoadP(sp, MemOperand(r6));
1292
1293 // Unwind the handlers until the ENTRY handler is found.
1294 Label fetch_next, check_kind;
1295 b(&check_kind);
1296 bind(&fetch_next);
1297 LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1298
1299 bind(&check_kind);
1300 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1301 LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
1302 andi(r0, r5, Operand(StackHandler::KindField::kMask));
1303 bne(&fetch_next, cr0);
1304
1305 // Set the top handler address to next handler past the top ENTRY handler.
1306 pop(r5);
1307 StoreP(r5, MemOperand(r6));
1308 // Get the code object (r4) and state (r5). Clear the context and frame
1309 // pointer (0 was saved in the handler).
1310 pop(r4);
1311 pop(r5);
1312 pop(cp);
1313 pop(fp);
1314
1315 JumpToHandlerEntry();
1316 }
1317
1318
1319 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1320 Register scratch, Label* miss) {
1321 Label same_contexts;
1322
1323 DCHECK(!holder_reg.is(scratch));
1324 DCHECK(!holder_reg.is(ip));
1325 DCHECK(!scratch.is(ip));
1326
1327 // Load current lexical context from the stack frame.
1328 LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1329 // In debug mode, make sure the lexical context is set.
1330 #ifdef DEBUG
1331 cmpi(scratch, Operand::Zero());
1332 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1333 #endif
1334
1335 // Load the native context of the current context.
1336 int offset =
1337 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1338 LoadP(scratch, FieldMemOperand(scratch, offset));
1339 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1340
1341 // Check the context is a native context.
1342 if (emit_debug_code()) {
1343 // Cannot use ip as a temporary in this verification code because
1344 // ip is clobbered as part of cmp with an object Operand.
1345 push(holder_reg); // Temporarily save holder on the stack.
1346 // Read the first word and compare to the native_context_map.
1347 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1348 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1349 cmp(holder_reg, ip);
1350 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1351 pop(holder_reg); // Restore holder.
1352 }
1353
1354 // Check if both contexts are the same.
1355 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1356 cmp(scratch, ip);
1357 beq(&same_contexts);
1358
1359 // Check the context is a native context.
1360 if (emit_debug_code()) {
1361 // Cannot use ip as a temporary in this verification code because
1362 // ip is clobbered as part of cmp with an object Operand.
1363 push(holder_reg); // Temporarily save holder on the stack.
1364 mr(holder_reg, ip); // Move ip to its holding place.
1365 LoadRoot(ip, Heap::kNullValueRootIndex);
1366 cmp(holder_reg, ip);
1367 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1368
1369 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1370 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1371 cmp(holder_reg, ip);
1372 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1373 // Restore ip is not needed. ip is reloaded below.
1374 pop(holder_reg); // Restore holder.
1375 // Restore ip to holder's context.
1376 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1377 }
1378
1379 // Check that the security token in the calling global object is
1380 // compatible with the security token in the receiving global
1381 // object.
1382 int token_offset =
1383 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
1384
1385 LoadP(scratch, FieldMemOperand(scratch, token_offset));
1386 LoadP(ip, FieldMemOperand(ip, token_offset));
1387 cmp(scratch, ip);
1388 bne(miss);
1389
1390 bind(&same_contexts);
1391 }
1392
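
The access rule being generated reduces to this sketch (illustrative only): access is allowed when the two native contexts are identical or their security tokens match.

static inline bool AllowGlobalProxyAccess(const void* caller_native_context,
                                          const void* holder_native_context,
                                          const void* caller_token,
                                          const void* holder_token) {
  if (caller_native_context == holder_native_context) return true;  // same_contexts
  return caller_token == holder_token;  // otherwise compare security tokens
}
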
1393
1394 // Compute the hash code from the untagged key. This must be kept in sync with
1395 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1396 // code-stub-hydrogen.cc
1397 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1398 // First of all we assign the hash seed to scratch.
1399 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1400 SmiUntag(scratch);
1401
1402 // Xor original key with a seed.
1403 xor_(t0, t0, scratch);
1404
1405 // Compute the hash code from the untagged key. This must be kept in sync
1406 // with ComputeIntegerHash in utils.h.
1407 //
1408 // hash = ~hash + (hash << 15);
1409 notx(scratch, t0);
1410 slwi(t0, t0, Operand(15));
1411 add(t0, scratch, t0);
1412 // hash = hash ^ (hash >> 12);
1413 srwi(scratch, t0, Operand(12));
1414 xor_(t0, t0, scratch);
1415 // hash = hash + (hash << 2);
1416 slwi(scratch, t0, Operand(2));
1417 add(t0, t0, scratch);
1418 // hash = hash ^ (hash >> 4);
1419 srwi(scratch, t0, Operand(4));
1420 xor_(t0, t0, scratch);
1421 // hash = hash * 2057;
1422 mr(r0, t0);
1423 slwi(scratch, t0, Operand(3));
1424 add(t0, t0, scratch);
1425 slwi(scratch, r0, Operand(11));
1426 add(t0, t0, scratch);
1427 // hash = hash ^ (hash >> 16);
1428 srwi(scratch, t0, Operand(16));
1429 xor_(t0, t0, scratch);
1430 }
1431
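
Written out in plain C++, the hash sequence above is the following (a sketch mirroring the commented steps; the function name is illustrative, compare ComputeIntegerHash in utils.h):

#include <cstdint>

static inline uint32_t SeededIntegerHash(uint32_t key, uint32_t seed) {
  uint32_t hash = key ^ seed;       // xor original key with the seed
  hash = ~hash + (hash << 15);
  hash = hash ^ (hash >> 12);
  hash = hash + (hash << 2);
  hash = hash ^ (hash >> 4);
  hash = hash * 2057;               // == hash + (hash << 3) + (hash << 11)
  hash = hash ^ (hash >> 16);
  return hash;
}
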
1432
1433 void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
1434 Register key, Register result,
1435 Register t0, Register t1,
1436 Register t2) {
1437 // Register use:
1438 //
1439 // elements - holds the slow-case elements of the receiver on entry.
1440 // Unchanged unless 'result' is the same register.
1441 //
1442 // key - holds the smi key on entry.
1443 // Unchanged unless 'result' is the same register.
1444 //
1445 // result - holds the result on exit if the load succeeded.
1446 // Allowed to be the same as 'key' or 'result'.
1447 // Unchanged on bailout so 'key' or 'result' can be used
1448 // in further computation.
1449 //
1450 // Scratch registers:
1451 //
1452 // t0 - holds the untagged key on entry and holds the hash once computed.
1453 //
1454 // t1 - used to hold the capacity mask of the dictionary
1455 //
1456 // t2 - used for the index into the dictionary.
1457 Label done;
1458
1459 GetNumberHash(t0, t1);
1460
1461 // Compute the capacity mask.
1462 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1463 SmiUntag(t1);
1464 subi(t1, t1, Operand(1));
1465
1466 // Generate an unrolled loop that performs a few probes before giving up.
1467 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1468 // Use t2 for index calculations and keep the hash intact in t0.
1469 mr(t2, t0);
1470 // Compute the masked index: (hash + i + i * i) & mask.
1471 if (i > 0) {
1472 addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1473 }
1474 and_(t2, t2, t1);
1475
1476 // Scale the index by multiplying by the element size.
1477 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1478 slwi(ip, t2, Operand(1));
1479 add(t2, t2, ip); // t2 = t2 * 3
1480
1481 // Check if the key is identical to the name.
1482 slwi(t2, t2, Operand(kPointerSizeLog2));
1483 add(t2, elements, t2);
1484 LoadP(ip,
1485 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1486 cmp(key, ip);
1487 if (i != kNumberDictionaryProbes - 1) {
1488 beq(&done);
1489 } else {
1490 bne(miss);
1491 }
1492 }
1493
1494 bind(&done);
1495 // Check that the value is a normal property.
1496 // t2: elements + (index * kPointerSize)
1497 const int kDetailsOffset =
1498 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1499 LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
1500 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
1501 and_(r0, t1, ip, SetRC);
1502 bne(miss, cr0);
1503
1504 // Get the value at the masked, scaled index and return.
1505 const int kValueOffset =
1506 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1507 LoadP(result, FieldMemOperand(t2, kValueOffset));
1508 }
1509
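
A standalone model of the probe loop above (a sketch; the probe offset i + i*i and kEntrySize == 3 follow the comments in the generated code, not the dictionary sources):

#include <cstdint>
#include <vector>

static inline int FindNumberDictionaryEntry(const std::vector<intptr_t>& elements,
                                            intptr_t key, uint32_t hash,
                                            uint32_t capacity_mask,
                                            int num_probes) {
  const int kEntrySize = 3;
  for (int i = 0; i < num_probes; i++) {
    uint32_t index = (hash + i + i * i) & capacity_mask;  // masked index
    uint32_t slot = index * kEntrySize;  // word index of the entry's key
    if (elements[slot] == key) return static_cast<int>(slot);
  }
  return -1;  // corresponds to branching to the miss label
}
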
1510
1511 void MacroAssembler::Allocate(int object_size, Register result,
1512 Register scratch1, Register scratch2,
1513 Label* gc_required, AllocationFlags flags) {
1514 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1515 if (!FLAG_inline_new) {
1516 if (emit_debug_code()) {
1517 // Trash the registers to simulate an allocation failure.
1518 li(result, Operand(0x7091));
1519 li(scratch1, Operand(0x7191));
1520 li(scratch2, Operand(0x7291));
1521 }
1522 b(gc_required);
1523 return;
1524 }
1525
1526 DCHECK(!result.is(scratch1));
1527 DCHECK(!result.is(scratch2));
1528 DCHECK(!scratch1.is(scratch2));
1529 DCHECK(!scratch1.is(ip));
1530 DCHECK(!scratch2.is(ip));
1531
1532 // Make object size into bytes.
1533 if ((flags & SIZE_IN_WORDS) != 0) {
1534 object_size *= kPointerSize;
1535 }
1536 DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1537
1538 // Check relative positions of allocation top and limit addresses.
1539 ExternalReference allocation_top =
1540 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1541 ExternalReference allocation_limit =
1542 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1543
1544 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1545 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1546 DCHECK((limit - top) == kPointerSize);
1547
1548 // Set up allocation top address register.
1549 Register topaddr = scratch1;
1550 mov(topaddr, Operand(allocation_top));
1551
1552 // This code stores a temporary value in ip. This is OK, as the code below
1553 // does not need ip for implicit literal generation.
1554 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1555 // Load allocation top into result and allocation limit into ip.
1556 LoadP(result, MemOperand(topaddr));
1557 LoadP(ip, MemOperand(topaddr, kPointerSize));
1558 } else {
1559 if (emit_debug_code()) {
1560 // Assert that result actually contains top on entry. ip is used
1561 // immediately below, so this use of ip does not cause a difference in
1562 // register content between debug and release mode.
1563 LoadP(ip, MemOperand(topaddr));
1564 cmp(result, ip);
1565 Check(eq, kUnexpectedAllocationTop);
1566 }
1567 // Load allocation limit into ip. Result already contains allocation top.
1568 LoadP(ip, MemOperand(topaddr, limit - top), r0);
1569 }
1570
1571 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1572 // Align the next allocation. Storing the filler map without checking top is
1573 // safe in new-space because the limit of the heap is aligned there.
1574 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1575 #if V8_TARGET_ARCH_PPC64
1576 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1577 #else
1578 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1579 andi(scratch2, result, Operand(kDoubleAlignmentMask));
1580 Label aligned;
1581 beq(&aligned, cr0);
1582 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1583 cmpl(result, ip);
1584 bge(gc_required);
1585 }
1586 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1587 stw(scratch2, MemOperand(result));
1588 addi(result, result, Operand(kDoubleSize / 2));
1589 bind(&aligned);
1590 #endif
1591 }
1592
1593 // Calculate new top and bail out if new space is exhausted. Use result
1594 // to calculate the new top.
1595 li(r0, Operand(-1));
1596 if (is_int16(object_size)) {
1597 addic(scratch2, result, Operand(object_size));
1598 } else {
1599 mov(scratch2, Operand(object_size));
1600 addc(scratch2, result, scratch2);
1601 }
1602 addze(r0, r0, LeaveOE, SetRC);
1603 beq(gc_required, cr0);
1604 cmpl(scratch2, ip);
1605 bgt(gc_required);
1606 StoreP(scratch2, MemOperand(topaddr));
1607
1608 // Tag object if requested.
1609 if ((flags & TAG_OBJECT) != 0) {
1610 addi(result, result, Operand(kHeapObjectTag));
1611 }
1612 }
1613
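
The li/addic/addze idiom above detects overflow of the top-plus-size addition: r0 starts at -1, addic records the carry, and addze folds the carry into r0, so r0 becomes zero exactly when the add wrapped. In plain C++ the same check is (a sketch, not taken from the V8 sources):

#include <cstdint>

static inline bool AllocationWouldWrap(uintptr_t top, uintptr_t size,
                                       uintptr_t* new_top) {
  *new_top = top + size;
  return *new_top < top;  // true exactly when the carry would be set
}
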
1614
1615 void MacroAssembler::Allocate(Register object_size, Register result,
1616 Register scratch1, Register scratch2,
1617 Label* gc_required, AllocationFlags flags) {
1618 if (!FLAG_inline_new) {
1619 if (emit_debug_code()) {
1620 // Trash the registers to simulate an allocation failure.
1621 li(result, Operand(0x7091));
1622 li(scratch1, Operand(0x7191));
1623 li(scratch2, Operand(0x7291));
1624 }
1625 b(gc_required);
1626 return;
1627 }
1628
1629 // Assert that the register arguments are different and that none of
1630 // them are ip. ip is used explicitly in the code generated below.
1631 DCHECK(!result.is(scratch1));
1632 DCHECK(!result.is(scratch2));
1633 DCHECK(!scratch1.is(scratch2));
1634 DCHECK(!object_size.is(ip));
1635 DCHECK(!result.is(ip));
1636 DCHECK(!scratch1.is(ip));
1637 DCHECK(!scratch2.is(ip));
1638
1639 // Check relative positions of allocation top and limit addresses.
1640 ExternalReference allocation_top =
1641 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1642 ExternalReference allocation_limit =
1643 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1644 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1645 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1646 DCHECK((limit - top) == kPointerSize);
1647
1648 // Set up allocation top address.
1649 Register topaddr = scratch1;
1650 mov(topaddr, Operand(allocation_top));
1651
1652 // This code stores a temporary value in ip. This is OK, as the code below
1653 // does not need ip for implicit literal generation.
1654 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1655 // Load allocation top into result and allocation limit into ip.
1656 LoadP(result, MemOperand(topaddr));
1657 LoadP(ip, MemOperand(topaddr, kPointerSize));
1658 } else {
1659 if (emit_debug_code()) {
1660 // Assert that result actually contains top on entry. ip is used
1661 // immediately below, so this use of ip does not cause a difference in
1662 // register content between debug and release mode.
1663 LoadP(ip, MemOperand(topaddr));
1664 cmp(result, ip);
1665 Check(eq, kUnexpectedAllocationTop);
1666 }
1667 // Load allocation limit into ip. Result already contains allocation top.
1668 LoadP(ip, MemOperand(topaddr, limit - top));
1669 }
1670
1671 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1672 // Align the next allocation. Storing the filler map without checking top is
1673 // safe in new-space because the limit of the heap is aligned there.
1674 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1675 #if V8_TARGET_ARCH_PPC64
1676 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1677 #else
1678 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1679 andi(scratch2, result, Operand(kDoubleAlignmentMask));
1680 Label aligned;
1681 beq(&aligned, cr0);
1682 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1683 cmpl(result, ip);
1684 bge(gc_required);
1685 }
1686 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1687 stw(scratch2, MemOperand(result));
1688 addi(result, result, Operand(kDoubleSize / 2));
1689 bind(&aligned);
1690 #endif
1691 }
1692
1693 // Calculate new top and bail out if new space is exhausted. Use result
1694 // to calculate the new top. Object size may be in words so a shift is
1695 // required to get the number of bytes.
1696 li(r0, Operand(-1));
1697 if ((flags & SIZE_IN_WORDS) != 0) {
1698 ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
1699 addc(scratch2, result, scratch2);
1700 } else {
1701 addc(scratch2, result, object_size);
1702 }
1703 addze(r0, r0, LeaveOE, SetRC);
1704 beq(gc_required, cr0);
1705 cmpl(scratch2, ip);
1706 bgt(gc_required);
1707
1708 // Update allocation top. result temporarily holds the new top.
1709 if (emit_debug_code()) {
1710 andi(r0, scratch2, Operand(kObjectAlignmentMask));
1711 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1712 }
1713 StoreP(scratch2, MemOperand(topaddr));
1714
1715 // Tag object if requested.
1716 if ((flags & TAG_OBJECT) != 0) {
1717 addi(result, result, Operand(kHeapObjectTag));
1718 }
1719 }
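// Illustrative note on the DOUBLE_ALIGNMENT path above, assuming the 32-bit
// configuration: if the allocation top is pointer aligned but not double
// aligned (e.g. (address & kDoubleAlignmentMask) == 4), the code stores the
// one-pointer filler map at that word and advances result by
// kDoubleSize / 2 == 4 bytes, so the object that follows is 8-byte aligned
// and the skipped word still parses as a valid (filler) object for the GC.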
1720
1721
1722 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1723 Register scratch) {
1724 ExternalReference new_space_allocation_top =
1725 ExternalReference::new_space_allocation_top_address(isolate());
1726
1727 // Make sure the object has no tag before resetting top.
1728 mov(r0, Operand(~kHeapObjectTagMask));
1729 and_(object, object, r0);
1731 #ifdef DEBUG
1732 // Check that the object un-allocated is below the current top.
1733 mov(scratch, Operand(new_space_allocation_top));
1734 LoadP(scratch, MemOperand(scratch));
1735 cmp(object, scratch);
1736 Check(lt, kUndoAllocationOfNonAllocatedMemory);
1737 #endif
1738 // Write the address of the object to un-allocate as the current top.
1739 mov(scratch, Operand(new_space_allocation_top));
1740 StoreP(object, MemOperand(scratch));
1741 }
1742
1743
1744 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
1745 Register scratch1, Register scratch2,
1746 Register scratch3,
1747 Label* gc_required) {
1748 // Calculate the number of bytes needed for the characters in the string while
1749 // observing object alignment.
1750 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1751 slwi(scratch1, length, Operand(1)); // Length in bytes, not chars.
1752 addi(scratch1, scratch1,
1753 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1754 mov(r0, Operand(~kObjectAlignmentMask));
1755 and_(scratch1, scratch1, r0);
1756
1757 // Allocate two-byte string in new space.
1758 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1759
1760 // Set the map, length and hash field.
1761 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
1762 scratch2);
1763 }
1764
1765
1766 void MacroAssembler::AllocateAsciiString(Register result, Register length,
1767 Register scratch1, Register scratch2,
1768 Register scratch3,
1769 Label* gc_required) {
1770 // Calculate the number of bytes needed for the characters in the string while
1771 // observing object alignment.
1772 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1773 DCHECK(kCharSize == 1);
1774 addi(scratch1, length,
1775 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1776 li(r0, Operand(~kObjectAlignmentMask));
1777 and_(scratch1, scratch1, r0);
1778
1779 // Allocate ASCII string in new space.
1780 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1781
1782 // Set the map, length and hash field.
1783 InitializeNewString(result, length, Heap::kAsciiStringMapRootIndex, scratch1,
1784 scratch2);
1785 }
1786
1787
1788 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
1789 Register scratch1,
1790 Register scratch2,
1791 Label* gc_required) {
1792 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1793 TAG_OBJECT);
1794
1795 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
1796 scratch2);
1797 }
1798
1799
1800 void MacroAssembler::AllocateAsciiConsString(Register result, Register length,
1801 Register scratch1,
1802 Register scratch2,
1803 Label* gc_required) {
1804 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1805 TAG_OBJECT);
1806
1807 InitializeNewString(result, length, Heap::kConsAsciiStringMapRootIndex,
1808 scratch1, scratch2);
1809 }
1810
1811
1812 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1813 Register length,
1814 Register scratch1,
1815 Register scratch2,
1816 Label* gc_required) {
1817 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1818 TAG_OBJECT);
1819
1820 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
1821 scratch2);
1822 }
1823
1824
1825 void MacroAssembler::AllocateAsciiSlicedString(Register result, Register length,
1826 Register scratch1,
1827 Register scratch2,
1828 Label* gc_required) {
1829 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1830 TAG_OBJECT);
1831
1832 InitializeNewString(result, length, Heap::kSlicedAsciiStringMapRootIndex,
1833 scratch1, scratch2);
1834 }
1835
1836
1837 void MacroAssembler::CompareObjectType(Register object, Register map,
1838 Register type_reg, InstanceType type) {
1839 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
1840
1841 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1842 CompareInstanceType(map, temp, type);
1843 }
1844
1845
1846 void MacroAssembler::CheckObjectTypeRange(Register object, Register map,
1847 InstanceType min_type,
1848 InstanceType max_type,
1849 Label* false_label) {
1850 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1851 STATIC_ASSERT(LAST_TYPE < 256);
1852 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1853 lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
1854 subi(ip, ip, Operand(min_type));
1855 cmpli(ip, Operand(max_type - min_type));
1856 bgt(false_label);
1857 }
1858
1859
1860 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1861 InstanceType type) {
1862 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1863 STATIC_ASSERT(LAST_TYPE < 256);
1864 lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1865 cmpi(type_reg, Operand(type));
1866 }
1867
1868
1869 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
1870 DCHECK(!obj.is(ip));
1871 LoadRoot(ip, index);
1872 cmp(obj, ip);
1873 }
1874
1875
1876 void MacroAssembler::CheckFastElements(Register map, Register scratch,
1877 Label* fail) {
1878 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1879 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1880 STATIC_ASSERT(FAST_ELEMENTS == 2);
1881 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1882 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1883 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
1884 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1885 bgt(fail);
1886 }
1887
1888
1889 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
1890 Label* fail) {
1891 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1892 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1893 STATIC_ASSERT(FAST_ELEMENTS == 2);
1894 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1895 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1896 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1897 ble(fail);
1898 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1899 bgt(fail);
1900 }
1901
1902
1903 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
1904 Label* fail) {
1905 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1906 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1907 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1908 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1909 bgt(fail);
1910 }
1911
1912
1913 void MacroAssembler::StoreNumberToDoubleElements(
1914 Register value_reg, Register key_reg, Register elements_reg,
1915 Register scratch1, DoubleRegister double_scratch, Label* fail,
1916 int elements_offset) {
1917 Label smi_value, store;
1918
1919 // Handle smi values specially.
1920 JumpIfSmi(value_reg, &smi_value);
1921
1922 // Ensure that the object is a heap number
1923 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
1924 DONT_DO_SMI_CHECK);
1925
1926 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
1927 // Force a canonical NaN.
1928 CanonicalizeNaN(double_scratch);
1929 b(&store);
1930
1931 bind(&smi_value);
1932 SmiToDouble(double_scratch, value_reg);
1933
1934 bind(&store);
1935 SmiToDoubleArrayOffset(scratch1, key_reg);
1936 add(scratch1, elements_reg, scratch1);
1937 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
1938 elements_offset));
1939 }
1940
1941
1942 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1943 Register right,
1944 Register overflow_dst,
1945 Register scratch) {
1946 DCHECK(!dst.is(overflow_dst));
1947 DCHECK(!dst.is(scratch));
1948 DCHECK(!overflow_dst.is(scratch));
1949 DCHECK(!overflow_dst.is(left));
1950 DCHECK(!overflow_dst.is(right));
1951
1952 // C = A + B; C overflows if A and B have the same sign and C's sign differs.
1953 if (dst.is(left)) {
1954 mr(scratch, left); // Preserve left.
1955 add(dst, left, right); // Left is overwritten.
1956 xor_(scratch, dst, scratch); // Original left.
1957 xor_(overflow_dst, dst, right);
1958 and_(overflow_dst, overflow_dst, scratch, SetRC);
1959 } else if (dst.is(right)) {
1960 mr(scratch, right); // Preserve right.
1961 add(dst, left, right); // Right is overwritten.
1962 xor_(scratch, dst, scratch); // Original right.
1963 xor_(overflow_dst, dst, left);
1964 and_(overflow_dst, overflow_dst, scratch, SetRC);
1965 } else {
1966 add(dst, left, right);
1967 xor_(overflow_dst, dst, left);
1968 xor_(scratch, dst, right);
1969 and_(overflow_dst, scratch, overflow_dst, SetRC);
1970 }
1971 }
1972
1973 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
1974 Register right,
1975 Register overflow_dst,
1976 Register scratch) {
1977 DCHECK(!dst.is(overflow_dst));
1978 DCHECK(!dst.is(scratch));
1979 DCHECK(!overflow_dst.is(scratch));
1980 DCHECK(!overflow_dst.is(left));
1981 DCHECK(!overflow_dst.is(right));
1982
1983 // C = A - B; C overflows if A and B differ in sign and C's sign differs from A's.
1984 if (dst.is(left)) {
1985 mr(scratch, left); // Preserve left.
1986 sub(dst, left, right); // Left is overwritten.
1987 xor_(overflow_dst, dst, scratch);
1988 xor_(scratch, scratch, right);
1989 and_(overflow_dst, overflow_dst, scratch, SetRC);
1990 } else if (dst.is(right)) {
1991 mr(scratch, right); // Preserve right.
1992 sub(dst, left, right); // Right is overwritten.
1993 xor_(overflow_dst, dst, left);
1994 xor_(scratch, left, scratch);
1995 and_(overflow_dst, overflow_dst, scratch, SetRC);
1996 } else {
1997 sub(dst, left, right);
1998 xor_(overflow_dst, dst, left);
1999 xor_(scratch, left, right);
2000 and_(overflow_dst, scratch, overflow_dst, SetRC);
2001 }
2002 }
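// The two helpers above rely on the xor/and sign rule: for C = A + B,
// overflow occurred iff A and B have the same sign and C's sign differs;
// for C = A - B, iff A and B differ in sign and C's sign differs from A's.
// A minimal sketch of the addition case in portable C++ (illustrative only,
// not used by the generated code):
//   static bool AddOverflowed(int32_t a, int32_t b) {
//     int32_t sum = static_cast<int32_t>(
//         static_cast<uint32_t>(a) + static_cast<uint32_t>(b));
//     return ((sum ^ a) & (sum ^ b)) < 0;  // same test as the xor_/and_ above
//   }
// e.g. a = INT32_MAX, b = 1 gives sum = INT32_MIN and both xors negative.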
2003
2004
2005 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
2006 Label* early_success) {
2007 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2008 CompareMap(scratch, map, early_success);
2009 }
2010
2011
2012 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
2013 Label* early_success) {
2014 mov(r0, Operand(map));
2015 cmp(obj_map, r0);
2016 }
2017
2018
2019 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
2020 Label* fail, SmiCheckType smi_check_type) {
2021 if (smi_check_type == DO_SMI_CHECK) {
2022 JumpIfSmi(obj, fail);
2023 }
2024
2025 Label success;
2026 CompareMap(obj, scratch, map, &success);
2027 bne(fail);
2028 bind(&success);
2029 }
2030
2031
2032 void MacroAssembler::CheckMap(Register obj, Register scratch,
2033 Heap::RootListIndex index, Label* fail,
2034 SmiCheckType smi_check_type) {
2035 if (smi_check_type == DO_SMI_CHECK) {
2036 JumpIfSmi(obj, fail);
2037 }
2038 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2039 LoadRoot(ip, index);
2040 cmp(scratch, ip);
2041 bne(fail);
2042 }
2043
2044
2045 void MacroAssembler::DispatchMap(Register obj, Register scratch,
2046 Handle<Map> map, Handle<Code> success,
2047 SmiCheckType smi_check_type) {
2048 Label fail;
2049 if (smi_check_type == DO_SMI_CHECK) {
2050 JumpIfSmi(obj, &fail);
2051 }
2052 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2053 mov(ip, Operand(map));
2054 cmp(scratch, ip);
2055 bne(&fail);
2056 Jump(success, RelocInfo::CODE_TARGET, al);
2057 bind(&fail);
2058 }
2059
2060
2061 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2062 Register scratch, Label* miss,
2063 bool miss_on_bound_function) {
2064 Label non_instance;
2065 if (miss_on_bound_function) {
2066 // Check that the receiver isn't a smi.
2067 JumpIfSmi(function, miss);
2068
2069 // Check that the function really is a function. Load map into result reg.
2070 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2071 bne(miss);
2072
2073 LoadP(scratch,
2074 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2075 lwz(scratch,
2076 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2077 TestBit(scratch,
2078 #if V8_TARGET_ARCH_PPC64
2079 SharedFunctionInfo::kBoundFunction,
2080 #else
2081 SharedFunctionInfo::kBoundFunction + kSmiTagSize,
2082 #endif
2083 r0);
2084 bne(miss, cr0);
2085
2086 // Make sure that the function has an instance prototype.
2087 lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2088 andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2089 bne(&non_instance, cr0);
2090 }
2091
2092 // Get the prototype or initial map from the function.
2093 LoadP(result,
2094 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2095
2096 // If the prototype or initial map is the hole, don't return it and
2097 // simply miss the cache instead. This will allow us to allocate a
2098 // prototype object on-demand in the runtime system.
2099 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2100 cmp(result, ip);
2101 beq(miss);
2102
2103 // If the function does not have an initial map, we're done.
2104 Label done;
2105 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2106 bne(&done);
2107
2108 // Get the prototype from the initial map.
2109 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2110
2111 if (miss_on_bound_function) {
2112 b(&done);
2113
2114 // Non-instance prototype: Fetch prototype from constructor field
2115 // in initial map.
2116 bind(&non_instance);
2117 LoadP(result, FieldMemOperand(result, Map::kConstructorOffset));
2118 }
2119
2120 // All done.
2121 bind(&done);
2122 }
2123
2124
2125 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2126 Condition cond) {
2127 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2128 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2129 }
2130
2131
2132 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2133 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2134 }
2135
2136
2137 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2138 return ref0.address() - ref1.address();
2139 }
2140
2141
2142 void MacroAssembler::CallApiFunctionAndReturn(
2143 Register function_address, ExternalReference thunk_ref, int stack_space,
2144 MemOperand return_value_operand, MemOperand* context_restore_operand) {
2145 ExternalReference next_address =
2146 ExternalReference::handle_scope_next_address(isolate());
2147 const int kNextOffset = 0;
2148 const int kLimitOffset = AddressOffset(
2149 ExternalReference::handle_scope_limit_address(isolate()), next_address);
2150 const int kLevelOffset = AddressOffset(
2151 ExternalReference::handle_scope_level_address(isolate()), next_address);
2152
2153 DCHECK(function_address.is(r4) || function_address.is(r5));
2154 Register scratch = r6;
2155
2156 Label profiler_disabled;
2157 Label end_profiler_check;
2158 mov(scratch, Operand(ExternalReference::is_profiling_address(isolate())));
2159 lbz(scratch, MemOperand(scratch, 0));
2160 cmpi(scratch, Operand::Zero());
2161 beq(&profiler_disabled);
2162
2163 // Additional parameter is the address of the actual callback.
2164 mov(scratch, Operand(thunk_ref));
2165 jmp(&end_profiler_check);
2166
2167 bind(&profiler_disabled);
2168 mr(scratch, function_address);
2169 bind(&end_profiler_check);
2170
2171 // Allocate HandleScope in callee-save registers.
2172 // r17 - next_address
2173 // r14 - next_address->kNextOffset
2174 // r15 - next_address->kLimitOffset
2175 // r16 - next_address->kLevelOffset
2176 mov(r17, Operand(next_address));
2177 LoadP(r14, MemOperand(r17, kNextOffset));
2178 LoadP(r15, MemOperand(r17, kLimitOffset));
2179 lwz(r16, MemOperand(r17, kLevelOffset));
2180 addi(r16, r16, Operand(1));
2181 stw(r16, MemOperand(r17, kLevelOffset));
2182
2183 if (FLAG_log_timer_events) {
2184 FrameScope frame(this, StackFrame::MANUAL);
2185 PushSafepointRegisters();
2186 PrepareCallCFunction(1, r3);
2187 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2188 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2189 PopSafepointRegisters();
2190 }
2191
2192 // Native call returns to the DirectCEntry stub which redirects to the
2193 // return address pushed on stack (could have moved after GC).
2194 // DirectCEntry stub itself is generated early and never moves.
2195 DirectCEntryStub stub(isolate());
2196 stub.GenerateCall(this, scratch);
2197
2198 if (FLAG_log_timer_events) {
2199 FrameScope frame(this, StackFrame::MANUAL);
2200 PushSafepointRegisters();
2201 PrepareCallCFunction(1, r3);
2202 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2203 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2204 PopSafepointRegisters();
2205 }
2206
2207 Label promote_scheduled_exception;
2208 Label exception_handled;
2209 Label delete_allocated_handles;
2210 Label leave_exit_frame;
2211 Label return_value_loaded;
2212
2213 // Load the value from ReturnValue.
2214 LoadP(r3, return_value_operand);
2215 bind(&return_value_loaded);
2216 // No more valid handles (the result handle was the last one). Restore
2217 // previous handle scope.
2218 StoreP(r14, MemOperand(r17, kNextOffset));
2219 if (emit_debug_code()) {
2220 lwz(r4, MemOperand(r17, kLevelOffset));
2221 cmp(r4, r16);
2222 Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2223 }
2224 subi(r16, r16, Operand(1));
2225 stw(r16, MemOperand(r17, kLevelOffset));
2226 LoadP(ip, MemOperand(r17, kLimitOffset));
2227 cmp(r15, ip);
2228 bne(&delete_allocated_handles);
2229
2230 // Check if the function scheduled an exception.
2231 bind(&leave_exit_frame);
2232 LoadRoot(r14, Heap::kTheHoleValueRootIndex);
2233 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2234 LoadP(r15, MemOperand(ip));
2235 cmp(r14, r15);
2236 bne(&promote_scheduled_exception);
2237 bind(&exception_handled);
2238
2239 bool restore_context = context_restore_operand != NULL;
2240 if (restore_context) {
2241 LoadP(cp, *context_restore_operand);
2242 }
2243 // LeaveExitFrame expects unwind space to be in a register.
2244 mov(r14, Operand(stack_space));
2245 LeaveExitFrame(false, r14, !restore_context);
2246 blr();
2247
2248 bind(&promote_scheduled_exception);
2249 {
2250 FrameScope frame(this, StackFrame::INTERNAL);
2251 CallExternalReference(
2252 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
2253 }
2254 jmp(&exception_handled);
2255
2256 // HandleScope limit has changed. Delete allocated extensions.
2257 bind(&delete_allocated_handles);
2258 StoreP(r15, MemOperand(r17, kLimitOffset));
2259 mr(r14, r3);
2260 PrepareCallCFunction(1, r15);
2261 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2262 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
2263 1);
2264 mr(r3, r14);
2265 b(&leave_exit_frame);
2266 }
2267
2268
2269 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2270 return has_frame_ || !stub->SometimesSetsUpAFrame();
2271 }
2272
2273
2274 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2275 // If the hash field contains an array index, pick it out. The assert checks
2276 // that the constant for the maximum number of digits for an array index
2277 // cached in the hash field and the number of bits reserved for it do not
2278 // conflict.
2279 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2280 (1 << String::kArrayIndexValueBits));
2281 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2282 }
2283
2284
2285 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
2286 SmiUntag(ip, smi);
2287 ConvertIntToDouble(ip, value);
2288 }
2289
2290
2291 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2292 Register scratch1, Register scratch2,
2293 DoubleRegister double_scratch) {
2294 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2295 }
2296
2297
2298 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2299 DoubleRegister double_input,
2300 Register scratch,
2301 DoubleRegister double_scratch) {
2302 Label done;
2303 DCHECK(!double_input.is(double_scratch));
2304
2305 ConvertDoubleToInt64(double_input,
2306 #if !V8_TARGET_ARCH_PPC64
2307 scratch,
2308 #endif
2309 result, double_scratch);
2310
2311 #if V8_TARGET_ARCH_PPC64
2312 TestIfInt32(result, scratch, r0);
2313 #else
2314 TestIfInt32(scratch, result, r0);
2315 #endif
2316 bne(&done);
2317
2318 // convert back and compare
2319 fcfid(double_scratch, double_scratch);
2320 fcmpu(double_scratch, double_input);
2321 bind(&done);
2322 }
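// Worked example for the exactness check above: double_input = 3.25
// converts to the integer 3; fcfid turns that integer back into 3.0, which
// compares unequal to 3.25, so the caller observes ne.  For an input of 3.0
// the round trip reproduces the value exactly and the caller observes eq.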
2323
2324
2325 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2326 Register input_high, Register scratch,
2327 DoubleRegister double_scratch, Label* done,
2328 Label* exact) {
2329 DCHECK(!result.is(input_high));
2330 DCHECK(!double_input.is(double_scratch));
2331 Label exception;
2332
2333 MovDoubleHighToInt(input_high, double_input);
2334
2335 // Test for NaN/Inf
2336 ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2337 cmpli(result, Operand(0x7ff));
2338 beq(&exception);
2339
2340 // Convert (rounding to -Inf)
2341 ConvertDoubleToInt64(double_input,
2342 #if !V8_TARGET_ARCH_PPC64
2343 scratch,
2344 #endif
2345 result, double_scratch, kRoundToMinusInf);
2346
2347 // Test for overflow
2348 #if V8_TARGET_ARCH_PPC64
2349 TestIfInt32(result, scratch, r0);
2350 #else
2351 TestIfInt32(scratch, result, r0);
2352 #endif
2353 bne(&exception);
2354
2355 // Test for exactness
2356 fcfid(double_scratch, double_scratch);
2357 fcmpu(double_scratch, double_input);
2358 beq(exact);
2359 b(done);
2360
2361 bind(&exception);
2362 }
2363
2364
2365 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2366 DoubleRegister double_input,
2367 Label* done) {
2368 DoubleRegister double_scratch = kScratchDoubleReg;
2369 Register scratch = ip;
2370
2371 ConvertDoubleToInt64(double_input,
2372 #if !V8_TARGET_ARCH_PPC64
2373 scratch,
2374 #endif
2375 result, double_scratch);
2376
2377 // Test for overflow
2378 #if V8_TARGET_ARCH_PPC64
2379 TestIfInt32(result, scratch, r0);
2380 #else
2381 TestIfInt32(scratch, result, r0);
2382 #endif
2383 beq(done);
2384 }
2385
2386
2387 void MacroAssembler::TruncateDoubleToI(Register result,
2388 DoubleRegister double_input) {
2389 Label done;
2390
2391 TryInlineTruncateDoubleToI(result, double_input, &done);
2392
2393 // If we fell through, the inline version didn't succeed, so call the stub.
2394 mflr(r0);
2395 push(r0);
2396 // Put input on stack.
2397 stfdu(double_input, MemOperand(sp, -kDoubleSize));
2398
2399 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2400 CallStub(&stub);
2401
2402 addi(sp, sp, Operand(kDoubleSize));
2403 pop(r0);
2404 mtlr(r0);
2405
2406 bind(&done);
2407 }
2408
2409
2410 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2411 Label done;
2412 DoubleRegister double_scratch = kScratchDoubleReg;
2413 DCHECK(!result.is(object));
2414
2415 lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2416 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2417
2418 // If we fell through, the inline version didn't succeed, so call the stub.
2419 mflr(r0);
2420 push(r0);
2421 DoubleToIStub stub(isolate(), object, result,
2422 HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2423 CallStub(&stub);
2424 pop(r0);
2425 mtlr(r0);
2426
2427 bind(&done);
2428 }
2429
2430
2431 void MacroAssembler::TruncateNumberToI(Register object, Register result,
2432 Register heap_number_map,
2433 Register scratch1, Label* not_number) {
2434 Label done;
2435 DCHECK(!result.is(object));
2436
2437 UntagAndJumpIfSmi(result, object, &done);
2438 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2439 TruncateHeapNumberToI(result, object);
2440
2441 bind(&done);
2442 }
2443
2444
2445 void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2446 int num_least_bits) {
2447 #if V8_TARGET_ARCH_PPC64
2448 rldicl(dst, src, kBitsPerPointer - kSmiShift,
2449 kBitsPerPointer - num_least_bits);
2450 #else
2451 rlwinm(dst, src, kBitsPerPointer - kSmiShift,
2452 kBitsPerPointer - num_least_bits, 31);
2453 #endif
2454 }
2455
2456
2457 void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2458 int num_least_bits) {
2459 rlwinm(dst, src, 0, 32 - num_least_bits, 31);
2460 }
2461
2462
2463 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2464 SaveFPRegsMode save_doubles) {
2465 // All parameters are on the stack. r3 has the return value after call.
2466
2467 // If the expected number of arguments of the runtime function is
2468 // constant, we check that the actual number of arguments match the
2469 // expectation.
2470 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2471
2472 // TODO(1236192): Most runtime routines don't need the number of
2473 // arguments passed in because it is constant. At some point we
2474 // should remove this need and make the runtime routine entry code
2475 // smarter.
2476 mov(r3, Operand(num_arguments));
2477 mov(r4, Operand(ExternalReference(f, isolate())));
2478 CEntryStub stub(isolate(),
2479 #if V8_TARGET_ARCH_PPC64
2480 f->result_size,
2481 #else
2482 1,
2483 #endif
2484 save_doubles);
2485 CallStub(&stub);
2486 }
2487
2488
2489 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2490 int num_arguments) {
2491 mov(r3, Operand(num_arguments));
2492 mov(r4, Operand(ext));
2493
2494 CEntryStub stub(isolate(), 1);
2495 CallStub(&stub);
2496 }
2497
2498
2499 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2500 int num_arguments,
2501 int result_size) {
2502 // TODO(1236192): Most runtime routines don't need the number of
2503 // arguments passed in because it is constant. At some point we
2504 // should remove this need and make the runtime routine entry code
2505 // smarter.
2506 mov(r3, Operand(num_arguments));
2507 JumpToExternalReference(ext);
2508 }
2509
2510
2511 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
2512 int result_size) {
2513 TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
2514 result_size);
2515 }
2516
2517
2518 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2519 mov(r4, Operand(builtin));
2520 CEntryStub stub(isolate(), 1);
2521 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2522 }
2523
2524
2525 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
2526 const CallWrapper& call_wrapper) {
2527 // You can't call a builtin without a valid frame.
2528 DCHECK(flag == JUMP_FUNCTION || has_frame());
2529
2530 GetBuiltinEntry(r5, id);
2531 if (flag == CALL_FUNCTION) {
2532 call_wrapper.BeforeCall(CallSize(r5));
2533 Call(r5);
2534 call_wrapper.AfterCall();
2535 } else {
2536 DCHECK(flag == JUMP_FUNCTION);
2537 Jump(r5);
2538 }
2539 }
2540
2541
2542 void MacroAssembler::GetBuiltinFunction(Register target,
2543 Builtins::JavaScript id) {
2544 // Load the builtins object into target register.
2545 LoadP(target,
2546 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2547 LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2548 // Load the JavaScript builtin function from the builtins object.
2549 LoadP(target,
2550 FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)),
2551 r0);
2552 }
2553
2554
2555 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2556 DCHECK(!target.is(r4));
2557 GetBuiltinFunction(r4, id);
2558 // Load the code entry point from the builtins object.
2559 LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
2560 }
2561
2562
2563 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2564 Register scratch1, Register scratch2) {
2565 if (FLAG_native_code_counters && counter->Enabled()) {
2566 mov(scratch1, Operand(value));
2567 mov(scratch2, Operand(ExternalReference(counter)));
2568 stw(scratch1, MemOperand(scratch2));
2569 }
2570 }
2571
2572
2573 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2574 Register scratch1, Register scratch2) {
2575 DCHECK(value > 0);
2576 if (FLAG_native_code_counters && counter->Enabled()) {
2577 mov(scratch2, Operand(ExternalReference(counter)));
2578 lwz(scratch1, MemOperand(scratch2));
2579 addi(scratch1, scratch1, Operand(value));
2580 stw(scratch1, MemOperand(scratch2));
2581 }
2582 }
2583
2584
2585 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2586 Register scratch1, Register scratch2) {
2587 DCHECK(value > 0);
2588 if (FLAG_native_code_counters && counter->Enabled()) {
2589 mov(scratch2, Operand(ExternalReference(counter)));
2590 lwz(scratch1, MemOperand(scratch2));
2591 subi(scratch1, scratch1, Operand(value));
2592 stw(scratch1, MemOperand(scratch2));
2593 }
2594 }
2595
2596
2597 void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2598 CRegister cr) {
2599 if (emit_debug_code()) Check(cond, reason, cr);
2600 }
2601
2602
2603 void MacroAssembler::AssertFastElements(Register elements) {
2604 if (emit_debug_code()) {
2605 DCHECK(!elements.is(ip));
2606 Label ok;
2607 push(elements);
2608 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2609 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2610 cmp(elements, ip);
2611 beq(&ok);
2612 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2613 cmp(elements, ip);
2614 beq(&ok);
2615 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2616 cmp(elements, ip);
2617 beq(&ok);
2618 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2619 bind(&ok);
2620 pop(elements);
2621 }
2622 }
2623
2624
2625 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2626 Label L;
2627 b(cond, &L, cr);
2628 Abort(reason);
2629 // will not return here
2630 bind(&L);
2631 }
2632
2633
2634 void MacroAssembler::Abort(BailoutReason reason) {
2635 Label abort_start;
2636 bind(&abort_start);
2637 #ifdef DEBUG
2638 const char* msg = GetBailoutReason(reason);
2639 if (msg != NULL) {
2640 RecordComment("Abort message: ");
2641 RecordComment(msg);
2642 }
2643
2644 if (FLAG_trap_on_abort) {
2645 stop(msg);
2646 return;
2647 }
2648 #endif
2649
2650 LoadSmiLiteral(r0, Smi::FromInt(reason));
2651 push(r0);
2652 // Disable stub call restrictions to always allow calls to abort.
2653 if (!has_frame_) {
2654 // We don't actually want to generate a pile of code for this, so just
2655 // claim there is a stack frame, without generating one.
2656 FrameScope scope(this, StackFrame::NONE);
2657 CallRuntime(Runtime::kAbort, 1);
2658 } else {
2659 CallRuntime(Runtime::kAbort, 1);
2660 }
2661 // will not return here
2662 }
2663
2664
2665 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2666 if (context_chain_length > 0) {
2667 // Move up the chain of contexts to the context containing the slot.
2668 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2669 for (int i = 1; i < context_chain_length; i++) {
2670 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2671 }
2672 } else {
2673 // Slot is in the current function context. Move it into the
2674 // destination register in case we store into it (the write barrier
2675 // cannot be allowed to destroy the context in cp).
2676 mr(dst, cp);
2677 }
2678 }
2679
2680
2681 void MacroAssembler::LoadTransitionedArrayMapConditional(
2682 ElementsKind expected_kind, ElementsKind transitioned_kind,
2683 Register map_in_out, Register scratch, Label* no_map_match) {
2684 // Load the global or builtins object from the current context.
2685 LoadP(scratch,
2686 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2687 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2688
2689 // Check that the function's map is the same as the expected cached map.
2690 LoadP(scratch,
2691 MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2692 size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
2693 LoadP(ip, FieldMemOperand(scratch, offset));
2694 cmp(map_in_out, ip);
2695 bne(no_map_match);
2696
2697 // Use the transitioned cached map.
2698 offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
2699 LoadP(map_in_out, FieldMemOperand(scratch, offset));
2700 }
2701
2702
2703 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2704 // Load the global or builtins object from the current context.
2705 LoadP(function,
2706 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2707 // Load the native context from the global or builtins object.
2708 LoadP(function,
2709 FieldMemOperand(function, GlobalObject::kNativeContextOffset));
2710 // Load the function from the native context.
2711 LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
2712 }
2713
2714
2715 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2716 Register map,
2717 Register scratch) {
2718 // Load the initial map. The global functions all have initial maps.
2719 LoadP(map,
2720 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2721 if (emit_debug_code()) {
2722 Label ok, fail;
2723 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2724 b(&ok);
2725 bind(&fail);
2726 Abort(kGlobalFunctionsMustHaveInitialMap);
2727 bind(&ok);
2728 }
2729 }
2730
2731
2732 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2733 Register reg, Register scratch, Label* not_power_of_two_or_zero) {
2734 subi(scratch, reg, Operand(1));
2735 cmpi(scratch, Operand::Zero());
2736 blt(not_power_of_two_or_zero);
2737 and_(r0, scratch, reg, SetRC);
2738 bne(not_power_of_two_or_zero, cr0);
2739 }
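// The subi/and_ pair above is the usual x & (x - 1) test.  A sketch in
// portable C++ (illustrative only):
//   static bool IsPowerOfTwo(uint32_t x) { return x != 0 && (x & (x - 1)) == 0; }
// e.g. reg = 8 gives 8 & 7 == 0 (power of two); reg = 6 gives 6 & 5 == 4
// (branch taken); reg = 0 yields scratch = -1 and is rejected by the blt.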
2740
2741
2742 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2743 Register scratch,
2744 Label* zero_and_neg,
2745 Label* not_power_of_two) {
2746 subi(scratch, reg, Operand(1));
2747 cmpi(scratch, Operand::Zero());
2748 blt(zero_and_neg);
2749 and_(r0, scratch, reg, SetRC);
2750 bne(not_power_of_two, cr0);
2751 }
2752
2753 #if !V8_TARGET_ARCH_PPC64
2754 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2755 DCHECK(!reg.is(overflow));
2756 mr(overflow, reg); // Save original value.
2757 SmiTag(reg);
2758 xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0.
2759 }
2760
2761
2762 void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2763 Register overflow) {
2764 if (dst.is(src)) {
2765 // Fall back to slower case.
2766 SmiTagCheckOverflow(dst, overflow);
2767 } else {
2768 DCHECK(!dst.is(src));
2769 DCHECK(!dst.is(overflow));
2770 DCHECK(!src.is(overflow));
2771 SmiTag(dst, src);
2772 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0.
2773 }
2774 }
2775 #endif
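// The 32-bit checks above tag by shifting left one bit and detect overflow
// by xor-ing the original and tagged values; the xor is negative exactly
// when the shift changed the sign.  A sketch in portable C++ (illustrative
// only):
//   static bool SmiTagOverflowed(int32_t value) {
//     int32_t tagged = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
//     return (value ^ tagged) < 0;
//   }
// e.g. value = 0x40000000 tags to 0x80000000 and is flagged; value = 5 tags
// to 10 and is not.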
2776
2777 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2778 Label* on_not_both_smi) {
2779 STATIC_ASSERT(kSmiTag == 0);
2780 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2781 orx(r0, reg1, reg2, LeaveRC);
2782 JumpIfNotSmi(r0, on_not_both_smi);
2783 }
2784
2785
2786 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2787 Label* smi_case) {
2788 STATIC_ASSERT(kSmiTag == 0);
2789 STATIC_ASSERT(kSmiTagSize == 1);
2790 TestBit(src, 0, r0);
2791 SmiUntag(dst, src);
2792 beq(smi_case, cr0);
2793 }
2794
2795
2796 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2797 Label* non_smi_case) {
2798 STATIC_ASSERT(kSmiTag == 0);
2799 STATIC_ASSERT(kSmiTagSize == 1);
2800 TestBit(src, 0, r0);
2801 SmiUntag(dst, src);
2802 bne(non_smi_case, cr0);
2803 }
2804
2805
2806 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2807 Label* on_either_smi) {
2808 STATIC_ASSERT(kSmiTag == 0);
2809 JumpIfSmi(reg1, on_either_smi);
2810 JumpIfSmi(reg2, on_either_smi);
2811 }
2812
2813
2814 void MacroAssembler::AssertNotSmi(Register object) {
2815 if (emit_debug_code()) {
2816 STATIC_ASSERT(kSmiTag == 0);
2817 TestIfSmi(object, r0);
2818 Check(ne, kOperandIsASmi, cr0);
2819 }
2820 }
2821
2822
2823 void MacroAssembler::AssertSmi(Register object) {
2824 if (emit_debug_code()) {
2825 STATIC_ASSERT(kSmiTag == 0);
2826 TestIfSmi(object, r0);
2827 Check(eq, kOperandIsNotSmi, cr0);
2828 }
2829 }
2830
2831
2832 void MacroAssembler::AssertString(Register object) {
2833 if (emit_debug_code()) {
2834 STATIC_ASSERT(kSmiTag == 0);
2835 TestIfSmi(object, r0);
2836 Check(ne, kOperandIsASmiAndNotAString, cr0);
2837 push(object);
2838 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2839 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2840 pop(object);
2841 Check(lt, kOperandIsNotAString);
2842 }
2843 }
2844
2845
2846 void MacroAssembler::AssertName(Register object) {
2847 if (emit_debug_code()) {
2848 STATIC_ASSERT(kSmiTag == 0);
2849 TestIfSmi(object, r0);
2850 Check(ne, kOperandIsASmiAndNotAName, cr0);
2851 push(object);
2852 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2853 CompareInstanceType(object, object, LAST_NAME_TYPE);
2854 pop(object);
2855 Check(le, kOperandIsNotAName);
2856 }
2857 }
2858
2859
2860 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2861 Register scratch) {
2862 if (emit_debug_code()) {
2863 Label done_checking;
2864 AssertNotSmi(object);
2865 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2866 beq(&done_checking);
2867 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2868 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2869 Assert(eq, kExpectedUndefinedOrCell);
2870 bind(&done_checking);
2871 }
2872 }
2873
2874
2875 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2876 if (emit_debug_code()) {
2877 CompareRoot(reg, index);
2878 Check(eq, kHeapNumberMapRegisterClobbered);
2879 }
2880 }
2881
2882
2883 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2884 Register heap_number_map,
2885 Register scratch,
2886 Label* on_not_heap_number) {
2887 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2888 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2889 cmp(scratch, heap_number_map);
2890 bne(on_not_heap_number);
2891 }
2892
2893
2894 void MacroAssembler::LookupNumberStringCache(Register object, Register result,
2895 Register scratch1,
2896 Register scratch2,
2897 Register scratch3,
2898 Label* not_found) {
2899 // Use of registers. Register result is used as a temporary.
2900 Register number_string_cache = result;
2901 Register mask = scratch3;
2902
2903 // Load the number string cache.
2904 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2905
2906 // Make the hash mask from the length of the number string cache. It
2907 // contains two elements (number and string) for each cache entry.
2908 LoadP(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
2909 // Divide length by two (length is a smi).
2910 ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1);
2911 subi(mask, mask, Operand(1)); // Make mask.
2912
2913 // Calculate the entry in the number string cache. The hash value in the
2914 // number string cache for smis is just the smi value, and the hash for
2915 // doubles is the xor of the upper and lower words. See
2916 // Heap::GetNumberStringCache.
2917 Label is_smi;
2918 Label load_result_from_cache;
2919 JumpIfSmi(object, &is_smi);
2920 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
2921 DONT_DO_SMI_CHECK);
2922
2923 STATIC_ASSERT(8 == kDoubleSize);
2924 lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
2925 lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
2926 xor_(scratch1, scratch1, scratch2);
2927 and_(scratch1, scratch1, mask);
2928
2929 // Calculate address of entry in string cache: each entry consists
2930 // of two pointer sized fields.
2931 ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
2932 add(scratch1, number_string_cache, scratch1);
2933
2934 Register probe = mask;
2935 LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2936 JumpIfSmi(probe, not_found);
2937 lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
2938 lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
2939 fcmpu(d0, d1);
2940 bne(not_found); // The cache did not contain this value.
2941 b(&load_result_from_cache);
2942
2943 bind(&is_smi);
2944 Register scratch = scratch1;
2945 SmiUntag(scratch, object);
2946 and_(scratch, mask, scratch);
2947 // Calculate address of entry in string cache: each entry consists
2948 // of two pointer sized fields.
2949 ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1));
2950 add(scratch, number_string_cache, scratch);
2951
2952 // Check if the entry is the smi we are looking for.
2953 LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2954 cmp(object, probe);
2955 bne(not_found);
2956
2957 // Get the result from the cache.
2958 bind(&load_result_from_cache);
2959 LoadP(result,
2960 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
2961 IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
2962 scratch1, scratch2);
2963 }
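// Illustrative example of the cache indexing above: with 64 cache entries
// the computed mask is 63.  For the heap number 2.0 the upper and lower
// words are 0x40000000 and 0x00000000, so the hash is their xor,
// 0x40000000, and (hash & 63) == 0 selects entry 0; each entry is two
// pointers, so the byte offset into the cache is
// index << (kPointerSizeLog2 + 1).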
2964
2965
2966 void MacroAssembler::JumpIfNonSmisNotBothSequentialAsciiStrings(
2967 Register first, Register second, Register scratch1, Register scratch2,
2968 Label* failure) {
2969 // Test that both first and second are sequential ASCII strings.
2970 // Assume that they are non-smis.
2971 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2972 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2973 lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2974 lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2975
2976 JumpIfBothInstanceTypesAreNotSequentialAscii(scratch1, scratch2, scratch1,
2977 scratch2, failure);
2978 }
2979
2980 void MacroAssembler::JumpIfNotBothSequentialAsciiStrings(Register first,
2981 Register second,
2982 Register scratch1,
2983 Register scratch2,
2984 Label* failure) {
2985 // Check that neither is a smi.
2986 and_(scratch1, first, second);
2987 JumpIfSmi(scratch1, failure);
2988 JumpIfNonSmisNotBothSequentialAsciiStrings(first, second, scratch1, scratch2,
2989 failure);
2990 }
2991
2992
2993 void MacroAssembler::JumpIfNotUniqueName(Register reg, Label* not_unique_name) {
2994 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2995 Label succeed;
2996 andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2997 beq(&succeed, cr0);
2998 cmpi(reg, Operand(SYMBOL_TYPE));
2999 bne(not_unique_name);
3000
3001 bind(&succeed);
3002 }
3003
3004
3005 // Allocates a heap number or jumps to the gc_required label if the young
3006 // space is full and a scavenge is needed.
3007 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
3008 Register scratch2,
3009 Register heap_number_map,
3010 Label* gc_required,
3011 TaggingMode tagging_mode,
3012 MutableMode mode) {
3013 // Allocate an object in the heap for the heap number and tag it as a heap
3014 // object.
3015 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3016 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3017
3018 Heap::RootListIndex map_index = mode == MUTABLE
3019 ? Heap::kMutableHeapNumberMapRootIndex
3020 : Heap::kHeapNumberMapRootIndex;
3021 AssertIsRoot(heap_number_map, map_index);
3022
3023 // Store heap number map in the allocated object.
3024 if (tagging_mode == TAG_RESULT) {
3025 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
3026 r0);
3027 } else {
3028 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3029 }
3030 }
3031
3032
3033 void MacroAssembler::AllocateHeapNumberWithValue(
3034 Register result, DoubleRegister value, Register scratch1, Register scratch2,
3035 Register heap_number_map, Label* gc_required) {
3036 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3037 stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3038 }
3039
3040
3041 // Copies a fixed number of fields of heap objects from src to dst.
3042 void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
3043 int field_count) {
3044 // At least one bit set in the first 15 registers.
3045 DCHECK((temps & ((1 << 15) - 1)) != 0);
3046 DCHECK((temps & dst.bit()) == 0);
3047 DCHECK((temps & src.bit()) == 0);
3048 // Primitive implementation using only one temporary register.
3049
3050 Register tmp = no_reg;
3051 // Find a temp register in temps list.
3052 for (int i = 0; i < 15; i++) {
3053 if ((temps & (1 << i)) != 0) {
3054 tmp.set_code(i);
3055 break;
3056 }
3057 }
3058 DCHECK(!tmp.is(no_reg));
3059
3060 for (int i = 0; i < field_count; i++) {
3061 LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
3062 StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
3063 }
3064 }
3065
3066
3067 void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
3068 Register scratch) {
3069 Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
3070
3071 DCHECK(!scratch.is(r0));
3072
3073 cmpi(length, Operand::Zero());
3074 beq(&done);
3075
3076 // Check src alignment and length to see whether word_loop is possible
3077 andi(scratch, src, Operand(kPointerSize - 1));
3078 beq(&aligned, cr0);
3079 subfic(scratch, scratch, Operand(kPointerSize * 2));
3080 cmp(length, scratch);
3081 blt(&byte_loop);
3082
3083 // Align src before copying in word size chunks.
3084 subi(scratch, scratch, Operand(kPointerSize));
3085 mtctr(scratch);
3086 bind(&align_loop);
3087 lbz(scratch, MemOperand(src));
3088 addi(src, src, Operand(1));
3089 subi(length, length, Operand(1));
3090 stb(scratch, MemOperand(dst));
3091 addi(dst, dst, Operand(1));
3092 bdnz(&align_loop);
3093
3094 bind(&aligned);
3095
3096 // Copy bytes in word size chunks.
3097 if (emit_debug_code()) {
3098 andi(r0, src, Operand(kPointerSize - 1));
3099 Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
3100 }
3101
3102 ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
3103 cmpi(scratch, Operand::Zero());
3104 beq(&byte_loop);
3105
3106 mtctr(scratch);
3107 bind(&word_loop);
3108 LoadP(scratch, MemOperand(src));
3109 addi(src, src, Operand(kPointerSize));
3110 subi(length, length, Operand(kPointerSize));
3111 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3112 // Currently false for PPC, but a possible future optimization.
3113 StoreP(scratch, MemOperand(dst));
3114 addi(dst, dst, Operand(kPointerSize));
3115 } else {
3116 #if V8_TARGET_LITTLE_ENDIAN
3117 stb(scratch, MemOperand(dst, 0));
3118 ShiftRightImm(scratch, scratch, Operand(8));
3119 stb(scratch, MemOperand(dst, 1));
3120 ShiftRightImm(scratch, scratch, Operand(8));
3121 stb(scratch, MemOperand(dst, 2));
3122 ShiftRightImm(scratch, scratch, Operand(8));
3123 stb(scratch, MemOperand(dst, 3));
3124 #if V8_TARGET_ARCH_PPC64
3125 ShiftRightImm(scratch, scratch, Operand(8));
3126 stb(scratch, MemOperand(dst, 4));
3127 ShiftRightImm(scratch, scratch, Operand(8));
3128 stb(scratch, MemOperand(dst, 5));
3129 ShiftRightImm(scratch, scratch, Operand(8));
3130 stb(scratch, MemOperand(dst, 6));
3131 ShiftRightImm(scratch, scratch, Operand(8));
3132 stb(scratch, MemOperand(dst, 7));
3133 #endif
3134 #else
3135 #if V8_TARGET_ARCH_PPC64
3136 stb(scratch, MemOperand(dst, 7));
3137 ShiftRightImm(scratch, scratch, Operand(8));
3138 stb(scratch, MemOperand(dst, 6));
3139 ShiftRightImm(scratch, scratch, Operand(8));
3140 stb(scratch, MemOperand(dst, 5));
3141 ShiftRightImm(scratch, scratch, Operand(8));
3142 stb(scratch, MemOperand(dst, 4));
3143 ShiftRightImm(scratch, scratch, Operand(8));
3144 #endif
3145 stb(scratch, MemOperand(dst, 3));
3146 ShiftRightImm(scratch, scratch, Operand(8));
3147 stb(scratch, MemOperand(dst, 2));
3148 ShiftRightImm(scratch, scratch, Operand(8));
3149 stb(scratch, MemOperand(dst, 1));
3150 ShiftRightImm(scratch, scratch, Operand(8));
3151 stb(scratch, MemOperand(dst, 0));
3152 #endif
3153 addi(dst, dst, Operand(kPointerSize));
3154 }
3155 bdnz(&word_loop);
3156
3157 // Copy the last bytes if any left.
3158 cmpi(length, Operand::Zero());
3159 beq(&done);
3160
3161 bind(&byte_loop);
3162 mtctr(length);
3163 bind(&byte_loop_1);
3164 lbz(scratch, MemOperand(src));
3165 addi(src, src, Operand(1));
3166 stb(scratch, MemOperand(dst));
3167 addi(dst, dst, Operand(1));
3168 bdnz(&byte_loop_1);
3169
3170 bind(&done);
3171 }
3172
3173
3174 void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
3175 Register count,
3176 Register filler) {
3177 Label loop;
3178 mtctr(count);
3179 bind(&loop);
3180 StoreP(filler, MemOperand(start_offset));
3181 addi(start_offset, start_offset, Operand(kPointerSize));
3182 bdnz(&loop);
3183 }
3184
3185 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3186 Register end_offset,
3187 Register filler) {
3188 Label done;
3189 sub(r0, end_offset, start_offset, LeaveOE, SetRC);
3190 beq(&done, cr0);
3191 ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
3192 InitializeNFieldsWithFiller(start_offset, r0, filler);
3193 bind(&done);
3194 }
3195
3196
3197 void MacroAssembler::SaveFPRegs(Register location, int first, int count) {
3198 DCHECK(count > 0);
3199 int cur = first;
3200 subi(location, location, Operand(count * kDoubleSize));
3201 for (int i = 0; i < count; i++) {
3202 DoubleRegister reg = DoubleRegister::from_code(cur++);
3203 stfd(reg, MemOperand(location, i * kDoubleSize));
3204 }
3205 }
3206
3207
3208 void MacroAssembler::RestoreFPRegs(Register location, int first, int count) {
3209 DCHECK(count > 0);
3210 int cur = first + count - 1;
3211 for (int i = count - 1; i >= 0; i--) {
3212 DoubleRegister reg = DoubleRegister::from_code(cur--);
3213 lfd(reg, MemOperand(location, i * kDoubleSize));
3214 }
3215 addi(location, location, Operand(count * kDoubleSize));
3216 }
3217
3218
3219 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialAscii(
3220 Register first, Register second, Register scratch1, Register scratch2,
3221 Label* failure) {
3222 const int kFlatAsciiStringMask =
3223 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3224 const int kFlatAsciiStringTag =
3225 kStringTag | kOneByteStringTag | kSeqStringTag;
3226 andi(scratch1, first, Operand(kFlatAsciiStringMask));
3227 andi(scratch2, second, Operand(kFlatAsciiStringMask));
3228 cmpi(scratch1, Operand(kFlatAsciiStringTag));
3229 bne(failure);
3230 cmpi(scratch2, Operand(kFlatAsciiStringTag));
3231 bne(failure);
3232 }
3233
3234
3235 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialAscii(Register type,
3236 Register scratch,
3237 Label* failure) {
3238 const int kFlatAsciiStringMask =
3239 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3240 const int kFlatAsciiStringTag =
3241 kStringTag | kOneByteStringTag | kSeqStringTag;
3242 andi(scratch, type, Operand(kFlatAsciiStringMask));
3243 cmpi(scratch, Operand(kFlatAsciiStringTag));
3244 bne(failure);
3245 }
3246
3247 static const int kRegisterPassedArguments = 8;
3248
3249
3250 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3251 int num_double_arguments) {
3252 int stack_passed_words = 0;
3253 if (num_double_arguments > DoubleRegister::kNumRegisters) {
3254 stack_passed_words +=
3255 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
3256 }
3257 // Up to 8 simple arguments are passed in registers r3..r10.
3258 if (num_reg_arguments > kRegisterPassedArguments) {
3259 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3260 }
3261 return stack_passed_words;
3262 }
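// Worked example for the accounting above: a call with 10 integer arguments
// and 2 double arguments yields stack_passed_words == 2: only the integer
// arguments beyond the 8 passed in r3..r10 are counted, and doubles only
// contribute (two words each) once more than DoubleRegister::kNumRegisters
// of them are requested.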
3263
3264
3265 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
3266 Register value,
3267 uint32_t encoding_mask) {
3268 Label is_object;
3269 TestIfSmi(string, r0);
3270 Check(ne, kNonObject, cr0);
3271
3272 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3273 lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3274
3275 andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3276 cmpi(ip, Operand(encoding_mask));
3277 Check(eq, kUnexpectedStringType);
3278
3279 // The index is assumed to be untagged coming in; tag it to compare with the
3280 // string length without using a temp register. It is restored at the end of
3281 // this function.
3282 #if !V8_TARGET_ARCH_PPC64
3283 Label index_tag_ok, index_tag_bad;
3284 JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
3285 #endif
3286 SmiTag(index, index);
3287 #if !V8_TARGET_ARCH_PPC64
3288 b(&index_tag_ok);
3289 bind(&index_tag_bad);
3290 Abort(kIndexIsTooLarge);
3291 bind(&index_tag_ok);
3292 #endif
3293
3294 LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
3295 cmp(index, ip);
3296 Check(lt, kIndexIsTooLarge);
3297
3298 DCHECK(Smi::FromInt(0) == 0);
3299 cmpi(index, Operand::Zero());
3300 Check(ge, kIndexIsNegative);
3301
3302 SmiUntag(index, index);
3303 }
3304
3305
3306 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3307 int num_double_arguments,
3308 Register scratch) {
3309 int frame_alignment = ActivationFrameAlignment();
3310 int stack_passed_arguments =
3311 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3312 int stack_space = kNumRequiredStackFrameSlots;
3313
3314 if (frame_alignment > kPointerSize) {
3315 // Make stack end at alignment and make room for stack arguments
3316 // -- preserving original value of sp.
3317 mr(scratch, sp);
3318 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
3319 DCHECK(IsPowerOf2(frame_alignment));
3320 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
3321 StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3322 } else {
3323 // Make room for stack arguments
3324 stack_space += stack_passed_arguments;
3325 }
3326
3327 // Allocate frame with required slots to make ABI work.
3328 li(r0, Operand::Zero());
3329 StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
3330 }
3331
3332
3333 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3334 Register scratch) {
3335 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3336 }
3337
3338
3339 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
3340
3341
3342 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
3343
3344
3345 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3346 DoubleRegister src2) {
3347 if (src2.is(d1)) {
3348 DCHECK(!src1.is(d2));
3349 Move(d2, src2);
3350 Move(d1, src1);
3351 } else {
3352 Move(d1, src1);
3353 Move(d2, src2);
3354 }
3355 }
3356
3357
3358 void MacroAssembler::CallCFunction(ExternalReference function,
3359 int num_reg_arguments,
3360 int num_double_arguments) {
3361 mov(ip, Operand(function));
3362 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3363 }
3364
3365
3366 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3367 int num_double_arguments) {
3368 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3369 }
3370
3371
3372 void MacroAssembler::CallCFunction(ExternalReference function,
3373 int num_arguments) {
3374 CallCFunction(function, num_arguments, 0);
3375 }
3376
3377
3378 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3379 CallCFunction(function, num_arguments, 0);
3380 }
3381
3382
3383 void MacroAssembler::CallCFunctionHelper(Register function,
3384 int num_reg_arguments,
3385 int num_double_arguments) {
3386 DCHECK(has_frame());
3387 // Just call directly. The function called cannot cause a GC, or
3388 // allow preemption, so the return address in the link register
3389 // stays correct.
3390 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
3391 // AIX uses a function descriptor: the first word holds the entry point and
3392 // the second holds the TOC pointer, so load both before branching.
3393 LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
3394 LoadP(ip, MemOperand(function, 0));
3395 Register dest = ip;
3396 #elif ABI_TOC_ADDRESSABILITY_VIA_IP
3397 Move(ip, function);
3398 Register dest = ip;
3399 #else
3400 Register dest = function;
3401 #endif
3402
3403 Call(dest);
3404
3405 // Remove the frame created in PrepareCallCFunction.
3406 int stack_passed_arguments =
3407 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3408 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3409 if (ActivationFrameAlignment() > kPointerSize) {
3410 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3411 } else {
3412 addi(sp, sp, Operand(stack_space * kPointerSize));
3413 }
3414 }
3415
3416
3417 void MacroAssembler::FlushICache(Register address, size_t size,
3418 Register scratch) {
3419 Label done;
3420
3421 dcbf(r0, address);
3422 sync();
3423 icbi(r0, address);
3424 isync();
3425
3426 // This code handles ranges which cross at most one cache line boundary.
3427 // scratch holds the start of the last cache line intersecting the range.
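// For example, with 128-byte cache lines, flushing 8 bytes that start 4 bytes
// before a line boundary touches two lines: the dcbf/icbi pair above covers
// the line containing 'address', and the pair below covers the line holding
// the end of the range.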
3428 const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
3429
3430 DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
3431 addi(scratch, address, Operand(size - 1));
3432 ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
3433 cmpl(scratch, address);
3434 ble(&done);
3435
3436 dcbf(r0, scratch);
3437 sync();
3438 icbi(r0, scratch);
3439 isync();
3440
3441 bind(&done);
3442 }
3443
3444
3445 void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
3446 Register new_value) {
3447 lwz(scratch, MemOperand(location));
3448
3449 #if V8_OOL_CONSTANT_POOL
3450 if (emit_debug_code()) {
3451 // Check that the instruction sequence is a load from the constant pool
3452 #if V8_TARGET_ARCH_PPC64
3453 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
3454 Cmpi(scratch, Operand(ADDI), r0);
3455 Check(eq, kTheInstructionShouldBeALi);
3456 lwz(scratch, MemOperand(location, kInstrSize));
3457 #endif
3458 ExtractBitMask(scratch, scratch, 0x1f * B16);
3459 cmpi(scratch, Operand(kConstantPoolRegister.code()));
3460 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3461 // Scratch was clobbered. Restore it.
3462 lwz(scratch, MemOperand(location));
3463 }
3464 // Get the address of the constant and patch it.
3465 andi(scratch, scratch, Operand(kImm16Mask));
3466 StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
3467 #else
3468 // This code assumes a FIXED_SEQUENCE for lis/ori
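// For example, on 32-bit a constant such as 0x12345678 is materialized as
//   lis rT, 0x1234 ; ori rT, rT, 0x5678
// so patching the value means rewriting the 16-bit immediate fields of both
// instructions (and, on 64-bit, of the oris/ori pair that follows the sldi).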
3469
3470 // At this point scratch is a lis instruction.
3471 if (emit_debug_code()) {
3472 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
3473 Cmpi(scratch, Operand(ADDIS), r0);
3474 Check(eq, kTheInstructionToPatchShouldBeALis);
3475 lwz(scratch, MemOperand(location));
3476 }
3477
3478 // insert new high word into lis instruction
3479 #if V8_TARGET_ARCH_PPC64
3480 srdi(ip, new_value, Operand(32));
3481 rlwimi(scratch, ip, 16, 16, 31);
3482 #else
3483 rlwimi(scratch, new_value, 16, 16, 31);
3484 #endif
3485
3486 stw(scratch, MemOperand(location));
3487
3488 lwz(scratch, MemOperand(location, kInstrSize));
3489 // scratch is now ori.
3490 if (emit_debug_code()) {
3491 And(scratch, scratch, Operand(kOpcodeMask));
3492 Cmpi(scratch, Operand(ORI), r0);
3493 Check(eq, kTheInstructionShouldBeAnOri);
3494 lwz(scratch, MemOperand(location, kInstrSize));
3495 }
3496
3497 // insert new low word into ori instruction
3498 #if V8_TARGET_ARCH_PPC64
3499 rlwimi(scratch, ip, 0, 16, 31);
3500 #else
3501 rlwimi(scratch, new_value, 0, 16, 31);
3502 #endif
3503 stw(scratch, MemOperand(location, kInstrSize));
3504
3505 #if V8_TARGET_ARCH_PPC64
3506 if (emit_debug_code()) {
3507 lwz(scratch, MemOperand(location, 2 * kInstrSize));
3508 // scratch is now sldi.
3509 And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
3510 Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
3511 Check(eq, kTheInstructionShouldBeASldi);
3512 }
3513
3514 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3515 // scratch is now ori.
3516 if (emit_debug_code()) {
3517 And(scratch, scratch, Operand(kOpcodeMask));
3518 Cmpi(scratch, Operand(ORIS), r0);
3519 Check(eq, kTheInstructionShouldBeAnOris);
3520 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3521 }
3522
3523 rlwimi(scratch, new_value, 16, 16, 31);
3524 stw(scratch, MemOperand(location, 3 * kInstrSize));
3525
3526 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3527 // scratch is now ori.
3528 if (emit_debug_code()) {
3529 And(scratch, scratch, Operand(kOpcodeMask));
3530 Cmpi(scratch, Operand(ORI), r0);
3531 Check(eq, kTheInstructionShouldBeAnOri);
3532 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3533 }
3534 rlwimi(scratch, new_value, 0, 16, 31);
3535 stw(scratch, MemOperand(location, 4 * kInstrSize));
3536 #endif
3537
3538 // Update the I-cache so the new lis and addic can be executed.
3539 #if V8_TARGET_ARCH_PPC64
3540 FlushICache(location, 5 * kInstrSize, scratch);
3541 #else
3542 FlushICache(location, 2 * kInstrSize, scratch);
3543 #endif
3544 #endif
3545 }
3546
3547
3548 void MacroAssembler::GetRelocatedValue(Register location, Register result,
3549 Register scratch) {
3550 lwz(result, MemOperand(location));
3551
3552 #if V8_OOL_CONSTANT_POOL
3553 if (emit_debug_code()) {
3554 // Check that the instruction sequence is a load from the constant pool
3555 #if V8_TARGET_ARCH_PPC64
3556 And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
3557 Cmpi(result, Operand(ADDI), r0);
3558 Check(eq, kTheInstructionShouldBeALi);
3559 lwz(result, MemOperand(location, kInstrSize));
3560 #endif
3561 ExtractBitMask(result, result, 0x1f * B16);
3562 cmpi(result, Operand(kConstantPoolRegister.code()));
3563 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3564 lwz(result, MemOperand(location));
3565 }
3566 // Get the address of the constant and retrieve it.
3567 andi(result, result, Operand(kImm16Mask));
3568 LoadPX(result, MemOperand(kConstantPoolRegister, result));
3569 #else
3570 // This code assumes a FIXED_SEQUENCE for lis/ori
3571 if (emit_debug_code()) {
3572 And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
3573 Cmpi(result, Operand(ADDIS), r0);
3574 Check(eq, kTheInstructionShouldBeALis);
3575 lwz(result, MemOperand(location));
3576 }
3577
3578 // result now holds a lis instruction. Extract the immediate.
3579 slwi(result, result, Operand(16));
3580
3581 lwz(scratch, MemOperand(location, kInstrSize));
3582 if (emit_debug_code()) {
3583 And(scratch, scratch, Operand(kOpcodeMask));
3584 Cmpi(scratch, Operand(ORI), r0);
3585 Check(eq, kTheInstructionShouldBeAnOri);
3586 lwz(scratch, MemOperand(location, kInstrSize));
3587 }
3588 // Copy the low 16bits from ori instruction into result
3589 rlwimi(result, scratch, 0, 16, 31);
3590
3591 #if V8_TARGET_ARCH_PPC64
3592 if (emit_debug_code()) {
3593 lwz(scratch, MemOperand(location, 2 * kInstrSize));
3594 // scratch is now sldi.
3595 And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
3596 Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
3597 Check(eq, kTheInstructionShouldBeASldi);
3598 }
3599
3600 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3601 // scratch is now ori.
3602 if (emit_debug_code()) {
3603 And(scratch, scratch, Operand(kOpcodeMask));
3604 Cmpi(scratch, Operand(ORIS), r0);
3605 Check(eq, kTheInstructionShouldBeAnOris);
3606 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3607 }
3608 sldi(result, result, Operand(16));
3609 rldimi(result, scratch, 0, 48);
3610
3611 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3612 // scratch is now ori.
3613 if (emit_debug_code()) {
3614 And(scratch, scratch, Operand(kOpcodeMask));
3615 Cmpi(scratch, Operand(ORI), r0);
3616 Check(eq, kTheInstructionShouldBeAnOri);
3617 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3618 }
3619 sldi(result, result, Operand(16));
3620 rldimi(result, scratch, 0, 48);
3621 #endif
3622 #endif
3623 }
3624
3625
3626 void MacroAssembler::CheckPageFlag(
3627 Register object,
3628 Register scratch, // scratch may be same register as object
3629 int mask, Condition cc, Label* condition_met) {
3630 DCHECK(cc == ne || cc == eq);
3631 ClearRightImm(scratch, object, Operand(kPageSizeBits));
3632 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3633
3634 And(r0, scratch, Operand(mask), SetRC);
3635
3636 if (cc == ne) {
3637 bne(condition_met, cr0);
3638 }
3639 if (cc == eq) {
3640 beq(condition_met, cr0);
3641 }
3642 }
3643
3644
3645 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Register scratch,
3646 Label* if_deprecated) {
3647 if (map->CanBeDeprecated()) {
3648 mov(scratch, Operand(map));
3649 lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3650 ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC);
3651 bne(if_deprecated, cr0);
3652 }
3653 }
3654
3655
3656 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3657 Register scratch1, Label* on_black) {
3658 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3659 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3660 }
3661
3662
3663 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3664 Register mask_scratch, Label* has_color,
3665 int first_bit, int second_bit) {
3666 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3667
3668 GetMarkBits(object, bitmap_scratch, mask_scratch);
3669
3670 Label other_color, word_boundary;
3671 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3672 // Test the first bit
3673 and_(r0, ip, mask_scratch, SetRC);
3674 b(first_bit == 1 ? eq : ne, &other_color, cr0);
3675 // Shift left 1
3676 // May need to load the next cell
3677 slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
3678 beq(&word_boundary, cr0);
3679 // Test the second bit
3680 and_(r0, ip, mask_scratch, SetRC);
3681 b(second_bit == 1 ? ne : eq, has_color, cr0);
3682 b(&other_color);
3683
3684 bind(&word_boundary);
3685 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3686 andi(r0, ip, Operand(1));
3687 b(second_bit == 1 ? ne : eq, has_color, cr0);
3688 bind(&other_color);
3689 }
3690
3691
3692 // Detect some, but not all, common pointer-free objects. This is used by the
3693 // incremental write barrier which doesn't care about oddballs (they are always
3694 // marked black immediately so this code is not hit).
3695 void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
3696 Label* not_data_object) {
3697 Label is_data_object;
3698 LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3699 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3700 beq(&is_data_object);
3701 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3702 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3703 // If it's a string and it's not a cons string then it's an object containing
3704 // no GC pointers.
3705 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3706 STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
3707 andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3708 bne(not_data_object, cr0);
3709 bind(&is_data_object);
3710 }
3711
3712
3713 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3714 Register mask_reg) {
3715 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3716 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
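// Each mark bit covers one pointer-sized word: the low kPointerSizeLog2 bits
// of the address are dropped, the next Bitmap::kBitsPerCellLog2 bits select
// the bit within a cell (mask_reg), and the remaining page-offset bits select
// the cell, whose byte offset is added to the page start in bitmap_reg.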
3717 lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
3718 and_(bitmap_reg, addr_reg, r0);
3719 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3720 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3721 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3722 ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3723 add(bitmap_reg, bitmap_reg, ip);
3724 li(ip, Operand(1));
3725 slw(mask_reg, ip, mask_reg);
3726 }
3727
3728
3729 void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
3730 Register mask_scratch,
3731 Register load_scratch,
3732 Label* value_is_white_and_not_data) {
3733 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3734 GetMarkBits(value, bitmap_scratch, mask_scratch);
3735
3736 // If the value is black or grey we don't need to do anything.
3737 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3738 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3739 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
3740 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3741
3742 Label done;
3743
3744 // Since both black and grey have a 1 in the first position and white does
3745 // not have a 1 there we only need to check one bit.
3746 lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3747 and_(r0, mask_scratch, load_scratch, SetRC);
3748 bne(&done, cr0);
3749
3750 if (emit_debug_code()) {
3751 // Check for impossible bit pattern.
3752 Label ok;
3753 // The left shift may overflow, making the check conservative.
3754 slwi(r0, mask_scratch, Operand(1));
3755 and_(r0, load_scratch, r0, SetRC);
3756 beq(&ok, cr0);
3757 stop("Impossible marking bit pattern");
3758 bind(&ok);
3759 }
3760
3761 // Value is white. We check whether it is data that doesn't need scanning.
3762 // Currently only checks for HeapNumber and non-cons strings.
3763 Register map = load_scratch; // Holds map while checking type.
3764 Register length = load_scratch; // Holds length of object after testing type.
3765 Label is_data_object, maybe_string_object, is_string_object, is_encoded;
3766 #if V8_TARGET_ARCH_PPC64
3767 Label length_computed;
3768 #endif
3769
3770
3771 // Check for heap-number
3772 LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
3773 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3774 bne(&maybe_string_object);
3775 li(length, Operand(HeapNumber::kSize));
3776 b(&is_data_object);
3777 bind(&maybe_string_object);
3778
3779 // Check for strings.
3780 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3781 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3782 // If it's a string and it's not a cons string then it's an object containing
3783 // no GC pointers.
3784 Register instance_type = load_scratch;
3785 lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3786 andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3787 bne(value_is_white_and_not_data, cr0);
3788 // It's a non-indirect (non-cons and non-slice) string.
3789 // If it's external, the length is just ExternalString::kSize.
3790 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3791 // External strings are the only ones with the kExternalStringTag bit
3792 // set.
3793 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
3794 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
3795 andi(r0, instance_type, Operand(kExternalStringTag));
3796 beq(&is_string_object, cr0);
3797 li(length, Operand(ExternalString::kSize));
3798 b(&is_data_object);
3799 bind(&is_string_object);
3800
3801 // Sequential string, either ASCII or UC16.
3802 // For ASCII (char-size of 1) we untag the smi to get the length.
3803 // For UC16 (char-size of 2):
3804 // - (32-bit) we just leave the smi tag in place, thereby getting
3805 // the length multiplied by 2.
3806 // - (64-bit) we compute the offset in the 2-byte array
3807 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3808 LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
3809 andi(r0, instance_type, Operand(kStringEncodingMask));
3810 beq(&is_encoded, cr0);
3811 SmiUntag(ip);
3812 #if V8_TARGET_ARCH_PPC64
3813 b(&length_computed);
3814 #endif
3815 bind(&is_encoded);
3816 #if V8_TARGET_ARCH_PPC64
3817 SmiToShortArrayOffset(ip, ip);
3818 bind(&length_computed);
3819 #else
3820 DCHECK(kSmiShift == 1);
3821 #endif
3822 addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3823 li(r0, Operand(~kObjectAlignmentMask));
3824 and_(length, length, r0);
3825
3826 bind(&is_data_object);
3827 // Value is a data object, and it is white. Mark it black. Since we know
3828 // that the object is white we can make it black by flipping one bit.
3829 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3830 orx(ip, ip, mask_scratch);
3831 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3832
3833 mov(ip, Operand(~Page::kPageAlignmentMask));
3834 and_(bitmap_scratch, bitmap_scratch, ip);
3835 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3836 add(ip, ip, length);
3837 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3838
3839 bind(&done);
3840 }
3841
3842
3843 // Saturate a value into an 8-bit unsigned integer:
3844 //   if input_value < 0, output_value is 0;
3845 //   if input_value > 255, output_value is 255;
3846 //   otherwise output_value is the input_value.
3847 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3848 Label done, negative_label, overflow_label;
3849 int satval = (1 << 8) - 1;
3850
3851 cmpi(input_reg, Operand::Zero());
3852 blt(&negative_label);
3853
3854 cmpi(input_reg, Operand(satval));
3855 bgt(&overflow_label);
3856 if (!output_reg.is(input_reg)) {
3857 mr(output_reg, input_reg);
3858 }
3859 b(&done);
3860
3861 bind(&negative_label);
3862 li(output_reg, Operand::Zero()); // set to 0 if negative
3863 b(&done);
3864
3865
3866 bind(&overflow_label); // set to satval if > satval
3867 li(output_reg, Operand(satval));
3868
3869 bind(&done);
3870 }
3871
3872
3873 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
3874
3875
3876 void MacroAssembler::ResetRoundingMode() {
3877 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
3878 }
3879
3880
3881 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3882 DoubleRegister input_reg,
3883 DoubleRegister double_scratch) {
3884 Label above_zero;
3885 Label done;
3886 Label in_bounds;
3887
3888 LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3889 fcmpu(input_reg, double_scratch);
3890 bgt(&above_zero);
3891
3892 // Double value is less than or equal to zero, or NaN; return 0.
3893 LoadIntLiteral(result_reg, 0);
3894 b(&done);
3895
3896 // Double value is greater than 255; return 255.
3897 bind(&above_zero);
3898 LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3899 fcmpu(input_reg, double_scratch);
3900 ble(&in_bounds);
3901 LoadIntLiteral(result_reg, 255);
3902 b(&done);
3903
3904 // In 0-255 range, round and truncate.
3905 bind(&in_bounds);
3906
3907 // round to nearest (default rounding mode)
3908 fctiw(double_scratch, input_reg);
3909 MovDoubleLowToInt(result_reg, double_scratch);
3910 bind(&done);
3911 }
3912
3913
3914 void MacroAssembler::LoadInstanceDescriptors(Register map,
3915 Register descriptors) {
3916 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3917 }
3918
3919
3920 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3921 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3922 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3923 }
3924
3925
3926 void MacroAssembler::EnumLength(Register dst, Register map) {
3927 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3928 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3929 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
3930 SmiTag(dst);
3931 }
3932
3933
3934 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3935 Register empty_fixed_array_value = r9;
3936 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3937 Label next, start;
3938 mr(r5, r3);
3939
3940 // Check if the enum length field is properly initialized, indicating that
3941 // there is an enum cache.
3942 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3943
3944 EnumLength(r6, r4);
3945 CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3946 beq(call_runtime);
3947
3948 b(&start);
3949
3950 bind(&next);
3951 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3952
3953 // For all objects but the receiver, check that the cache is empty.
3954 EnumLength(r6, r4);
3955 CmpSmiLiteral(r6, Smi::FromInt(0), r0);
3956 bne(call_runtime);
3957
3958 bind(&start);
3959
3960 // Check that there are no elements. Register r5 contains the current JS
3961 // object we've reached through the prototype chain.
3962 Label no_elements;
3963 LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
3964 cmp(r5, empty_fixed_array_value);
3965 beq(&no_elements);
3966
3967 // Second chance, the object may be using the empty slow element dictionary.
3968 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
3969 bne(call_runtime);
3970
3971 bind(&no_elements);
3972 LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
3973 cmp(r5, null_value);
3974 bne(&next);
3975 }
3976
3977
3978 ////////////////////////////////////////////////////////////////////////////////
3979 //
3980 // New MacroAssembler Interfaces added for PPC
3981 //
3982 ////////////////////////////////////////////////////////////////////////////////
3983 void MacroAssembler::LoadIntLiteral(Register dst, int value) {
3984 mov(dst, Operand(value));
3985 }
3986
3987
3988 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
3989 mov(dst, Operand(smi));
3990 }
3991
3992
3993 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
3994 Register scratch) {
3995 #if V8_OOL_CONSTANT_POOL
3996 // TODO(mbrandy): enable extended constant pool usage for doubles.
3997 // See ARM commit e27ab337 for a reference.
3998 if (is_constant_pool_available() && !is_constant_pool_full()) {
3999 RelocInfo rinfo(pc_, value);
4000 ConstantPoolAddEntry(rinfo);
4001 #if V8_TARGET_ARCH_PPC64
4002 // We use a 2-instruction sequence here for consistency with mov.
4003 li(scratch, Operand::Zero());
4004 lfdx(result, MemOperand(kConstantPoolRegister, scratch));
4005 #else
4006 lfd(result, MemOperand(kConstantPoolRegister, 0));
4007 #endif
4008 return;
4009 }
4010 #endif
4011
4012 // Avoid a gcc strict-aliasing error by using a union cast.
4013 union {
4014 double dval;
4015 #if V8_TARGET_ARCH_PPC64
4016 intptr_t ival;
4017 #else
4018 intptr_t ival[2];
4019 #endif
4020 } litVal;
4021
4022 litVal.dval = value;
4023
4024 #if V8_TARGET_ARCH_PPC64
4025 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4026 mov(scratch, Operand(litVal.ival));
4027 mtfprd(result, scratch);
4028 return;
4029 }
4030 #endif
4031
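// No direct GPR-to-FPR move is available here, so write the raw bit image of
// the double to a stack slot and reload it as a floating-point value.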
4032 addi(sp, sp, Operand(-kDoubleSize));
4033 #if V8_TARGET_ARCH_PPC64
4034 mov(scratch, Operand(litVal.ival));
4035 std(scratch, MemOperand(sp));
4036 #else
4037 LoadIntLiteral(scratch, litVal.ival[0]);
4038 stw(scratch, MemOperand(sp, 0));
4039 LoadIntLiteral(scratch, litVal.ival[1]);
4040 stw(scratch, MemOperand(sp, 4));
4041 #endif
4042 nop(); // LHS/RAW optimization
4043 lfd(result, MemOperand(sp, 0));
4044 addi(sp, sp, Operand(kDoubleSize));
4045 }
4046
4047
4048 void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
4049 Register scratch) {
4050 // sign-extend src to 64-bit
4051 #if V8_TARGET_ARCH_PPC64
4052 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4053 mtfprwa(dst, src);
4054 return;
4055 }
4056 #endif
4057
4058 DCHECK(!src.is(scratch));
4059 subi(sp, sp, Operand(kDoubleSize));
4060 #if V8_TARGET_ARCH_PPC64
4061 extsw(scratch, src);
4062 std(scratch, MemOperand(sp, 0));
4063 #else
4064 srawi(scratch, src, 31);
4065 stw(scratch, MemOperand(sp, Register::kExponentOffset));
4066 stw(src, MemOperand(sp, Register::kMantissaOffset));
4067 #endif
4068 nop(); // LHS/RAW optimization
4069 lfd(dst, MemOperand(sp, 0));
4070 addi(sp, sp, Operand(kDoubleSize));
4071 }
4072
4073
4074 void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
4075 Register scratch) {
4076 // zero-extend src to 64-bit
4077 #if V8_TARGET_ARCH_PPC64
4078 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4079 mtfprwz(dst, src);
4080 return;
4081 }
4082 #endif
4083
4084 DCHECK(!src.is(scratch));
4085 subi(sp, sp, Operand(kDoubleSize));
4086 #if V8_TARGET_ARCH_PPC64
4087 clrldi(scratch, src, Operand(32));
4088 std(scratch, MemOperand(sp, 0));
4089 #else
4090 li(scratch, Operand::Zero());
4091 stw(scratch, MemOperand(sp, Register::kExponentOffset));
4092 stw(src, MemOperand(sp, Register::kMantissaOffset));
4093 #endif
4094 nop(); // LHS/RAW optimization
4095 lfd(dst, MemOperand(sp, 0));
4096 addi(sp, sp, Operand(kDoubleSize));
4097 }
4098
4099
4100 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
4101 #if !V8_TARGET_ARCH_PPC64
4102 Register src_hi,
4103 #endif
4104 Register src) {
4105 #if V8_TARGET_ARCH_PPC64
4106 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4107 mtfprd(dst, src);
4108 return;
4109 }
4110 #endif
4111
4112 subi(sp, sp, Operand(kDoubleSize));
4113 #if V8_TARGET_ARCH_PPC64
4114 std(src, MemOperand(sp, 0));
4115 #else
4116 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
4117 stw(src, MemOperand(sp, Register::kMantissaOffset));
4118 #endif
4119 nop(); // LHS/RAW optimization
4120 lfd(dst, MemOperand(sp, 0));
4121 addi(sp, sp, Operand(kDoubleSize));
4122 }
4123
4124
4125 #if V8_TARGET_ARCH_PPC64
4126 void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
4127 Register src_hi,
4128 Register src_lo,
4129 Register scratch) {
4130 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4131 sldi(scratch, src_hi, Operand(32));
4132 rldimi(scratch, src_lo, 0, 32);
4133 mtfprd(dst, scratch);
4134 return;
4135 }
4136
4137 subi(sp, sp, Operand(kDoubleSize));
4138 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
4139 stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
4140 nop(); // LHS/RAW optimization
4141 lfd(dst, MemOperand(sp));
4142 addi(sp, sp, Operand(kDoubleSize));
4143 }
4144 #endif
4145
4146
4147 void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
4148 #if V8_TARGET_ARCH_PPC64
4149 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4150 mffprwz(dst, src);
4151 return;
4152 }
4153 #endif
4154
4155 subi(sp, sp, Operand(kDoubleSize));
4156 stfd(src, MemOperand(sp));
4157 nop(); // LHS/RAW optimization
4158 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
4159 addi(sp, sp, Operand(kDoubleSize));
4160 }
4161
4162
4163 void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
4164 #if V8_TARGET_ARCH_PPC64
4165 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4166 mffprd(dst, src);
4167 srdi(dst, dst, Operand(32));
4168 return;
4169 }
4170 #endif
4171
4172 subi(sp, sp, Operand(kDoubleSize));
4173 stfd(src, MemOperand(sp));
4174 nop(); // LHS/RAW optimization
4175 lwz(dst, MemOperand(sp, Register::kExponentOffset));
4176 addi(sp, sp, Operand(kDoubleSize));
4177 }
4178
4179
4180 void MacroAssembler::MovDoubleToInt64(
4181 #if !V8_TARGET_ARCH_PPC64
4182 Register dst_hi,
4183 #endif
4184 Register dst, DoubleRegister src) {
4185 #if V8_TARGET_ARCH_PPC64
4186 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4187 mffprd(dst, src);
4188 return;
4189 }
4190 #endif
4191
4192 subi(sp, sp, Operand(kDoubleSize));
4193 stfd(src, MemOperand(sp));
4194 nop(); // LHS/RAW optimization
4195 #if V8_TARGET_ARCH_PPC64
4196 ld(dst, MemOperand(sp, 0));
4197 #else
4198 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
4199 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
4200 #endif
4201 addi(sp, sp, Operand(kDoubleSize));
4202 }
4203
4204
4205 void MacroAssembler::Add(Register dst, Register src, intptr_t value,
4206 Register scratch) {
4207 if (is_int16(value)) {
4208 addi(dst, src, Operand(value));
4209 } else {
4210 mov(scratch, Operand(value));
4211 add(dst, src, scratch);
4212 }
4213 }
4214
4215
4216 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
4217 CRegister cr) {
4218 intptr_t value = src2.immediate();
4219 if (is_int16(value)) {
4220 cmpi(src1, src2, cr);
4221 } else {
4222 mov(scratch, src2);
4223 cmp(src1, scratch, cr);
4224 }
4225 }
4226
4227
4228 void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
4229 CRegister cr) {
4230 intptr_t value = src2.immediate();
4231 if (is_uint16(value)) {
4232 cmpli(src1, src2, cr);
4233 } else {
4234 mov(scratch, src2);
4235 cmpl(src1, scratch, cr);
4236 }
4237 }
4238
4239
4240 void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
4241 CRegister cr) {
4242 intptr_t value = src2.immediate();
4243 if (is_int16(value)) {
4244 cmpwi(src1, src2, cr);
4245 } else {
4246 mov(scratch, src2);
4247 cmpw(src1, scratch, cr);
4248 }
4249 }
4250
4251
4252 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
4253 Register scratch, CRegister cr) {
4254 intptr_t value = src2.immediate();
4255 if (is_uint16(value)) {
4256 cmplwi(src1, src2, cr);
4257 } else {
4258 mov(scratch, src2);
4259 cmplw(src1, scratch, cr);
4260 }
4261 }
4262
4263
4264 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
4265 RCBit rc) {
4266 if (rb.is_reg()) {
4267 and_(ra, rs, rb.rm(), rc);
4268 } else {
4269 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
4270 andi(ra, rs, rb);
4271 } else {
4272 // mov handles the relocation.
4273 DCHECK(!rs.is(r0));
4274 mov(r0, rb);
4275 and_(ra, rs, r0, rc);
4276 }
4277 }
4278 }
4279
4280
4281 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
4282 if (rb.is_reg()) {
4283 orx(ra, rs, rb.rm(), rc);
4284 } else {
4285 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
4286 ori(ra, rs, rb);
4287 } else {
4288 // mov handles the relocation.
4289 DCHECK(!rs.is(r0));
4290 mov(r0, rb);
4291 orx(ra, rs, r0, rc);
4292 }
4293 }
4294 }
4295
4296
4297 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
4298 RCBit rc) {
4299 if (rb.is_reg()) {
4300 xor_(ra, rs, rb.rm(), rc);
4301 } else {
4302 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
4303 xori(ra, rs, rb);
4304 } else {
4305 // mov handles the relocation.
4306 DCHECK(!rs.is(r0));
4307 mov(r0, rb);
4308 xor_(ra, rs, r0, rc);
4309 }
4310 }
4311 }
4312
4313
4314 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
4315 CRegister cr) {
4316 #if V8_TARGET_ARCH_PPC64
4317 LoadSmiLiteral(scratch, smi);
4318 cmp(src1, scratch, cr);
4319 #else
4320 Cmpi(src1, Operand(smi), scratch, cr);
4321 #endif
4322 }
4323
4324
4325 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
4326 CRegister cr) {
4327 #if V8_TARGET_ARCH_PPC64
4328 LoadSmiLiteral(scratch, smi);
4329 cmpl(src1, scratch, cr);
4330 #else
4331 Cmpli(src1, Operand(smi), scratch, cr);
4332 #endif
4333 }
4334
4335
4336 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
4337 Register scratch) {
4338 #if V8_TARGET_ARCH_PPC64
4339 LoadSmiLiteral(scratch, smi);
4340 add(dst, src, scratch);
4341 #else
4342 Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
4343 #endif
4344 }
4345
4346
4347 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
4348 Register scratch) {
4349 #if V8_TARGET_ARCH_PPC64
4350 LoadSmiLiteral(scratch, smi);
4351 sub(dst, src, scratch);
4352 #else
4353 Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
4354 #endif
4355 }
4356
4357
4358 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
4359 Register scratch, RCBit rc) {
4360 #if V8_TARGET_ARCH_PPC64
4361 LoadSmiLiteral(scratch, smi);
4362 and_(dst, src, scratch, rc);
4363 #else
4364 And(dst, src, Operand(smi), rc);
4365 #endif
4366 }
4367
4368
4369 // Load a "pointer" sized value from the memory location
4370 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
4371 Register scratch) {
4372 int offset = mem.offset();
4373
4374 if (!scratch.is(no_reg) && !is_int16(offset)) {
4375 /* cannot use d-form */
4376 LoadIntLiteral(scratch, offset);
4377 #if V8_TARGET_ARCH_PPC64
4378 ldx(dst, MemOperand(mem.ra(), scratch));
4379 #else
4380 lwzx(dst, MemOperand(mem.ra(), scratch));
4381 #endif
4382 } else {
4383 #if V8_TARGET_ARCH_PPC64
4384 int misaligned = (offset & 3);
4385 if (misaligned) {
4386 // adjust base to conform to offset alignment requirements
4387 // Todo: enhance to use scratch if dst is unsuitable
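// ld is a DS-form instruction whose displacement must be a multiple of 4.
// For example, offset 0x1001 becomes base - 3 with displacement 0x1004,
// which addresses the same byte.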
4388 DCHECK(!dst.is(r0));
4389 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4390 ld(dst, MemOperand(dst, (offset & ~3) + 4));
4391 } else {
4392 ld(dst, mem);
4393 }
4394 #else
4395 lwz(dst, mem);
4396 #endif
4397 }
4398 }
4399
4400
4401 // Store a "pointer" sized value to the memory location
4402 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
4403 Register scratch) {
4404 int offset = mem.offset();
4405
4406 if (!scratch.is(no_reg) && !is_int16(offset)) {
4407 /* cannot use d-form */
4408 LoadIntLiteral(scratch, offset);
4409 #if V8_TARGET_ARCH_PPC64
4410 stdx(src, MemOperand(mem.ra(), scratch));
4411 #else
4412 stwx(src, MemOperand(mem.ra(), scratch));
4413 #endif
4414 } else {
4415 #if V8_TARGET_ARCH_PPC64
4416 int misaligned = (offset & 3);
4417 if (misaligned) {
4418 // adjust base to conform to offset alignment requirements
4419 // a suitable scratch is required here
4420 DCHECK(!scratch.is(no_reg));
4421 if (scratch.is(r0)) {
4422 LoadIntLiteral(scratch, offset);
4423 stdx(src, MemOperand(mem.ra(), scratch));
4424 } else {
4425 addi(scratch, mem.ra(), Operand((offset & 3) - 4));
4426 std(src, MemOperand(scratch, (offset & ~3) + 4));
4427 }
4428 } else {
4429 std(src, mem);
4430 }
4431 #else
4432 stw(src, mem);
4433 #endif
4434 }
4435 }
4436
4437 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
4438 Register scratch) {
4439 int offset = mem.offset();
4440
4441 if (!scratch.is(no_reg) && !is_int16(offset)) {
4442 /* cannot use d-form */
4443 LoadIntLiteral(scratch, offset);
4444 #if V8_TARGET_ARCH_PPC64
4445 // lwax(dst, MemOperand(mem.ra(), scratch));
4446 DCHECK(0); // lwax not yet implemented
4447 #else
4448 lwzx(dst, MemOperand(mem.ra(), scratch));
4449 #endif
4450 } else {
4451 #if V8_TARGET_ARCH_PPC64
4452 int misaligned = (offset & 3);
4453 if (misaligned) {
4454 // adjust base to conform to offset alignment requirements
4455 // Todo: enhance to use scratch if dst is unsuitable
4456 DCHECK(!dst.is(r0));
4457 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4458 lwa(dst, MemOperand(dst, (offset & ~3) + 4));
4459 } else {
4460 lwa(dst, mem);
4461 }
4462 #else
4463 lwz(dst, mem);
4464 #endif
4465 }
4466 }
4467
4468
4469 // Variable length depending on whether offset fits into immediate field
4470 // MemOperand currently only supports d-form
4471 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
4472 Register scratch, bool updateForm) {
4473 Register base = mem.ra();
4474 int offset = mem.offset();
4475
4476 bool use_dform = true;
4477 if (!is_int16(offset)) {
4478 use_dform = false;
4479 LoadIntLiteral(scratch, offset);
4480 }
4481
4482 if (!updateForm) {
4483 if (use_dform) {
4484 lwz(dst, mem);
4485 } else {
4486 lwzx(dst, MemOperand(base, scratch));
4487 }
4488 } else {
4489 if (use_dform) {
4490 lwzu(dst, mem);
4491 } else {
4492 lwzux(dst, MemOperand(base, scratch));
4493 }
4494 }
4495 }
4496
4497
4498 // Variable length depending on whether offset fits into immediate field
4499 // MemOperand currently only supports d-form
4500 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
4501 Register scratch, bool updateForm) {
4502 Register base = mem.ra();
4503 int offset = mem.offset();
4504
4505 bool use_dform = true;
4506 if (!is_int16(offset)) {
4507 use_dform = false;
4508 LoadIntLiteral(scratch, offset);
4509 }
4510
4511 if (!updateForm) {
4512 if (use_dform) {
4513 stw(src, mem);
4514 } else {
4515 stwx(src, MemOperand(base, scratch));
4516 }
4517 } else {
4518 if (use_dform) {
4519 stwu(src, mem);
4520 } else {
4521 stwux(src, MemOperand(base, scratch));
4522 }
4523 }
4524 }
4525
4526
4527 // Variable length depending on whether offset fits into immediate field
4528 // MemOperand currently only supports d-form
4529 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
4530 Register scratch, bool updateForm) {
4531 Register base = mem.ra();
4532 int offset = mem.offset();
4533
4534 bool use_dform = true;
4535 if (!is_int16(offset)) {
4536 use_dform = false;
4537 LoadIntLiteral(scratch, offset);
4538 }
4539
4540 if (!updateForm) {
4541 if (use_dform) {
4542 lhz(dst, mem);
4543 } else {
4544 lhzx(dst, MemOperand(base, scratch));
4545 }
4546 } else {
4547 // If updateForm is ever true, then lhzu will
4548 // need to be implemented
4549 assert(0);
4550 #if 0 // LoadHalfWord w/ update not yet needed
4551 if (use_dform) {
4552 lhzu(dst, mem);
4553 } else {
4554 lhzux(dst, MemOperand(base, scratch));
4555 }
4556 #endif
4557 }
4558 }
4559
4560
4561 // Variable length depending on whether offset fits into immediate field
4562 // MemOperand currently only supports d-form
4563 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
4564 Register scratch, bool updateForm) {
4565 Register base = mem.ra();
4566 int offset = mem.offset();
4567
4568 bool use_dform = true;
4569 if (!is_int16(offset)) {
4570 use_dform = false;
4571 LoadIntLiteral(scratch, offset);
4572 }
4573
4574 if (!updateForm) {
4575 if (use_dform) {
4576 sth(src, mem);
4577 } else {
4578 sthx(src, MemOperand(base, scratch));
4579 }
4580 } else {
4581 // If updateForm is ever true, then sthu will
4582 // need to be implemented
4583 assert(0);
4584 #if 0 // StoreHalfWord w/ update not yet needed
4585 if (use_dform) {
4586 sthu(src, mem);
4587 } else {
4588 sthux(src, MemOperand(base, scratch));
4589 }
4590 #endif
4591 }
4592 }
4593
4594
4595 // Variable length depending on whether offset fits into immediate field
4596 // MemOperand currently only supports d-form
4597 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
4598 Register scratch, bool updateForm) {
4599 Register base = mem.ra();
4600 int offset = mem.offset();
4601
4602 bool use_dform = true;
4603 if (!is_int16(offset)) {
4604 use_dform = false;
4605 LoadIntLiteral(scratch, offset);
4606 }
4607
4608 if (!updateForm) {
4609 if (use_dform) {
4610 lbz(dst, mem);
4611 } else {
4612 lbzx(dst, MemOperand(base, scratch));
4613 }
4614 } else {
4615 // If updateForm is ever true, then lbzu will
4616 // need to be implemented
4617 assert(0);
4618 #if 0 // LoadByte w/ update not yet needed
4619 if (use_dform) {
4620 lbzu(dst, mem);
4621 } else {
4622 lbzux(dst, MemOperand(base, scratch));
4623 }
4624 #endif
4625 }
4626 }
4627
4628
4629 // Variable length depending on whether offset fits into immediate field
4630 // MemOperand currently only supports d-form
4631 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
4632 Register scratch, bool updateForm) {
4633 Register base = mem.ra();
4634 int offset = mem.offset();
4635
4636 bool use_dform = true;
4637 if (!is_int16(offset)) {
4638 use_dform = false;
4639 LoadIntLiteral(scratch, offset);
4640 }
4641
4642 if (!updateForm) {
4643 if (use_dform) {
4644 stb(src, mem);
4645 } else {
4646 stbx(src, MemOperand(base, scratch));
4647 }
4648 } else {
4649 // If updateForm is ever true, then stbu will
4650 // need to be implemented
4651 assert(0);
4652 #if 0 // StoreByte w/ update not yet needed
4653 if (use_dform) {
4654 stbu(src, mem);
4655 } else {
4656 stbux(src, MemOperand(base, scratch));
4657 }
4658 #endif
4659 }
4660 }
4661
4662
4663 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
4664 Representation r, Register scratch) {
4665 DCHECK(!r.IsDouble());
4666 if (r.IsInteger8()) {
4667 LoadByte(dst, mem, scratch);
4668 extsb(dst, dst);
4669 } else if (r.IsUInteger8()) {
4670 LoadByte(dst, mem, scratch);
4671 } else if (r.IsInteger16()) {
4672 LoadHalfWord(dst, mem, scratch);
4673 extsh(dst, dst);
4674 } else if (r.IsUInteger16()) {
4675 LoadHalfWord(dst, mem, scratch);
4676 #if V8_TARGET_ARCH_PPC64
4677 } else if (r.IsInteger32()) {
4678 LoadWord(dst, mem, scratch);
4679 #endif
4680 } else {
4681 LoadP(dst, mem, scratch);
4682 }
4683 }
4684
4685
4686 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
4687 Representation r, Register scratch) {
4688 DCHECK(!r.IsDouble());
4689 if (r.IsInteger8() || r.IsUInteger8()) {
4690 StoreByte(src, mem, scratch);
4691 } else if (r.IsInteger16() || r.IsUInteger16()) {
4692 StoreHalfWord(src, mem, scratch);
4693 #if V8_TARGET_ARCH_PPC64
4694 } else if (r.IsInteger32()) {
4695 StoreWord(src, mem, scratch);
4696 #endif
4697 } else {
4698 if (r.IsHeapObject()) {
4699 AssertNotSmi(src);
4700 } else if (r.IsSmi()) {
4701 AssertSmi(src);
4702 }
4703 StoreP(src, mem, scratch);
4704 }
4705 }
4706
4707
4708 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
4709 Register scratch_reg,
4710 Label* no_memento_found) {
4711 ExternalReference new_space_start =
4712 ExternalReference::new_space_start(isolate());
4713 ExternalReference new_space_allocation_top =
4714 ExternalReference::new_space_allocation_top_address(isolate());
4715 addi(scratch_reg, receiver_reg,
4716 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
4717 Cmpi(scratch_reg, Operand(new_space_start), r0);
4718 blt(no_memento_found);
4719 mov(ip, Operand(new_space_allocation_top));
4720 LoadP(ip, MemOperand(ip));
4721 cmp(scratch_reg, ip);
4722 bgt(no_memento_found);
4723 LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
4724 Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
4725 r0);
4726 }
4727
4728
4729 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
4730 Register reg4, Register reg5,
4731 Register reg6) {
4732 RegList regs = 0;
4733 if (reg1.is_valid()) regs |= reg1.bit();
4734 if (reg2.is_valid()) regs |= reg2.bit();
4735 if (reg3.is_valid()) regs |= reg3.bit();
4736 if (reg4.is_valid()) regs |= reg4.bit();
4737 if (reg5.is_valid()) regs |= reg5.bit();
4738 if (reg6.is_valid()) regs |= reg6.bit();
4739
4740 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
4741 Register candidate = Register::FromAllocationIndex(i);
4742 if (regs & candidate.bit()) continue;
4743 return candidate;
4744 }
4745 UNREACHABLE();
4746 return no_reg;
4747 }
4748
4749
4750 void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
4751 Register scratch0,
4752 Register scratch1,
4753 Label* found) {
4754 DCHECK(!scratch1.is(scratch0));
4755 Factory* factory = isolate()->factory();
4756 Register current = scratch0;
4757 Label loop_again;
4758
4759 // Start the walk at the object itself.
4760 mr(current, object);
4761
4762 // Loop based on the map going up the prototype chain.
4763 bind(&loop_again);
4764 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
4765 lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4766 DecodeField<Map::ElementsKindBits>(scratch1);
4767 cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
4768 beq(found);
4769 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
4770 Cmpi(current, Operand(factory->null_value()), r0);
4771 bne(&loop_again);
4772 }
4773
4774
4775 #ifdef DEBUG
4776 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
4777 Register reg5, Register reg6, Register reg7, Register reg8) {
4778 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
4779 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4780 reg7.is_valid() + reg8.is_valid();
4781
4782 RegList regs = 0;
4783 if (reg1.is_valid()) regs |= reg1.bit();
4784 if (reg2.is_valid()) regs |= reg2.bit();
4785 if (reg3.is_valid()) regs |= reg3.bit();
4786 if (reg4.is_valid()) regs |= reg4.bit();
4787 if (reg5.is_valid()) regs |= reg5.bit();
4788 if (reg6.is_valid()) regs |= reg6.bit();
4789 if (reg7.is_valid()) regs |= reg7.bit();
4790 if (reg8.is_valid()) regs |= reg8.bit();
4791 int n_of_non_aliasing_regs = NumRegs(regs);
4792
4793 return n_of_valid_regs != n_of_non_aliasing_regs;
4794 }
4795 #endif
4796
4797
4798 CodePatcher::CodePatcher(byte* address, int instructions,
4799 FlushICache flush_cache)
4800 : address_(address),
4801 size_(instructions * Assembler::kInstrSize),
4802 masm_(NULL, address, size_ + Assembler::kGap),
4803 flush_cache_(flush_cache) {
4804 // Create a new macro assembler pointing to the address of the code to patch.
4805 // The size is adjusted with kGap in order for the assembler to generate size
4806 // bytes of instructions without failing with buffer size constraints.
4807 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4808 }
4809
4810
4811 CodePatcher::~CodePatcher() {
4812 // Indicate that code has changed.
4813 if (flush_cache_ == FLUSH) {
4814 CpuFeatures::FlushICache(address_, size_);
4815 }
4816
4817 // Check that the code was patched as expected.
4818 DCHECK(masm_.pc_ == address_ + size_);
4819 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4820 }
4821
4822
4823 void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
4824
4825
4826 void CodePatcher::EmitCondition(Condition cond) {
4827 Instr instr = Assembler::instr_at(masm_.pc_);
4828 switch (cond) {
4829 case eq:
4830 instr = (instr & ~kCondMask) | BT;
4831 break;
4832 case ne:
4833 instr = (instr & ~kCondMask) | BF;
4834 break;
4835 default:
4836 UNIMPLEMENTED();
4837 }
4838 masm_.emit(instr);
4839 }
4840
4841
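// Computes dividend / divisor truncated toward zero without a divide
// instruction: multiply-high by a precomputed magic constant, correct by
// +/- dividend when the constant's sign differs from the divisor's,
// arithmetic-shift right, then add the dividend's sign bit. For example,
// division by 3 typically uses multiplier 0x55555556 and shift 0.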
4842 void MacroAssembler::TruncatingDiv(Register result, Register dividend,
4843 int32_t divisor) {
4844 DCHECK(!dividend.is(result));
4845 DCHECK(!dividend.is(r0));
4846 DCHECK(!result.is(r0));
4847 MultiplierAndShift ms(divisor);
4848 mov(r0, Operand(ms.multiplier()));
4849 mulhw(result, dividend, r0);
4850 if (divisor > 0 && ms.multiplier() < 0) {
4851 add(result, result, dividend);
4852 }
4853 if (divisor < 0 && ms.multiplier() > 0) {
4854 sub(result, result, dividend);
4855 }
4856 if (ms.shift() > 0) srawi(result, result, ms.shift());
4857 ExtractBit(r0, dividend, 31);
4858 add(result, result, r0);
4859 }
4860 }
4861 } // namespace v8::internal
4862
4863 #endif // V8_TARGET_ARCH_PPC