Chromium Code Reviews

Side by Side Diff: src/ppc/macro-assembler-ppc.cc

Issue 571173003: PowerPC specific sub-directories (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Updated ppc sub-dirs to current V8 code levels Created 6 years, 2 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 //
3 // Copyright IBM Corp. 2012, 2013. All rights reserved.
4 //
5 // Use of this source code is governed by a BSD-style license that can be
6 // found in the LICENSE file.
7
8 #include <assert.h> // For assert
9 #include <limits.h> // For LONG_MIN, LONG_MAX.
10
11 #include "src/v8.h"
12
13 #if V8_TARGET_ARCH_PPC
14
15 #include "src/base/bits.h"
16 #include "src/base/division-by-constant.h"
17 #include "src/bootstrapper.h"
18 #include "src/codegen.h"
19 #include "src/cpu-profiler.h"
20 #include "src/debug.h"
21 #include "src/isolate-inl.h"
22 #include "src/runtime.h"
23
24 namespace v8 {
25 namespace internal {
26
27 MacroAssembler::MacroAssembler(Isolate* arg_isolate, void* buffer, int size)
28 : Assembler(arg_isolate, buffer, size),
29 generating_stub_(false),
30 has_frame_(false) {
31 if (isolate() != NULL) {
32 code_object_ =
33 Handle<Object>(isolate()->heap()->undefined_value(), isolate());
34 }
35 }
36
37
38 void MacroAssembler::Jump(Register target, Condition cond) {
39 DCHECK(cond == al);
40 mtctr(target);
41 bctr();
42 }
43
44
45 void MacroAssembler::Jump(intptr_t target, RelocInfo::Mode rmode,
46 Condition cond, CRegister cr) {
47 Label skip;
48
49 if (cond != al) b(NegateCondition(cond), &skip, cr);
50
51 DCHECK(rmode == RelocInfo::CODE_TARGET || rmode == RelocInfo::RUNTIME_ENTRY);
52
53 mov(r0, Operand(target, rmode));
54 mtctr(r0);
55 bctr();
56
57 bind(&skip);
58 // mov(pc, Operand(target, rmode), LeaveCC, cond);
59 }
60
61
62 void MacroAssembler::Jump(Address target, RelocInfo::Mode rmode, Condition cond,
63 CRegister cr) {
64 DCHECK(!RelocInfo::IsCodeTarget(rmode));
65 Jump(reinterpret_cast<intptr_t>(target), rmode, cond, cr);
66 }
67
68
69 void MacroAssembler::Jump(Handle<Code> code, RelocInfo::Mode rmode,
70 Condition cond) {
71 DCHECK(RelocInfo::IsCodeTarget(rmode));
72 // 'code' is always generated PPC code, never THUMB code
73 AllowDeferredHandleDereference embedding_raw_address;
74 Jump(reinterpret_cast<intptr_t>(code.location()), rmode, cond);
75 }
76
77
78 int MacroAssembler::CallSize(Register target, Condition cond) {
79 return 2 * kInstrSize;
80 }
81
82
83 void MacroAssembler::Call(Register target, Condition cond) {
84 BlockTrampolinePoolScope block_trampoline_pool(this);
85 Label start;
86 bind(&start);
87 DCHECK(cond == al);  // in preparation for removal of the condition argument
88
89 // Statement positions are expected to be recorded when the target
90 // address is loaded.
91 positions_recorder()->WriteRecordedPositions();
92
93 // branch via link register and set LK bit for return point
94 mtlr(target);
95 bclr(BA, SetLK);
96
97 DCHECK_EQ(CallSize(target, cond), SizeOfCodeGeneratedSince(&start));
98 }
99
100
101 int MacroAssembler::CallSize(Address target, RelocInfo::Mode rmode,
102 Condition cond) {
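// The sequence emitted by Call(Address ...) below is a (possibly
// multi-instruction) mov of the target address followed by mtlr and bclr;
// the '2' here accounts for the mtlr/bclr pair.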
103 Operand mov_operand = Operand(reinterpret_cast<intptr_t>(target), rmode);
104 return (2 + instructions_required_for_mov(mov_operand)) * kInstrSize;
105 }
106
107
108 int MacroAssembler::CallSizeNotPredictableCodeSize(Address target,
109 RelocInfo::Mode rmode,
110 Condition cond) {
111 return (2 + kMovInstructionsNoConstantPool) * kInstrSize;
112 }
113
114
115 void MacroAssembler::Call(Address target, RelocInfo::Mode rmode,
116 Condition cond) {
117 BlockTrampolinePoolScope block_trampoline_pool(this);
118 DCHECK(cond == al);
119
120 #ifdef DEBUG
121 // Check the expected size before generating code to ensure we assume the same
122 // constant pool availability (e.g., whether constant pool is full or not).
123 int expected_size = CallSize(target, rmode, cond);
124 Label start;
125 bind(&start);
126 #endif
127
128 // Statement positions are expected to be recorded when the target
129 // address is loaded.
130 positions_recorder()->WriteRecordedPositions();
131
132 // This can likely be optimized to make use of bc() with a 24-bit relative offset:
133 //
134 // RecordRelocInfo(x.rmode_, x.imm_);
135 // bc( BA, .... offset, LKset);
136 //
137
138 mov(ip, Operand(reinterpret_cast<intptr_t>(target), rmode));
139 mtlr(ip);
140 bclr(BA, SetLK);
141
142 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
143 }
144
145
146 int MacroAssembler::CallSize(Handle<Code> code, RelocInfo::Mode rmode,
147 TypeFeedbackId ast_id, Condition cond) {
148 AllowDeferredHandleDereference using_raw_address;
149 return CallSize(reinterpret_cast<Address>(code.location()), rmode, cond);
150 }
151
152
153 void MacroAssembler::Call(Handle<Code> code, RelocInfo::Mode rmode,
154 TypeFeedbackId ast_id, Condition cond) {
155 BlockTrampolinePoolScope block_trampoline_pool(this);
156 DCHECK(RelocInfo::IsCodeTarget(rmode));
157
158 #ifdef DEBUG
159 // Check the expected size before generating code to ensure we assume the same
160 // constant pool availability (e.g., whether constant pool is full or not).
161 int expected_size = CallSize(code, rmode, ast_id, cond);
162 Label start;
163 bind(&start);
164 #endif
165
166 if (rmode == RelocInfo::CODE_TARGET && !ast_id.IsNone()) {
167 SetRecordedAstId(ast_id);
168 rmode = RelocInfo::CODE_TARGET_WITH_ID;
169 }
170 AllowDeferredHandleDereference using_raw_address;
171 Call(reinterpret_cast<Address>(code.location()), rmode, cond);
172 DCHECK_EQ(expected_size, SizeOfCodeGeneratedSince(&start));
173 }
174
175
176 void MacroAssembler::Ret(Condition cond) {
177 DCHECK(cond == al);
178 blr();
179 }
180
181
182 void MacroAssembler::Drop(int count, Condition cond) {
183 DCHECK(cond == al);
184 if (count > 0) {
185 Add(sp, sp, count * kPointerSize, r0);
186 }
187 }
188
189
190 void MacroAssembler::Ret(int drop, Condition cond) {
191 Drop(drop, cond);
192 Ret(cond);
193 }
194
195
196 void MacroAssembler::Call(Label* target) { b(target, SetLK); }
197
198
199 void MacroAssembler::Push(Handle<Object> handle) {
200 mov(ip, Operand(handle));
201 push(ip);
202 }
203
204
205 void MacroAssembler::Move(Register dst, Handle<Object> value) {
206 AllowDeferredHandleDereference smi_check;
207 if (value->IsSmi()) {
208 LoadSmiLiteral(dst, reinterpret_cast<Smi*>(*value));
209 } else {
210 DCHECK(value->IsHeapObject());
211 if (isolate()->heap()->InNewSpace(*value)) {
212 Handle<Cell> cell = isolate()->factory()->NewCell(value);
213 mov(dst, Operand(cell));
214 LoadP(dst, FieldMemOperand(dst, Cell::kValueOffset));
215 } else {
216 mov(dst, Operand(value));
217 }
218 }
219 }
220
221
222 void MacroAssembler::Move(Register dst, Register src, Condition cond) {
223 DCHECK(cond == al);
224 if (!dst.is(src)) {
225 mr(dst, src);
226 }
227 }
228
229
230 void MacroAssembler::Move(DoubleRegister dst, DoubleRegister src) {
231 if (!dst.is(src)) {
232 fmr(dst, src);
233 }
234 }
235
236
237 void MacroAssembler::MultiPush(RegList regs) {
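// Registers are stored starting with the highest encoding, so the lowest
// encodings end up closest to the stack pointer (see
// SafepointRegisterStackIndex below).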
238 int16_t num_to_push = NumberOfBitsSet(regs);
239 int16_t stack_offset = num_to_push * kPointerSize;
240
241 subi(sp, sp, Operand(stack_offset));
242 for (int16_t i = kNumRegisters - 1; i >= 0; i--) {
243 if ((regs & (1 << i)) != 0) {
244 stack_offset -= kPointerSize;
245 StoreP(ToRegister(i), MemOperand(sp, stack_offset));
246 }
247 }
248 }
249
250
251 void MacroAssembler::MultiPop(RegList regs) {
252 int16_t stack_offset = 0;
253
254 for (int16_t i = 0; i < kNumRegisters; i++) {
255 if ((regs & (1 << i)) != 0) {
256 LoadP(ToRegister(i), MemOperand(sp, stack_offset));
257 stack_offset += kPointerSize;
258 }
259 }
260 addi(sp, sp, Operand(stack_offset));
261 }
262
263
264 void MacroAssembler::LoadRoot(Register destination, Heap::RootListIndex index,
265 Condition cond) {
266 DCHECK(cond == al);
267 LoadP(destination, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
268 }
269
270
271 void MacroAssembler::StoreRoot(Register source, Heap::RootListIndex index,
272 Condition cond) {
273 DCHECK(cond == al);
274 StoreP(source, MemOperand(kRootRegister, index << kPointerSizeLog2), r0);
275 }
276
277
278 void MacroAssembler::InNewSpace(Register object, Register scratch,
279 Condition cond, Label* branch) {
280 // N.B. scratch may be the same register as object.
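// An object is in new space when (object & new_space_mask) == new_space_start,
// so branching on 'eq' means branch if the object is in new space, and 'ne'
// means branch if it is not.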
281 DCHECK(cond == eq || cond == ne);
282 mov(r0, Operand(ExternalReference::new_space_mask(isolate())));
283 and_(scratch, object, r0);
284 mov(r0, Operand(ExternalReference::new_space_start(isolate())));
285 cmp(scratch, r0);
286 b(cond, branch);
287 }
288
289
290 void MacroAssembler::RecordWriteField(
291 Register object, int offset, Register value, Register dst,
292 LinkRegisterStatus lr_status, SaveFPRegsMode save_fp,
293 RememberedSetAction remembered_set_action, SmiCheck smi_check,
294 PointersToHereCheck pointers_to_here_check_for_value) {
295 // First, check if a write barrier is even needed. The tests below
296 // catch stores of Smis.
297 Label done;
298
299 // Skip barrier if writing a smi.
300 if (smi_check == INLINE_SMI_CHECK) {
301 JumpIfSmi(value, &done);
302 }
303
304 // Although the object register is tagged, the offset is relative to the start
305 // of the object, so the offset must be a multiple of kPointerSize.
306 DCHECK(IsAligned(offset, kPointerSize));
307
308 Add(dst, object, offset - kHeapObjectTag, r0);
309 if (emit_debug_code()) {
310 Label ok;
311 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
312 beq(&ok, cr0);
313 stop("Unaligned cell in write barrier");
314 bind(&ok);
315 }
316
317 RecordWrite(object, dst, value, lr_status, save_fp, remembered_set_action,
318 OMIT_SMI_CHECK, pointers_to_here_check_for_value);
319
320 bind(&done);
321
322 // Clobber clobbered input registers when running with the debug-code flag
323 // turned on to provoke errors.
324 if (emit_debug_code()) {
325 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 4)));
326 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 8)));
327 }
328 }
329
330
331 // Will clobber 4 registers: object, map, dst, ip. The
332 // register 'object' contains a heap object pointer.
333 void MacroAssembler::RecordWriteForMap(Register object, Register map,
334 Register dst,
335 LinkRegisterStatus lr_status,
336 SaveFPRegsMode fp_mode) {
337 if (emit_debug_code()) {
338 LoadP(dst, FieldMemOperand(map, HeapObject::kMapOffset));
339 Cmpi(dst, Operand(isolate()->factory()->meta_map()), r0);
340 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
341 }
342
343 if (!FLAG_incremental_marking) {
344 return;
345 }
346
347 if (emit_debug_code()) {
348 LoadP(ip, FieldMemOperand(object, HeapObject::kMapOffset));
349 cmp(ip, map);
350 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
351 }
352
353 Label done;
354
355 // A single check of the interesting flag on the map's page suffices, since it is
356 // only set during incremental collection, and then it's also guaranteed that
357 // the from object's page's interesting flag is also set. This optimization
358 // relies on the fact that maps can never be in new space.
359 CheckPageFlag(map,
360 map, // Used as scratch.
361 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
362
363 addi(dst, object, Operand(HeapObject::kMapOffset - kHeapObjectTag));
364 if (emit_debug_code()) {
365 Label ok;
366 andi(r0, dst, Operand((1 << kPointerSizeLog2) - 1));
367 beq(&ok, cr0);
368 stop("Unaligned cell in write barrier");
369 bind(&ok);
370 }
371
372 // Record the actual write.
373 if (lr_status == kLRHasNotBeenSaved) {
374 mflr(r0);
375 push(r0);
376 }
377 RecordWriteStub stub(isolate(), object, map, dst, OMIT_REMEMBERED_SET,
378 fp_mode);
379 CallStub(&stub);
380 if (lr_status == kLRHasNotBeenSaved) {
381 pop(r0);
382 mtlr(r0);
383 }
384
385 bind(&done);
386
387 // Count number of write barriers in generated code.
388 isolate()->counters()->write_barriers_static()->Increment();
389 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip, dst);
390
391 // Clobber clobbered registers when running with the debug-code flag
392 // turned on to provoke errors.
393 if (emit_debug_code()) {
394 mov(dst, Operand(bit_cast<intptr_t>(kZapValue + 12)));
395 mov(map, Operand(bit_cast<intptr_t>(kZapValue + 16)));
396 }
397 }
398
399
400 // Will clobber 4 registers: object, address, scratch, ip. The
401 // register 'object' contains a heap object pointer. The heap object
402 // tag is shifted away.
403 void MacroAssembler::RecordWrite(
404 Register object, Register address, Register value,
405 LinkRegisterStatus lr_status, SaveFPRegsMode fp_mode,
406 RememberedSetAction remembered_set_action, SmiCheck smi_check,
407 PointersToHereCheck pointers_to_here_check_for_value) {
408 DCHECK(!object.is(value));
409 if (emit_debug_code()) {
410 LoadP(ip, MemOperand(address));
411 cmp(ip, value);
412 Check(eq, kWrongAddressOrValuePassedToRecordWrite);
413 }
414
415 if (remembered_set_action == OMIT_REMEMBERED_SET &&
416 !FLAG_incremental_marking) {
417 return;
418 }
419
420 // First, check if a write barrier is even needed. The tests below
421 // catch stores of smis and stores into the young generation.
422 Label done;
423
424 if (smi_check == INLINE_SMI_CHECK) {
425 JumpIfSmi(value, &done);
426 }
427
428 if (pointers_to_here_check_for_value != kPointersToHereAreAlwaysInteresting) {
429 CheckPageFlag(value,
430 value, // Used as scratch.
431 MemoryChunk::kPointersToHereAreInterestingMask, eq, &done);
432 }
433 CheckPageFlag(object,
434 value, // Used as scratch.
435 MemoryChunk::kPointersFromHereAreInterestingMask, eq, &done);
436
437 // Record the actual write.
438 if (lr_status == kLRHasNotBeenSaved) {
439 mflr(r0);
440 push(r0);
441 }
442 RecordWriteStub stub(isolate(), object, value, address, remembered_set_action,
443 fp_mode);
444 CallStub(&stub);
445 if (lr_status == kLRHasNotBeenSaved) {
446 pop(r0);
447 mtlr(r0);
448 }
449
450 bind(&done);
451
452 // Count number of write barriers in generated code.
453 isolate()->counters()->write_barriers_static()->Increment();
454 IncrementCounter(isolate()->counters()->write_barriers_dynamic(), 1, ip,
455 value);
456
457 // Clobber clobbered registers when running with the debug-code flag
458 // turned on to provoke errors.
459 if (emit_debug_code()) {
460 mov(address, Operand(bit_cast<intptr_t>(kZapValue + 12)));
461 mov(value, Operand(bit_cast<intptr_t>(kZapValue + 16)));
462 }
463 }
464
465
466 void MacroAssembler::RememberedSetHelper(Register object, // For debug tests.
467 Register address, Register scratch,
468 SaveFPRegsMode fp_mode,
469 RememberedSetFinalAction and_then) {
470 Label done;
471 if (emit_debug_code()) {
472 Label ok;
473 JumpIfNotInNewSpace(object, scratch, &ok);
474 stop("Remembered set pointer is in new space");
475 bind(&ok);
476 }
477 // Load store buffer top.
478 ExternalReference store_buffer =
479 ExternalReference::store_buffer_top(isolate());
480 mov(ip, Operand(store_buffer));
481 LoadP(scratch, MemOperand(ip));
482 // Store pointer to buffer and increment buffer top.
483 StoreP(address, MemOperand(scratch));
484 addi(scratch, scratch, Operand(kPointerSize));
485 // Write back new top of buffer.
486 StoreP(scratch, MemOperand(ip));
487 // Call stub on end of buffer.
488 // Check for end of buffer.
489 mov(r0, Operand(StoreBuffer::kStoreBufferOverflowBit));
490 and_(r0, scratch, r0, SetRC);
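// Once the buffer fills up, the incremented top address has the overflow bit
// set, so a non-zero AND result (tested via cr0) means the overflow stub
// below must be called; otherwise we skip to done.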
491
492 if (and_then == kFallThroughAtEnd) {
493 beq(&done, cr0);
494 } else {
495 DCHECK(and_then == kReturnAtEnd);
496 beq(&done, cr0);
497 }
498 mflr(r0);
499 push(r0);
500 StoreBufferOverflowStub store_buffer_overflow(isolate(), fp_mode);
501 CallStub(&store_buffer_overflow);
502 pop(r0);
503 mtlr(r0);
504 bind(&done);
505 if (and_then == kReturnAtEnd) {
506 Ret();
507 }
508 }
509
510
511 void MacroAssembler::PushFixedFrame(Register marker_reg) {
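// Pushes the return address (copied from lr into r0), fp, the constant pool
// pointer when out-of-line constant pools are in use, cp, and an optional
// frame marker.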
512 mflr(r0);
513 #if V8_OOL_CONSTANT_POOL
514 if (marker_reg.is_valid()) {
515 Push(r0, fp, kConstantPoolRegister, cp, marker_reg);
516 } else {
517 Push(r0, fp, kConstantPoolRegister, cp);
518 }
519 #else
520 if (marker_reg.is_valid()) {
521 Push(r0, fp, cp, marker_reg);
522 } else {
523 Push(r0, fp, cp);
524 }
525 #endif
526 }
527
528
529 void MacroAssembler::PopFixedFrame(Register marker_reg) {
530 #if V8_OOL_CONSTANT_POOL
531 if (marker_reg.is_valid()) {
532 Pop(r0, fp, kConstantPoolRegister, cp, marker_reg);
533 } else {
534 Pop(r0, fp, kConstantPoolRegister, cp);
535 }
536 #else
537 if (marker_reg.is_valid()) {
538 Pop(r0, fp, cp, marker_reg);
539 } else {
540 Pop(r0, fp, cp);
541 }
542 #endif
543 mtlr(r0);
544 }
545
546
547 // Push and pop all registers that can hold pointers.
548 void MacroAssembler::PushSafepointRegisters() {
549 // Safepoints expect a block of kNumSafepointRegisters values on the
550 // stack, so adjust the stack for unsaved registers.
551 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
552 DCHECK(num_unsaved >= 0);
553 if (num_unsaved > 0) {
554 subi(sp, sp, Operand(num_unsaved * kPointerSize));
555 }
556 MultiPush(kSafepointSavedRegisters);
557 }
558
559
560 void MacroAssembler::PopSafepointRegisters() {
561 const int num_unsaved = kNumSafepointRegisters - kNumSafepointSavedRegisters;
562 MultiPop(kSafepointSavedRegisters);
563 if (num_unsaved > 0) {
564 addi(sp, sp, Operand(num_unsaved * kPointerSize));
565 }
566 }
567
568
569 void MacroAssembler::StoreToSafepointRegisterSlot(Register src, Register dst) {
570 StoreP(src, SafepointRegisterSlot(dst));
571 }
572
573
574 void MacroAssembler::LoadFromSafepointRegisterSlot(Register dst, Register src) {
575 LoadP(dst, SafepointRegisterSlot(src));
576 }
577
578
579 int MacroAssembler::SafepointRegisterStackIndex(int reg_code) {
580 // The registers are pushed starting with the highest encoding,
581 // which means that lowest encodings are closest to the stack pointer.
582 RegList regs = kSafepointSavedRegisters;
583 int index = 0;
584
585 DCHECK(reg_code >= 0 && reg_code < kNumRegisters);
586
587 for (int16_t i = 0; i < reg_code; i++) {
588 if ((regs & (1 << i)) != 0) {
589 index++;
590 }
591 }
592
593 return index;
594 }
595
596
597 MemOperand MacroAssembler::SafepointRegisterSlot(Register reg) {
598 return MemOperand(sp, SafepointRegisterStackIndex(reg.code()) * kPointerSize);
599 }
600
601
602 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
603 // General purpose registers are pushed last on the stack.
604 int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
605 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
606 return MemOperand(sp, doubles_size + register_offset);
607 }
608
609
610 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
611 const DoubleRegister src) {
612 Label done;
613
614 // Test for NaN
615 fcmpu(src, src);
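// (Comparing src with itself is unordered only when src is a NaN.)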
616
617 if (dst.is(src)) {
618 bordered(&done);
619 } else {
620 Label is_nan;
621 bunordered(&is_nan);
622 fmr(dst, src);
623 b(&done);
624 bind(&is_nan);
625 }
626
627 // Replace with canonical NaN.
628 double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
629 LoadDoubleLiteral(dst, nan_value, r0);
630
631 bind(&done);
632 }
633
634
635 void MacroAssembler::ConvertIntToDouble(Register src,
636 DoubleRegister double_dst) {
637 MovIntToDouble(double_dst, src, r0);
638 fcfid(double_dst, double_dst);
639 }
640
641
642 void MacroAssembler::ConvertUnsignedIntToDouble(Register src,
643 DoubleRegister double_dst) {
644 MovUnsignedIntToDouble(double_dst, src, r0);
645 fcfid(double_dst, double_dst);
646 }
647
648
649 void MacroAssembler::ConvertIntToFloat(const DoubleRegister dst,
650 const Register src,
651 const Register int_scratch) {
652 MovIntToDouble(dst, src, int_scratch);
653 fcfid(dst, dst);
654 frsp(dst, dst);
655 }
656
657
658 void MacroAssembler::ConvertDoubleToInt64(const DoubleRegister double_input,
659 #if !V8_TARGET_ARCH_PPC64
660 const Register dst_hi,
661 #endif
662 const Register dst,
663 const DoubleRegister double_dst,
664 FPRoundingMode rounding_mode) {
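// fctidz always truncates (rounds toward zero); for any other rounding mode
// we temporarily switch the FPSCR rounding mode and use fctid instead.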
665 if (rounding_mode == kRoundToZero) {
666 fctidz(double_dst, double_input);
667 } else {
668 SetRoundingMode(rounding_mode);
669 fctid(double_dst, double_input);
670 ResetRoundingMode();
671 }
672
673 MovDoubleToInt64(
674 #if !V8_TARGET_ARCH_PPC64
675 dst_hi,
676 #endif
677 dst, double_dst);
678 }
679
680
681 #if V8_OOL_CONSTANT_POOL
682 void MacroAssembler::LoadConstantPoolPointerRegister() {
683 ConstantPoolUnavailableScope constant_pool_unavailable(this);
684
685 // CheckBuffer() is called too frequently. This will pre-grow
686 // the buffer if needed to avoid splitting the relocation and instructions.
687 EnsureSpaceFor(kMovInstructionsNoConstantPool * kInstrSize);
688
689 uintptr_t code_start = reinterpret_cast<uintptr_t>(pc_) - pc_offset();
690 int constant_pool_offset = Code::kConstantPoolOffset - Code::kHeaderSize;
691 mov(kConstantPoolRegister,
692 Operand(code_start, RelocInfo::INTERNAL_REFERENCE));
693 LoadP(kConstantPoolRegister,
694 MemOperand(kConstantPoolRegister, constant_pool_offset));
695 }
696 #endif
697
698
699 void MacroAssembler::StubPrologue() {
700 PushFixedFrame();
701 Push(Smi::FromInt(StackFrame::STUB));
702 // Adjust FP to point to saved FP.
703 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
704 #if V8_OOL_CONSTANT_POOL
705 LoadConstantPoolPointerRegister();
706 set_constant_pool_available(true);
707 #endif
708 }
709
710
711 void MacroAssembler::Prologue(bool code_pre_aging) {
712 {
713 PredictableCodeSizeScope predictible_code_size_scope(
714 this, kNoCodeAgeSequenceLength);
715 Assembler::BlockTrampolinePoolScope block_trampoline_pool(this);
716 // The following instructions must remain together and unmodified
717 // for code aging to work properly.
718 if (code_pre_aging) {
719 // Pre-age the code.
720 // This matches the code found in PatchPlatformCodeAge()
721 Code* stub = Code::GetPreAgedCodeAgeStub(isolate());
722 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
723 mflr(ip);
724 mov(r3, Operand(target));
725 Call(r3);
726 for (int i = 0; i < kCodeAgingSequenceNops; i++) {
727 nop();
728 }
729 } else {
730 // This matches the code found in GetNoCodeAgeSequence()
731 PushFixedFrame(r4);
732 // Adjust fp to point to saved fp.
733 addi(fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
734 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
735 nop();
736 }
737 }
738 }
739 #if V8_OOL_CONSTANT_POOL
740 LoadConstantPoolPointerRegister();
741 set_constant_pool_available(true);
742 #endif
743 }
744
745
746 void MacroAssembler::EnterFrame(StackFrame::Type type,
747 bool load_constant_pool) {
748 PushFixedFrame();
749 #if V8_OOL_CONSTANT_POOL
750 if (load_constant_pool) {
751 LoadConstantPoolPointerRegister();
752 }
753 #endif
754 LoadSmiLiteral(r0, Smi::FromInt(type));
755 push(r0);
756 mov(r0, Operand(CodeObject()));
757 push(r0);
758 // Adjust FP to point to saved FP.
759 addi(fp, sp,
760 Operand(StandardFrameConstants::kFixedFrameSizeFromFp + kPointerSize));
761 }
762
763
764 int MacroAssembler::LeaveFrame(StackFrame::Type type) {
765 // r3: preserved
766 // r4: preserved
767 // r5: preserved
768
769 // Drop the execution stack down to the frame pointer and restore
770 // the caller frame pointer, return address and constant pool pointer.
771 int frame_ends;
772 #if V8_OOL_CONSTANT_POOL
773 addi(sp, fp, Operand(StandardFrameConstants::kConstantPoolOffset));
774 frame_ends = pc_offset();
775 Pop(r0, fp, kConstantPoolRegister);
776 #else
777 mr(sp, fp);
778 frame_ends = pc_offset();
779 Pop(r0, fp);
780 #endif
781 mtlr(r0);
782 return frame_ends;
783 }
784
785
786 // ExitFrame layout (probably not quite right; needs updating)
787 //
788 // SP -> previousSP
789 // LK reserved
790 // code
791 // sp_on_exit (for debug?)
792 // oldSP->prev SP
793 // LK
794 // <parameters on stack>
795
796 // Prior to calling EnterExitFrame, we've got a bunch of parameters
797 // on the stack that we need to wrap a real frame around, so first
798 // we reserve a slot for LK and push the previous SP, which is captured
799 // in the fp register (r31).
800 // Then we allocate a new frame.
801
802 void MacroAssembler::EnterExitFrame(bool save_doubles, int stack_space) {
803 // Set up the frame structure on the stack.
804 DCHECK_EQ(2 * kPointerSize, ExitFrameConstants::kCallerSPDisplacement);
805 DCHECK_EQ(1 * kPointerSize, ExitFrameConstants::kCallerPCOffset);
806 DCHECK_EQ(0 * kPointerSize, ExitFrameConstants::kCallerFPOffset);
807 DCHECK(stack_space > 0);
808
809 // This is an opportunity to build a frame to wrap
810 // all of the pushes that have happened inside of V8
811 // since we were called from C code
812
813 // Replicate the ARM frame layout. TODO: make this more closely follow the PPC ABI.
814 mflr(r0);
815 Push(r0, fp);
816 mr(fp, sp);
817 // Reserve room for saved entry sp and code object.
818 subi(sp, sp, Operand(ExitFrameConstants::kFrameSize));
819
820 if (emit_debug_code()) {
821 li(r8, Operand::Zero());
822 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
823 }
824 #if V8_OOL_CONSTANT_POOL
825 StoreP(kConstantPoolRegister,
826 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
827 #endif
828 mov(r8, Operand(CodeObject()));
829 StoreP(r8, MemOperand(fp, ExitFrameConstants::kCodeOffset));
830
831 // Save the frame pointer and the context in top.
832 mov(r8, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
833 StoreP(fp, MemOperand(r8));
834 mov(r8, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
835 StoreP(cp, MemOperand(r8));
836
837 // Optionally save all volatile double registers.
838 if (save_doubles) {
839 SaveFPRegs(sp, 0, DoubleRegister::kNumVolatileRegisters);
840 // Note that d0 will be accessible at
841 // fp - ExitFrameConstants::kFrameSize -
842 // kNumVolatileRegisters * kDoubleSize,
843 // since the sp slot and code slot were pushed after the fp.
844 }
845
846 addi(sp, sp, Operand(-stack_space * kPointerSize));
847
848 // Allocate and align the frame preparing for calling the runtime
849 // function.
850 const int frame_alignment = ActivationFrameAlignment();
851 if (frame_alignment > kPointerSize) {
852 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
853 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
854 }
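// StorePU claims kNumRequiredStackFrameSlots with a single store-with-update,
// writing a zero into the back-chain slot at the new stack pointer.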
855 li(r0, Operand::Zero());
856 StorePU(r0, MemOperand(sp, -kNumRequiredStackFrameSlots * kPointerSize));
857
858 // Set the exit frame sp value to point just before the return address
859 // location.
860 addi(r8, sp, Operand((kStackFrameExtraParamSlot + 1) * kPointerSize));
861 StoreP(r8, MemOperand(fp, ExitFrameConstants::kSPOffset));
862 }
863
864
865 void MacroAssembler::InitializeNewString(Register string, Register length,
866 Heap::RootListIndex map_index,
867 Register scratch1, Register scratch2) {
868 SmiTag(scratch1, length);
869 LoadRoot(scratch2, map_index);
870 StoreP(scratch1, FieldMemOperand(string, String::kLengthOffset), r0);
871 li(scratch1, Operand(String::kEmptyHashField));
872 StoreP(scratch2, FieldMemOperand(string, HeapObject::kMapOffset), r0);
873 StoreP(scratch1, FieldMemOperand(string, String::kHashFieldSlot), r0);
874 }
875
876
877 int MacroAssembler::ActivationFrameAlignment() {
878 #if !defined(USE_SIMULATOR)
879 // Running on the real platform. Use the alignment as mandated by the local
880 // environment.
881 // Note: This will break if we ever start generating snapshots on one PPC
882 // platform for another PPC platform with a different alignment.
883 return base::OS::ActivationFrameAlignment();
884 #else // Simulated
885 // If we are using the simulator then we should always align to the expected
886 // alignment. As the simulator is used to generate snapshots we do not know
887 // if the target platform will need alignment, so this is controlled from a
888 // flag.
889 return FLAG_sim_stack_alignment;
890 #endif
891 }
892
893
894 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
895 bool restore_context) {
896 #if V8_OOL_CONSTANT_POOL
897 ConstantPoolUnavailableScope constant_pool_unavailable(this);
898 #endif
899 // Optionally restore all double registers.
900 if (save_doubles) {
901 // Calculate the stack location of the saved doubles and restore them.
902 const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
903 const int offset =
904 (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
905 addi(r6, fp, Operand(-offset));
906 RestoreFPRegs(r6, 0, kNumRegs);
907 }
908
909 // Clear top frame.
910 li(r6, Operand::Zero());
911 mov(ip, Operand(ExternalReference(Isolate::kCEntryFPAddress, isolate())));
912 StoreP(r6, MemOperand(ip));
913
914 // Restore current context from top and clear it in debug mode.
915 if (restore_context) {
916 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
917 LoadP(cp, MemOperand(ip));
918 }
919 #ifdef DEBUG
920 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
921 StoreP(r6, MemOperand(ip));
922 #endif
923
924 // Tear down the exit frame, pop the arguments, and return.
925 #if V8_OOL_CONSTANT_POOL
926 LoadP(kConstantPoolRegister,
927 MemOperand(fp, ExitFrameConstants::kConstantPoolOffset));
928 #endif
929 mr(sp, fp);
930 pop(fp);
931 pop(r0);
932 mtlr(r0);
933
934 if (argument_count.is_valid()) {
935 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
936 add(sp, sp, argument_count);
937 }
938 }
939
940
941 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
942 Move(dst, d1);
943 }
944
945
946 void MacroAssembler::MovFromFloatParameter(const DoubleRegister dst) {
947 Move(dst, d1);
948 }
949
950
951 void MacroAssembler::InvokePrologue(const ParameterCount& expected,
952 const ParameterCount& actual,
953 Handle<Code> code_constant,
954 Register code_reg, Label* done,
955 bool* definitely_mismatches,
956 InvokeFlag flag,
957 const CallWrapper& call_wrapper) {
958 bool definitely_matches = false;
959 *definitely_mismatches = false;
960 Label regular_invoke;
961
962 // Check whether the expected and actual arguments count match. If not,
963 // setup registers according to contract with ArgumentsAdaptorTrampoline:
964 // r3: actual arguments count
965 // r4: function (passed through to callee)
966 // r5: expected arguments count
967
968 // The code below is made a lot easier because the calling code already sets
969 // up actual and expected registers according to the contract if values are
970 // passed in registers.
971
972 // roohack - remove these 3 checks temporarily
973 // DCHECK(actual.is_immediate() || actual.reg().is(r3));
974 // DCHECK(expected.is_immediate() || expected.reg().is(r5));
975 // DCHECK((!code_constant.is_null() && code_reg.is(no_reg))
976 // || code_reg.is(r6));
977
978 if (expected.is_immediate()) {
979 DCHECK(actual.is_immediate());
980 if (expected.immediate() == actual.immediate()) {
981 definitely_matches = true;
982 } else {
983 mov(r3, Operand(actual.immediate()));
984 const int sentinel = SharedFunctionInfo::kDontAdaptArgumentsSentinel;
985 if (expected.immediate() == sentinel) {
986 // Don't worry about adapting arguments for builtins that
987 // don't want that done. Skip adaptation code by making it look
988 // like we have a match between expected and actual number of
989 // arguments.
990 definitely_matches = true;
991 } else {
992 *definitely_mismatches = true;
993 mov(r5, Operand(expected.immediate()));
994 }
995 }
996 } else {
997 if (actual.is_immediate()) {
998 cmpi(expected.reg(), Operand(actual.immediate()));
999 beq(&regular_invoke);
1000 mov(r3, Operand(actual.immediate()));
1001 } else {
1002 cmp(expected.reg(), actual.reg());
1003 beq(&regular_invoke);
1004 }
1005 }
1006
1007 if (!definitely_matches) {
1008 if (!code_constant.is_null()) {
1009 mov(r6, Operand(code_constant));
1010 addi(r6, r6, Operand(Code::kHeaderSize - kHeapObjectTag));
1011 }
1012
1013 Handle<Code> adaptor = isolate()->builtins()->ArgumentsAdaptorTrampoline();
1014 if (flag == CALL_FUNCTION) {
1015 call_wrapper.BeforeCall(CallSize(adaptor));
1016 Call(adaptor);
1017 call_wrapper.AfterCall();
1018 if (!*definitely_mismatches) {
1019 b(done);
1020 }
1021 } else {
1022 Jump(adaptor, RelocInfo::CODE_TARGET);
1023 }
1024 bind(&regular_invoke);
1025 }
1026 }
1027
1028
1029 void MacroAssembler::InvokeCode(Register code, const ParameterCount& expected,
1030 const ParameterCount& actual, InvokeFlag flag,
1031 const CallWrapper& call_wrapper) {
1032 // You can't call a function without a valid frame.
1033 DCHECK(flag == JUMP_FUNCTION || has_frame());
1034
1035 Label done;
1036 bool definitely_mismatches = false;
1037 InvokePrologue(expected, actual, Handle<Code>::null(), code, &done,
1038 &definitely_mismatches, flag, call_wrapper);
1039 if (!definitely_mismatches) {
1040 if (flag == CALL_FUNCTION) {
1041 call_wrapper.BeforeCall(CallSize(code));
1042 Call(code);
1043 call_wrapper.AfterCall();
1044 } else {
1045 DCHECK(flag == JUMP_FUNCTION);
1046 Jump(code);
1047 }
1048
1049 // Continue here if InvokePrologue does handle the invocation due to
1050 // mismatched parameter counts.
1051 bind(&done);
1052 }
1053 }
1054
1055
1056 void MacroAssembler::InvokeFunction(Register fun, const ParameterCount& actual,
1057 InvokeFlag flag,
1058 const CallWrapper& call_wrapper) {
1059 // You can't call a function without a valid frame.
1060 DCHECK(flag == JUMP_FUNCTION || has_frame());
1061
1062 // Contract with called JS functions requires that function is passed in r4.
1063 DCHECK(fun.is(r4));
1064
1065 Register expected_reg = r5;
1066 Register code_reg = r6;
1067
1068 LoadP(code_reg, FieldMemOperand(r4, JSFunction::kSharedFunctionInfoOffset));
1069 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1070 LoadWordArith(expected_reg,
1071 FieldMemOperand(
1072 code_reg, SharedFunctionInfo::kFormalParameterCountOffset));
1073 #if !defined(V8_TARGET_ARCH_PPC64)
1074 SmiUntag(expected_reg);
1075 #endif
1076 LoadP(code_reg, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
1077
1078 ParameterCount expected(expected_reg);
1079 InvokeCode(code_reg, expected, actual, flag, call_wrapper);
1080 }
1081
1082
1083 void MacroAssembler::InvokeFunction(Register function,
1084 const ParameterCount& expected,
1085 const ParameterCount& actual,
1086 InvokeFlag flag,
1087 const CallWrapper& call_wrapper) {
1088 // You can't call a function without a valid frame.
1089 DCHECK(flag == JUMP_FUNCTION || has_frame());
1090
1091 // Contract with called JS functions requires that function is passed in r4.
1092 DCHECK(function.is(r4));
1093
1094 // Get the function and setup the context.
1095 LoadP(cp, FieldMemOperand(r4, JSFunction::kContextOffset));
1096
1097 // We call indirectly through the code field in the function to
1098 // allow recompilation to take effect without changing any of the
1099 // call sites.
1100 LoadP(r6, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
1101 InvokeCode(r6, expected, actual, flag, call_wrapper);
1102 }
1103
1104
1105 void MacroAssembler::InvokeFunction(Handle<JSFunction> function,
1106 const ParameterCount& expected,
1107 const ParameterCount& actual,
1108 InvokeFlag flag,
1109 const CallWrapper& call_wrapper) {
1110 Move(r4, function);
1111 InvokeFunction(r4, expected, actual, flag, call_wrapper);
1112 }
1113
1114
1115 void MacroAssembler::IsObjectJSObjectType(Register heap_object, Register map,
1116 Register scratch, Label* fail) {
1117 LoadP(map, FieldMemOperand(heap_object, HeapObject::kMapOffset));
1118 IsInstanceJSObjectType(map, scratch, fail);
1119 }
1120
1121
1122 void MacroAssembler::IsInstanceJSObjectType(Register map, Register scratch,
1123 Label* fail) {
1124 lbz(scratch, FieldMemOperand(map, Map::kInstanceTypeOffset));
1125 cmpi(scratch, Operand(FIRST_NONCALLABLE_SPEC_OBJECT_TYPE));
1126 blt(fail);
1127 cmpi(scratch, Operand(LAST_NONCALLABLE_SPEC_OBJECT_TYPE));
1128 bgt(fail);
1129 }
1130
1131
1132 void MacroAssembler::IsObjectJSStringType(Register object, Register scratch,
1133 Label* fail) {
1134 DCHECK(kNotStringTag != 0);
1135
1136 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1137 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1138 andi(r0, scratch, Operand(kIsNotStringMask));
1139 bne(fail, cr0);
1140 }
1141
1142
1143 void MacroAssembler::IsObjectNameType(Register object, Register scratch,
1144 Label* fail) {
1145 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
1146 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
1147 cmpi(scratch, Operand(LAST_NAME_TYPE));
1148 bgt(fail);
1149 }
1150
1151
1152 void MacroAssembler::DebugBreak() {
1153 li(r3, Operand::Zero());
1154 mov(r4, Operand(ExternalReference(Runtime::kDebugBreak, isolate())));
1155 CEntryStub ces(isolate(), 1);
1156 DCHECK(AllowThisStubCall(&ces));
1157 Call(ces.GetCode(), RelocInfo::DEBUG_BREAK);
1158 }
1159
1160
1161 void MacroAssembler::PushTryHandler(StackHandler::Kind kind,
1162 int handler_index) {
1163 // Adjust this code if not the case.
1164 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1165 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1166 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1167 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1168 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1169 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1170
1171 // For the JSEntry handler, we must preserve r1-r7; r0 and r8-r15 are available.
1172 // We want the stack to look like
1173 // sp -> NextOffset
1174 // CodeObject
1175 // state
1176 // context
1177 // frame pointer
1178
1179 // Link the current handler as the next handler.
1180 mov(r8, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1181 LoadP(r0, MemOperand(r8));
1182 StorePU(r0, MemOperand(sp, -StackHandlerConstants::kSize));
1183 // Set this new handler as the current one.
1184 StoreP(sp, MemOperand(r8));
1185
1186 if (kind == StackHandler::JS_ENTRY) {
1187 li(r8, Operand::Zero()); // NULL frame pointer.
1188 StoreP(r8, MemOperand(sp, StackHandlerConstants::kFPOffset));
1189 LoadSmiLiteral(r8, Smi::FromInt(0)); // Indicates no context.
1190 StoreP(r8, MemOperand(sp, StackHandlerConstants::kContextOffset));
1191 } else {
1192 // still not sure if fp is right
1193 StoreP(fp, MemOperand(sp, StackHandlerConstants::kFPOffset));
1194 StoreP(cp, MemOperand(sp, StackHandlerConstants::kContextOffset));
1195 }
1196 unsigned state = StackHandler::IndexField::encode(handler_index) |
1197 StackHandler::KindField::encode(kind);
1198 LoadIntLiteral(r8, state);
1199 StoreP(r8, MemOperand(sp, StackHandlerConstants::kStateOffset));
1200 mov(r8, Operand(CodeObject()));
1201 StoreP(r8, MemOperand(sp, StackHandlerConstants::kCodeOffset));
1202 }
1203
1204
1205 void MacroAssembler::PopTryHandler() {
1206 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1207 pop(r4);
1208 mov(ip, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1209 addi(sp, sp, Operand(StackHandlerConstants::kSize - kPointerSize));
1210 StoreP(r4, MemOperand(ip));
1211 }
1212
1213
1214 // PPC - make use of ip as a temporary register
1215 void MacroAssembler::JumpToHandlerEntry() {
1216 // Compute the handler entry address and jump to it. The handler table is
1217 // a fixed array of (smi-tagged) code offsets.
1218 // r3 = exception, r4 = code object, r5 = state.
1219 #if V8_OOL_CONSTANT_POOL
1220 ConstantPoolUnavailableScope constant_pool_unavailable(this);
1221 LoadP(kConstantPoolRegister, FieldMemOperand(r4, Code::kConstantPoolOffset));
1222 #endif
1223 LoadP(r6, FieldMemOperand(r4, Code::kHandlerTableOffset)); // Handler table.
1224 addi(r6, r6, Operand(FixedArray::kHeaderSize - kHeapObjectTag));
1225 srwi(r5, r5, Operand(StackHandler::kKindWidth)); // Handler index.
1226 slwi(ip, r5, Operand(kPointerSizeLog2));
1227 add(ip, r6, ip);
1228 LoadP(r5, MemOperand(ip)); // Smi-tagged offset.
1229 addi(r4, r4, Operand(Code::kHeaderSize - kHeapObjectTag)); // Code start.
1230 SmiUntag(ip, r5);
1231 add(r0, r4, ip);
1232 mtctr(r0);
1233 bctr();
1234 }
1235
1236
1237 void MacroAssembler::Throw(Register value) {
1238 // Adjust this code if not the case.
1239 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1240 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0);
1241 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1242 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1243 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1244 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1245 Label skip;
1246
1247 // The exception is expected in r3.
1248 if (!value.is(r3)) {
1249 mr(r3, value);
1250 }
1251 // Drop the stack pointer to the top of the top handler.
1252 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1253 LoadP(sp, MemOperand(r6));
1254 // Restore the next handler.
1255 pop(r5);
1256 StoreP(r5, MemOperand(r6));
1257
1258 // Get the code object (r4) and state (r5). Restore the context and frame
1259 // pointer.
1260 pop(r4);
1261 pop(r5);
1262 pop(cp);
1263 pop(fp);
1264
1265 // If the handler is a JS frame, restore the context to the frame.
1266 // (kind == ENTRY) == (fp == 0) == (cp == 0), so we could test either fp
1267 // or cp.
1268 cmpi(cp, Operand::Zero());
1269 beq(&skip);
1270 StoreP(cp, MemOperand(fp, StandardFrameConstants::kContextOffset));
1271 bind(&skip);
1272
1273 JumpToHandlerEntry();
1274 }
1275
1276
1277 void MacroAssembler::ThrowUncatchable(Register value) {
1278 // Adjust this code if not the case.
1279 STATIC_ASSERT(StackHandlerConstants::kSize == 5 * kPointerSize);
1280 STATIC_ASSERT(StackHandlerConstants::kNextOffset == 0 * kPointerSize);
1281 STATIC_ASSERT(StackHandlerConstants::kCodeOffset == 1 * kPointerSize);
1282 STATIC_ASSERT(StackHandlerConstants::kStateOffset == 2 * kPointerSize);
1283 STATIC_ASSERT(StackHandlerConstants::kContextOffset == 3 * kPointerSize);
1284 STATIC_ASSERT(StackHandlerConstants::kFPOffset == 4 * kPointerSize);
1285
1286 // The exception is expected in r3.
1287 if (!value.is(r3)) {
1288 mr(r3, value);
1289 }
1290 // Drop the stack pointer to the top of the top stack handler.
1291 mov(r6, Operand(ExternalReference(Isolate::kHandlerAddress, isolate())));
1292 LoadP(sp, MemOperand(r6));
1293
1294 // Unwind the handlers until the ENTRY handler is found.
1295 Label fetch_next, check_kind;
1296 b(&check_kind);
1297 bind(&fetch_next);
1298 LoadP(sp, MemOperand(sp, StackHandlerConstants::kNextOffset));
1299
1300 bind(&check_kind);
1301 STATIC_ASSERT(StackHandler::JS_ENTRY == 0);
1302 LoadP(r5, MemOperand(sp, StackHandlerConstants::kStateOffset));
1303 andi(r0, r5, Operand(StackHandler::KindField::kMask));
1304 bne(&fetch_next, cr0);
1305
1306 // Set the top handler address to next handler past the top ENTRY handler.
1307 pop(r5);
1308 StoreP(r5, MemOperand(r6));
1309 // Get the code object (r4) and state (r5). Clear the context and frame
1310 // pointer (0 was saved in the handler).
1311 pop(r4);
1312 pop(r5);
1313 pop(cp);
1314 pop(fp);
1315
1316 JumpToHandlerEntry();
1317 }
1318
1319
1320 void MacroAssembler::CheckAccessGlobalProxy(Register holder_reg,
1321 Register scratch, Label* miss) {
1322 Label same_contexts;
1323
1324 DCHECK(!holder_reg.is(scratch));
1325 DCHECK(!holder_reg.is(ip));
1326 DCHECK(!scratch.is(ip));
1327
1328 // Load current lexical context from the stack frame.
1329 LoadP(scratch, MemOperand(fp, StandardFrameConstants::kContextOffset));
1330 // In debug mode, make sure the lexical context is set.
1331 #ifdef DEBUG
1332 cmpi(scratch, Operand::Zero());
1333 Check(ne, kWeShouldNotHaveAnEmptyLexicalContext);
1334 #endif
1335
1336 // Load the native context of the current context.
1337 int offset =
1338 Context::kHeaderSize + Context::GLOBAL_OBJECT_INDEX * kPointerSize;
1339 LoadP(scratch, FieldMemOperand(scratch, offset));
1340 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
1341
1342 // Check the context is a native context.
1343 if (emit_debug_code()) {
1344 // Cannot use ip as a temporary in this verification code, because ip is
1345 // clobbered as part of cmp with an object Operand.
1346 push(holder_reg); // Temporarily save holder on the stack.
1347 // Read the first word and compare to the native_context_map.
1348 LoadP(holder_reg, FieldMemOperand(scratch, HeapObject::kMapOffset));
1349 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1350 cmp(holder_reg, ip);
1351 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1352 pop(holder_reg); // Restore holder.
1353 }
1354
1355 // Check if both contexts are the same.
1356 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1357 cmp(scratch, ip);
1358 beq(&same_contexts);
1359
1360 // Check the context is a native context.
1361 if (emit_debug_code()) {
1362 // Cannot use ip as a temporary in this verification code, because ip is
1363 // clobbered as part of cmp with an object Operand.
1364 push(holder_reg); // Temporarily save holder on the stack.
1365 mr(holder_reg, ip); // Move ip to its holding place.
1366 LoadRoot(ip, Heap::kNullValueRootIndex);
1367 cmp(holder_reg, ip);
1368 Check(ne, kJSGlobalProxyContextShouldNotBeNull);
1369
1370 LoadP(holder_reg, FieldMemOperand(holder_reg, HeapObject::kMapOffset));
1371 LoadRoot(ip, Heap::kNativeContextMapRootIndex);
1372 cmp(holder_reg, ip);
1373 Check(eq, kJSGlobalObjectNativeContextShouldBeANativeContext);
1374 // Restore ip is not needed. ip is reloaded below.
1375 pop(holder_reg); // Restore holder.
1376 // Restore ip to holder's context.
1377 LoadP(ip, FieldMemOperand(holder_reg, JSGlobalProxy::kNativeContextOffset));
1378 }
1379
1380 // Check that the security token in the calling global object is
1381 // compatible with the security token in the receiving global
1382 // object.
1383 int token_offset =
1384 Context::kHeaderSize + Context::SECURITY_TOKEN_INDEX * kPointerSize;
1385
1386 LoadP(scratch, FieldMemOperand(scratch, token_offset));
1387 LoadP(ip, FieldMemOperand(ip, token_offset));
1388 cmp(scratch, ip);
1389 bne(miss);
1390
1391 bind(&same_contexts);
1392 }
1393
1394
1395 // Compute the hash code from the untagged key. This must be kept in sync with
1396 // ComputeIntegerHash in utils.h and KeyedLoadGenericStub in
1397 // code-stub-hydrogen.cc
1398 void MacroAssembler::GetNumberHash(Register t0, Register scratch) {
1399 // First of all we assign the hash seed to scratch.
1400 LoadRoot(scratch, Heap::kHashSeedRootIndex);
1401 SmiUntag(scratch);
1402
1403 // Xor original key with a seed.
1404 xor_(t0, t0, scratch);
1405
1406 // Compute the hash code from the untagged key. This must be kept in sync
1407 // with ComputeIntegerHash in utils.h.
1408 //
1409 // hash = ~hash + (hash << 15);
1410 notx(scratch, t0);
1411 slwi(t0, t0, Operand(15));
1412 add(t0, scratch, t0);
1413 // hash = hash ^ (hash >> 12);
1414 srwi(scratch, t0, Operand(12));
1415 xor_(t0, t0, scratch);
1416 // hash = hash + (hash << 2);
1417 slwi(scratch, t0, Operand(2));
1418 add(t0, t0, scratch);
1419 // hash = hash ^ (hash >> 4);
1420 srwi(scratch, t0, Operand(4));
1421 xor_(t0, t0, scratch);
1422 // hash = hash * 2057;
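// (2057 = 2048 + 8 + 1, so hash * 2057 is computed below as
// hash + (hash << 3) + (hash << 11) without a multiply.)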
1423 mr(r0, t0);
1424 slwi(scratch, t0, Operand(3));
1425 add(t0, t0, scratch);
1426 slwi(scratch, r0, Operand(11));
1427 add(t0, t0, scratch);
1428 // hash = hash ^ (hash >> 16);
1429 srwi(scratch, t0, Operand(16));
1430 xor_(t0, t0, scratch);
1431 }
1432
1433
1434 void MacroAssembler::LoadFromNumberDictionary(Label* miss, Register elements,
1435 Register key, Register result,
1436 Register t0, Register t1,
1437 Register t2) {
1438 // Register use:
1439 //
1440 // elements - holds the slow-case elements of the receiver on entry.
1441 // Unchanged unless 'result' is the same register.
1442 //
1443 // key - holds the smi key on entry.
1444 // Unchanged unless 'result' is the same register.
1445 //
1446 // result - holds the result on exit if the load succeeded.
1447 // Allowed to be the same as 'key' or 'result'.
1448 // Unchanged on bailout so 'key' or 'result' can be used
1449 // in further computation.
1450 //
1451 // Scratch registers:
1452 //
1453 // t0 - holds the untagged key on entry and holds the hash once computed.
1454 //
1455 // t1 - used to hold the capacity mask of the dictionary
1456 //
1457 // t2 - used for the index into the dictionary.
1458 Label done;
1459
1460 GetNumberHash(t0, t1);
1461
1462 // Compute the capacity mask.
1463 LoadP(t1, FieldMemOperand(elements, SeededNumberDictionary::kCapacityOffset));
1464 SmiUntag(t1);
1465 subi(t1, t1, Operand(1));
1466
1467 // Generate an unrolled loop that performs a few probes before giving up.
1468 for (int i = 0; i < kNumberDictionaryProbes; i++) {
1469 // Use t2 for index calculations and keep the hash intact in t0.
1470 mr(t2, t0);
1471 // Compute the masked index: (hash + i + i * i) & mask.
1472 if (i > 0) {
1473 addi(t2, t2, Operand(SeededNumberDictionary::GetProbeOffset(i)));
1474 }
1475 and_(t2, t2, t1);
1476
1477 // Scale the index by multiplying by the element size.
1478 DCHECK(SeededNumberDictionary::kEntrySize == 3);
1479 slwi(ip, t2, Operand(1));
1480 add(t2, t2, ip); // t2 = t2 * 3
1481
1482 // Check if the key is identical to the name.
1483 slwi(t2, t2, Operand(kPointerSizeLog2));
1484 add(t2, elements, t2);
1485 LoadP(ip,
1486 FieldMemOperand(t2, SeededNumberDictionary::kElementsStartOffset));
1487 cmp(key, ip);
1488 if (i != kNumberDictionaryProbes - 1) {
1489 beq(&done);
1490 } else {
1491 bne(miss);
1492 }
1493 }
1494
1495 bind(&done);
1496 // Check that the value is a normal property.
1497 // t2: elements + (index * kPointerSize)
1498 const int kDetailsOffset =
1499 SeededNumberDictionary::kElementsStartOffset + 2 * kPointerSize;
1500 LoadP(t1, FieldMemOperand(t2, kDetailsOffset));
1501 LoadSmiLiteral(ip, Smi::FromInt(PropertyDetails::TypeField::kMask));
1502 and_(r0, t1, ip, SetRC);
1503 bne(miss, cr0);
1504
1505 // Get the value at the masked, scaled index and return.
1506 const int kValueOffset =
1507 SeededNumberDictionary::kElementsStartOffset + kPointerSize;
1508 LoadP(result, FieldMemOperand(t2, kValueOffset));
1509 }
1510
1511
1512 void MacroAssembler::Allocate(int object_size, Register result,
1513 Register scratch1, Register scratch2,
1514 Label* gc_required, AllocationFlags flags) {
1515 DCHECK(object_size <= Page::kMaxRegularHeapObjectSize);
1516 if (!FLAG_inline_new) {
1517 if (emit_debug_code()) {
1518 // Trash the registers to simulate an allocation failure.
1519 li(result, Operand(0x7091));
1520 li(scratch1, Operand(0x7191));
1521 li(scratch2, Operand(0x7291));
1522 }
1523 b(gc_required);
1524 return;
1525 }
1526
1527 DCHECK(!result.is(scratch1));
1528 DCHECK(!result.is(scratch2));
1529 DCHECK(!scratch1.is(scratch2));
1530 DCHECK(!scratch1.is(ip));
1531 DCHECK(!scratch2.is(ip));
1532
1533 // Make object size into bytes.
1534 if ((flags & SIZE_IN_WORDS) != 0) {
1535 object_size *= kPointerSize;
1536 }
1537 DCHECK_EQ(0, static_cast<int>(object_size & kObjectAlignmentMask));
1538
1539 // Check relative positions of allocation top and limit addresses.
1540 ExternalReference allocation_top =
1541 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1542 ExternalReference allocation_limit =
1543 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1544
1545 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1546 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1547 DCHECK((limit - top) == kPointerSize);
1548
1549 // Set up allocation top address register.
1550 Register topaddr = scratch1;
1551 mov(topaddr, Operand(allocation_top));
1552
1553 // This code stores a temporary value in ip. This is OK, as the code below
1554 // does not need ip for implicit literal generation.
1555 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1556 // Load allocation top into result and allocation limit into ip.
1557 LoadP(result, MemOperand(topaddr));
1558 LoadP(ip, MemOperand(topaddr, kPointerSize));
1559 } else {
1560 if (emit_debug_code()) {
1561 // Assert that result actually contains top on entry. ip is used
1562 // immediately below so this use of ip does not cause difference with
1563 // respect to register content between debug and release mode.
1564 LoadP(ip, MemOperand(topaddr));
1565 cmp(result, ip);
1566 Check(eq, kUnexpectedAllocationTop);
1567 }
1568 // Load allocation limit into ip. Result already contains allocation top.
1569 LoadP(ip, MemOperand(topaddr, limit - top), r0);
1570 }
1571
1572 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1573 // Align the next allocation. Storing the filler map without checking top is
1574 // safe in new-space because the limit of the heap is aligned there.
1575 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1576 #if V8_TARGET_ARCH_PPC64
1577 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1578 #else
1579 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1580 andi(scratch2, result, Operand(kDoubleAlignmentMask));
1581 Label aligned;
1582 beq(&aligned, cr0);
1583 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1584 cmpl(result, ip);
1585 bge(gc_required);
1586 }
1587 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1588 stw(scratch2, MemOperand(result));
1589 addi(result, result, Operand(kDoubleSize / 2));
1590 bind(&aligned);
1591 #endif
1592 }
1593
1594 // Calculate new top and bail out if new space is exhausted. Use result
1595 // to calculate the new top.
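// The carry trick below detects address wrap-around: r0 starts at -1, the
// addic/addc sets the carry from the top + size addition, addze adds that
// carry to r0, and cr0 is then eq (r0 == 0) exactly when the addition
// carried, in which case we bail out to gc_required.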
1596 li(r0, Operand(-1));
1597 if (is_int16(object_size)) {
1598 addic(scratch2, result, Operand(object_size));
1599 } else {
1600 mov(scratch2, Operand(object_size));
1601 addc(scratch2, result, scratch2);
1602 }
1603 addze(r0, r0, LeaveOE, SetRC);
1604 beq(gc_required, cr0);
1605 cmpl(scratch2, ip);
1606 bgt(gc_required);
1607 StoreP(scratch2, MemOperand(topaddr));
1608
1609 // Tag object if requested.
1610 if ((flags & TAG_OBJECT) != 0) {
1611 addi(result, result, Operand(kHeapObjectTag));
1612 }
1613 }
1614
1615
1616 void MacroAssembler::Allocate(Register object_size, Register result,
1617 Register scratch1, Register scratch2,
1618 Label* gc_required, AllocationFlags flags) {
1619 if (!FLAG_inline_new) {
1620 if (emit_debug_code()) {
1621 // Trash the registers to simulate an allocation failure.
1622 li(result, Operand(0x7091));
1623 li(scratch1, Operand(0x7191));
1624 li(scratch2, Operand(0x7291));
1625 }
1626 b(gc_required);
1627 return;
1628 }
1629
1630 // Assert that the register arguments are different and that none of
1631 // them are ip. ip is used explicitly in the code generated below.
1632 DCHECK(!result.is(scratch1));
1633 DCHECK(!result.is(scratch2));
1634 DCHECK(!scratch1.is(scratch2));
1635 DCHECK(!object_size.is(ip));
1636 DCHECK(!result.is(ip));
1637 DCHECK(!scratch1.is(ip));
1638 DCHECK(!scratch2.is(ip));
1639
1640 // Check relative positions of allocation top and limit addresses.
1641 ExternalReference allocation_top =
1642 AllocationUtils::GetAllocationTopReference(isolate(), flags);
1643 ExternalReference allocation_limit =
1644 AllocationUtils::GetAllocationLimitReference(isolate(), flags);
1645 intptr_t top = reinterpret_cast<intptr_t>(allocation_top.address());
1646 intptr_t limit = reinterpret_cast<intptr_t>(allocation_limit.address());
1647 DCHECK((limit - top) == kPointerSize);
1648
1649 // Set up allocation top address.
1650 Register topaddr = scratch1;
1651 mov(topaddr, Operand(allocation_top));
1652
1653 // This code stores a temporary value in ip. This is OK, as the code below
1654 // does not need ip for implicit literal generation.
1655 if ((flags & RESULT_CONTAINS_TOP) == 0) {
1656 // Load allocation top into result and allocation limit into ip.
1657 LoadP(result, MemOperand(topaddr));
1658 LoadP(ip, MemOperand(topaddr, kPointerSize));
1659 } else {
1660 if (emit_debug_code()) {
1661 // Assert that result actually contains top on entry. ip is used
1662 // immediately below so this use of ip does not cause difference with
1663 // respect to register content between debug and release mode.
1664 LoadP(ip, MemOperand(topaddr));
1665 cmp(result, ip);
1666 Check(eq, kUnexpectedAllocationTop);
1667 }
1668 // Load allocation limit into ip. Result already contains allocation top.
1669 LoadP(ip, MemOperand(topaddr, limit - top));
1670 }
1671
1672 if ((flags & DOUBLE_ALIGNMENT) != 0) {
1673 // Align the next allocation. Storing the filler map without checking top is
1674 // safe in new-space because the limit of the heap is aligned there.
1675 DCHECK((flags & PRETENURE_OLD_POINTER_SPACE) == 0);
1676 #if V8_TARGET_ARCH_PPC64
1677 STATIC_ASSERT(kPointerAlignment == kDoubleAlignment);
1678 #else
1679 STATIC_ASSERT(kPointerAlignment * 2 == kDoubleAlignment);
1680 andi(scratch2, result, Operand(kDoubleAlignmentMask));
1681 Label aligned;
1682 beq(&aligned, cr0);
1683 if ((flags & PRETENURE_OLD_DATA_SPACE) != 0) {
1684 cmpl(result, ip);
1685 bge(gc_required);
1686 }
1687 mov(scratch2, Operand(isolate()->factory()->one_pointer_filler_map()));
1688 stw(scratch2, MemOperand(result));
1689 addi(result, result, Operand(kDoubleSize / 2));
1690 bind(&aligned);
1691 #endif
1692 }
1693
1694 // Calculate new top and bail out if new space is exhausted. Use result
1695 // to calculate the new top. Object size may be in words so a shift is
1696 // required to get the number of bytes.
1697 li(r0, Operand(-1));
1698 if ((flags & SIZE_IN_WORDS) != 0) {
1699 ShiftLeftImm(scratch2, object_size, Operand(kPointerSizeLog2));
1700 addc(scratch2, result, scratch2);
1701 } else {
1702 addc(scratch2, result, object_size);
1703 }
1704 addze(r0, r0, LeaveOE, SetRC);
1705 beq(gc_required, cr0);
1706 cmpl(scratch2, ip);
1707 bgt(gc_required);
1708
1709 // Update allocation top. result temporarily holds the new top.
1710 if (emit_debug_code()) {
1711 andi(r0, scratch2, Operand(kObjectAlignmentMask));
1712 Check(eq, kUnalignedAllocationInNewSpace, cr0);
1713 }
1714 StoreP(scratch2, MemOperand(topaddr));
1715
1716 // Tag object if requested.
1717 if ((flags & TAG_OBJECT) != 0) {
1718 addi(result, result, Operand(kHeapObjectTag));
1719 }
1720 }
1721
1722
1723 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1724 Register scratch) {
1725 ExternalReference new_space_allocation_top =
1726 ExternalReference::new_space_allocation_top_address(isolate());
1727
1728 // Make sure the object has no tag before resetting top.
1729 mov(r0, Operand(~kHeapObjectTagMask));
1730 and_(object, object, r0);
1731 // was: and_(object, object, Operand(~kHeapObjectTagMask));
1732 #ifdef DEBUG
1733 // Check that the object un-allocated is below the current top.
1734 mov(scratch, Operand(new_space_allocation_top));
1735 LoadP(scratch, MemOperand(scratch));
1736 cmp(object, scratch);
1737 Check(lt, kUndoAllocationOfNonAllocatedMemory);
1738 #endif
1739 // Write the address of the object to un-allocate as the current top.
1740 mov(scratch, Operand(new_space_allocation_top));
1741 StoreP(object, MemOperand(scratch));
1742 }
1743
1744
1745 void MacroAssembler::AllocateTwoByteString(Register result, Register length,
1746 Register scratch1, Register scratch2,
1747 Register scratch3,
1748 Label* gc_required) {
1749 // Calculate the number of bytes needed for the characters in the string while
1750 // observing object alignment.
1751 DCHECK((SeqTwoByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1752 slwi(scratch1, length, Operand(1)); // Length in bytes, not chars.
1753 addi(scratch1, scratch1,
1754 Operand(kObjectAlignmentMask + SeqTwoByteString::kHeaderSize));
1755 mov(r0, Operand(~kObjectAlignmentMask));
1756 and_(scratch1, scratch1, r0);
1757
1758 // Allocate two-byte string in new space.
1759 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1760
1761 // Set the map, length and hash field.
1762 InitializeNewString(result, length, Heap::kStringMapRootIndex, scratch1,
1763 scratch2);
1764 }
1765
1766
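The slwi/addi/and_ sequence above is the usual round-up-to-alignment computation: size = (2 * length + header + mask) & ~mask. A small sketch of the same arithmetic, assuming a power-of-two object alignment (the actual constants come from V8's headers):

    #include <cstddef>

    size_t SeqTwoByteStringSize(size_t length, size_t header_size,
                                size_t object_alignment) {
      // object_alignment must be a power of two.
      size_t mask = object_alignment - 1;           // kObjectAlignmentMask
      size_t unaligned = 2 * length + header_size;  // two bytes per character
      return (unaligned + mask) & ~mask;            // round up to alignment
    }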
1767 void MacroAssembler::AllocateOneByteString(Register result, Register length,
1768 Register scratch1, Register scratch2,
1769 Register scratch3,
1770 Label* gc_required) {
1771 // Calculate the number of bytes needed for the characters in the string while
1772 // observing object alignment.
1773 DCHECK((SeqOneByteString::kHeaderSize & kObjectAlignmentMask) == 0);
1774 DCHECK(kCharSize == 1);
1775 addi(scratch1, length,
1776 Operand(kObjectAlignmentMask + SeqOneByteString::kHeaderSize));
1777 li(r0, Operand(~kObjectAlignmentMask));
1778 and_(scratch1, scratch1, r0);
1779
1780 // Allocate one-byte string in new space.
1781 Allocate(scratch1, result, scratch2, scratch3, gc_required, TAG_OBJECT);
1782
1783 // Set the map, length and hash field.
1784 InitializeNewString(result, length, Heap::kOneByteStringMapRootIndex,
1785 scratch1, scratch2);
1786 }
1787
1788
1789 void MacroAssembler::AllocateTwoByteConsString(Register result, Register length,
1790 Register scratch1,
1791 Register scratch2,
1792 Label* gc_required) {
1793 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1794 TAG_OBJECT);
1795
1796 InitializeNewString(result, length, Heap::kConsStringMapRootIndex, scratch1,
1797 scratch2);
1798 }
1799
1800
1801 void MacroAssembler::AllocateOneByteConsString(Register result, Register length,
1802 Register scratch1,
1803 Register scratch2,
1804 Label* gc_required) {
1805 Allocate(ConsString::kSize, result, scratch1, scratch2, gc_required,
1806 TAG_OBJECT);
1807
1808 InitializeNewString(result, length, Heap::kConsOneByteStringMapRootIndex,
1809 scratch1, scratch2);
1810 }
1811
1812
1813 void MacroAssembler::AllocateTwoByteSlicedString(Register result,
1814 Register length,
1815 Register scratch1,
1816 Register scratch2,
1817 Label* gc_required) {
1818 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1819 TAG_OBJECT);
1820
1821 InitializeNewString(result, length, Heap::kSlicedStringMapRootIndex, scratch1,
1822 scratch2);
1823 }
1824
1825
1826 void MacroAssembler::AllocateOneByteSlicedString(Register result,
1827 Register length,
1828 Register scratch1,
1829 Register scratch2,
1830 Label* gc_required) {
1831 Allocate(SlicedString::kSize, result, scratch1, scratch2, gc_required,
1832 TAG_OBJECT);
1833
1834 InitializeNewString(result, length, Heap::kSlicedOneByteStringMapRootIndex,
1835 scratch1, scratch2);
1836 }
1837
1838
1839 void MacroAssembler::CompareObjectType(Register object, Register map,
1840 Register type_reg, InstanceType type) {
1841 const Register temp = type_reg.is(no_reg) ? ip : type_reg;
1842
1843 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1844 CompareInstanceType(map, temp, type);
1845 }
1846
1847
1848 void MacroAssembler::CheckObjectTypeRange(Register object, Register map,
1849 InstanceType min_type,
1850 InstanceType max_type,
1851 Label* false_label) {
1852 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1853 STATIC_ASSERT(LAST_TYPE < 256);
1854 LoadP(map, FieldMemOperand(object, HeapObject::kMapOffset));
1855 lbz(ip, FieldMemOperand(map, Map::kInstanceTypeOffset));
1856 subi(ip, ip, Operand(min_type));
1857 cmpli(ip, Operand(max_type - min_type));
1858 bgt(false_label);
1859 }
1860
1861
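CheckObjectTypeRange uses the standard single-compare range test: subtract the lower bound, then do one unsigned compare against the range width. In C++ terms (a sketch, not V8 code):

    bool InstanceTypeInRange(int type, int min_type, int max_type) {
      // Unsigned wrap-around makes values below min_type compare large,
      // so one compare covers both bounds (subi + cmpli + bgt above).
      return static_cast<unsigned>(type - min_type) <=
             static_cast<unsigned>(max_type - min_type);
    }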
1862 void MacroAssembler::CompareInstanceType(Register map, Register type_reg,
1863 InstanceType type) {
1864 STATIC_ASSERT(Map::kInstanceTypeOffset < 4096);
1865 STATIC_ASSERT(LAST_TYPE < 256);
1866 lbz(type_reg, FieldMemOperand(map, Map::kInstanceTypeOffset));
1867 cmpi(type_reg, Operand(type));
1868 }
1869
1870
1871 void MacroAssembler::CompareRoot(Register obj, Heap::RootListIndex index) {
1872 DCHECK(!obj.is(ip));
1873 LoadRoot(ip, index);
1874 cmp(obj, ip);
1875 }
1876
1877
1878 void MacroAssembler::CheckFastElements(Register map, Register scratch,
1879 Label* fail) {
1880 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1881 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1882 STATIC_ASSERT(FAST_ELEMENTS == 2);
1883 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1884 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1885 STATIC_ASSERT(Map::kMaximumBitField2FastHoleyElementValue < 0x8000);
1886 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1887 bgt(fail);
1888 }
1889
1890
1891 void MacroAssembler::CheckFastObjectElements(Register map, Register scratch,
1892 Label* fail) {
1893 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1894 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1895 STATIC_ASSERT(FAST_ELEMENTS == 2);
1896 STATIC_ASSERT(FAST_HOLEY_ELEMENTS == 3);
1897 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1898 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1899 ble(fail);
1900 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleyElementValue));
1901 bgt(fail);
1902 }
1903
1904
1905 void MacroAssembler::CheckFastSmiElements(Register map, Register scratch,
1906 Label* fail) {
1907 STATIC_ASSERT(FAST_SMI_ELEMENTS == 0);
1908 STATIC_ASSERT(FAST_HOLEY_SMI_ELEMENTS == 1);
1909 lbz(scratch, FieldMemOperand(map, Map::kBitField2Offset));
1910 cmpli(scratch, Operand(Map::kMaximumBitField2FastHoleySmiElementValue));
1911 bgt(fail);
1912 }
1913
1914
1915 void MacroAssembler::StoreNumberToDoubleElements(
1916 Register value_reg, Register key_reg, Register elements_reg,
1917 Register scratch1, DoubleRegister double_scratch, Label* fail,
1918 int elements_offset) {
1919 Label smi_value, store;
1920
1921 // Handle smi values specially.
1922 JumpIfSmi(value_reg, &smi_value);
1923
1924 // Ensure that the object is a heap number
1925 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
1926 DONT_DO_SMI_CHECK);
1927
1928 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
1929 // Force a canonical NaN.
1930 CanonicalizeNaN(double_scratch);
1931 b(&store);
1932
1933 bind(&smi_value);
1934 SmiToDouble(double_scratch, value_reg);
1935
1936 bind(&store);
1937 SmiToDoubleArrayOffset(scratch1, key_reg);
1938 add(scratch1, elements_reg, scratch1);
1939 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
1940 elements_offset));
1941 }
1942
1943
1944 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1945 Register right,
1946 Register overflow_dst,
1947 Register scratch) {
1948 DCHECK(!dst.is(overflow_dst));
1949 DCHECK(!dst.is(scratch));
1950 DCHECK(!overflow_dst.is(scratch));
1951 DCHECK(!overflow_dst.is(left));
1952 DCHECK(!overflow_dst.is(right));
1953
1954 // C = A + B; C overflows if A and B have the same sign and C's sign differs from A's.
1955 if (dst.is(left)) {
1956 mr(scratch, left); // Preserve left.
1957 add(dst, left, right); // Left is overwritten.
1958 xor_(scratch, dst, scratch); // Original left.
1959 xor_(overflow_dst, dst, right);
1960 and_(overflow_dst, overflow_dst, scratch, SetRC);
1961 } else if (dst.is(right)) {
1962 mr(scratch, right); // Preserve right.
1963 add(dst, left, right); // Right is overwritten.
1964 xor_(scratch, dst, scratch); // Original right.
1965 xor_(overflow_dst, dst, left);
1966 and_(overflow_dst, overflow_dst, scratch, SetRC);
1967 } else {
1968 add(dst, left, right);
1969 xor_(overflow_dst, dst, left);
1970 xor_(scratch, dst, right);
1971 and_(overflow_dst, scratch, overflow_dst, SetRC);
1972 }
1973 }
1974
1975 void MacroAssembler::SubAndCheckForOverflow(Register dst, Register left,
1976 Register right,
1977 Register overflow_dst,
1978 Register scratch) {
1979 DCHECK(!dst.is(overflow_dst));
1980 DCHECK(!dst.is(scratch));
1981 DCHECK(!overflow_dst.is(scratch));
1982 DCHECK(!overflow_dst.is(left));
1983 DCHECK(!overflow_dst.is(right));
1984
1985 // C = A - B; C overflows if A and B have different signs and C's sign differs from A's.
1986 if (dst.is(left)) {
1987 mr(scratch, left); // Preserve left.
1988 sub(dst, left, right); // Left is overwritten.
1989 xor_(overflow_dst, dst, scratch);
1990 xor_(scratch, scratch, right);
1991 and_(overflow_dst, overflow_dst, scratch, SetRC);
1992 } else if (dst.is(right)) {
1993 mr(scratch, right); // Preserve right.
1994 sub(dst, left, right); // Right is overwritten.
1995 xor_(overflow_dst, dst, left);
1996 xor_(scratch, left, scratch);
1997 and_(overflow_dst, overflow_dst, scratch, SetRC);
1998 } else {
1999 sub(dst, left, right);
2000 xor_(overflow_dst, dst, left);
2001 xor_(scratch, left, right);
2002 and_(overflow_dst, scratch, overflow_dst, SetRC);
2003 }
2004 }
2005
2006
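Both overflow helpers rely on the sign-bit identity spelled out in their comments; the final xor_/and_ result is negative exactly when the operation overflowed, and SetRC lets callers branch on that sign. A plain-integer sketch of the same checks:

    #include <cstdint>

    bool AddOverflows(int32_t a, int32_t b, int32_t* sum) {
      int32_t c = static_cast<int32_t>(static_cast<uint32_t>(a) +
                                       static_cast<uint32_t>(b));
      *sum = c;
      return ((c ^ a) & (c ^ b)) < 0;  // same sign in, different sign out
    }

    bool SubOverflows(int32_t a, int32_t b, int32_t* diff) {
      int32_t c = static_cast<int32_t>(static_cast<uint32_t>(a) -
                                       static_cast<uint32_t>(b));
      *diff = c;
      return ((c ^ a) & (a ^ b)) < 0;  // different signs in, sign flip out
    }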
2007 void MacroAssembler::CompareMap(Register obj, Register scratch, Handle<Map> map,
2008 Label* early_success) {
2009 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2010 CompareMap(scratch, map, early_success);
2011 }
2012
2013
2014 void MacroAssembler::CompareMap(Register obj_map, Handle<Map> map,
2015 Label* early_success) {
2016 mov(r0, Operand(map));
2017 cmp(obj_map, r0);
2018 }
2019
2020
2021 void MacroAssembler::CheckMap(Register obj, Register scratch, Handle<Map> map,
2022 Label* fail, SmiCheckType smi_check_type) {
2023 if (smi_check_type == DO_SMI_CHECK) {
2024 JumpIfSmi(obj, fail);
2025 }
2026
2027 Label success;
2028 CompareMap(obj, scratch, map, &success);
2029 bne(fail);
2030 bind(&success);
2031 }
2032
2033
2034 void MacroAssembler::CheckMap(Register obj, Register scratch,
2035 Heap::RootListIndex index, Label* fail,
2036 SmiCheckType smi_check_type) {
2037 if (smi_check_type == DO_SMI_CHECK) {
2038 JumpIfSmi(obj, fail);
2039 }
2040 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2041 LoadRoot(ip, index);
2042 cmp(scratch, ip);
2043 bne(fail);
2044 }
2045
2046
2047 void MacroAssembler::DispatchMap(Register obj, Register scratch,
2048 Handle<Map> map, Handle<Code> success,
2049 SmiCheckType smi_check_type) {
2050 Label fail;
2051 if (smi_check_type == DO_SMI_CHECK) {
2052 JumpIfSmi(obj, &fail);
2053 }
2054 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2055 mov(ip, Operand(map));
2056 cmp(scratch, ip);
2057 bne(&fail);
2058 Jump(success, RelocInfo::CODE_TARGET, al);
2059 bind(&fail);
2060 }
2061
2062
2063 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2064 Register scratch, Label* miss,
2065 bool miss_on_bound_function) {
2066 Label non_instance;
2067 if (miss_on_bound_function) {
2068 // Check that the receiver isn't a smi.
2069 JumpIfSmi(function, miss);
2070
2071 // Check that the function really is a function. Load map into result reg.
2072 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
2073 bne(miss);
2074
2075 LoadP(scratch,
2076 FieldMemOperand(function, JSFunction::kSharedFunctionInfoOffset));
2077 lwz(scratch,
2078 FieldMemOperand(scratch, SharedFunctionInfo::kCompilerHintsOffset));
2079 TestBit(scratch,
2080 #if V8_TARGET_ARCH_PPC64
2081 SharedFunctionInfo::kBoundFunction,
2082 #else
2083 SharedFunctionInfo::kBoundFunction + kSmiTagSize,
2084 #endif
2085 r0);
2086 bne(miss, cr0);
2087
2088 // Make sure that the function has an instance prototype.
2089 lbz(scratch, FieldMemOperand(result, Map::kBitFieldOffset));
2090 andi(r0, scratch, Operand(1 << Map::kHasNonInstancePrototype));
2091 bne(&non_instance, cr0);
2092 }
2093
2094 // Get the prototype or initial map from the function.
2095 LoadP(result,
2096 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2097
2098 // If the prototype or initial map is the hole, don't return it and
2099 // simply miss the cache instead. This will allow us to allocate a
2100 // prototype object on-demand in the runtime system.
2101 LoadRoot(ip, Heap::kTheHoleValueRootIndex);
2102 cmp(result, ip);
2103 beq(miss);
2104
2105 // If the function does not have an initial map, we're done.
2106 Label done;
2107 CompareObjectType(result, scratch, scratch, MAP_TYPE);
2108 bne(&done);
2109
2110 // Get the prototype from the initial map.
2111 LoadP(result, FieldMemOperand(result, Map::kPrototypeOffset));
2112
2113 if (miss_on_bound_function) {
2114 b(&done);
2115
2116 // Non-instance prototype: Fetch prototype from constructor field
2117 // in initial map.
2118 bind(&non_instance);
2119 LoadP(result, FieldMemOperand(result, Map::kConstructorOffset));
2120 }
2121
2122 // All done.
2123 bind(&done);
2124 }
2125
2126
2127 void MacroAssembler::CallStub(CodeStub* stub, TypeFeedbackId ast_id,
2128 Condition cond) {
2129 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2130 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2131 }
2132
2133
2134 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2135 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2136 }
2137
2138
2139 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2140 return ref0.address() - ref1.address();
2141 }
2142
2143
2144 void MacroAssembler::CallApiFunctionAndReturn(
2145 Register function_address, ExternalReference thunk_ref, int stack_space,
2146 MemOperand return_value_operand, MemOperand* context_restore_operand) {
2147 ExternalReference next_address =
2148 ExternalReference::handle_scope_next_address(isolate());
2149 const int kNextOffset = 0;
2150 const int kLimitOffset = AddressOffset(
2151 ExternalReference::handle_scope_limit_address(isolate()), next_address);
2152 const int kLevelOffset = AddressOffset(
2153 ExternalReference::handle_scope_level_address(isolate()), next_address);
2154
2155 DCHECK(function_address.is(r4) || function_address.is(r5));
2156 Register scratch = r6;
2157
2158 Label profiler_disabled;
2159 Label end_profiler_check;
2160 mov(scratch, Operand(ExternalReference::is_profiling_address(isolate())));
2161 lbz(scratch, MemOperand(scratch, 0));
2162 cmpi(scratch, Operand::Zero());
2163 beq(&profiler_disabled);
2164
2165 // Additional parameter is the address of the actual callback.
2166 mov(scratch, Operand(thunk_ref));
2167 jmp(&end_profiler_check);
2168
2169 bind(&profiler_disabled);
2170 mr(scratch, function_address);
2171 bind(&end_profiler_check);
2172
2173 // Allocate HandleScope in callee-save registers.
2174 // r17 - next_address
2175 // r14 - next_address->kNextOffset
2176 // r15 - next_address->kLimitOffset
2177 // r16 - next_address->kLevelOffset
2178 mov(r17, Operand(next_address));
2179 LoadP(r14, MemOperand(r17, kNextOffset));
2180 LoadP(r15, MemOperand(r17, kLimitOffset));
2181 lwz(r16, MemOperand(r17, kLevelOffset));
2182 addi(r16, r16, Operand(1));
2183 stw(r16, MemOperand(r17, kLevelOffset));
2184
2185 if (FLAG_log_timer_events) {
2186 FrameScope frame(this, StackFrame::MANUAL);
2187 PushSafepointRegisters();
2188 PrepareCallCFunction(1, r3);
2189 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2190 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2191 PopSafepointRegisters();
2192 }
2193
2194 // Native call returns to the DirectCEntry stub which redirects to the
2195 // return address pushed on stack (could have moved after GC).
2196 // DirectCEntry stub itself is generated early and never moves.
2197 DirectCEntryStub stub(isolate());
2198 stub.GenerateCall(this, scratch);
2199
2200 if (FLAG_log_timer_events) {
2201 FrameScope frame(this, StackFrame::MANUAL);
2202 PushSafepointRegisters();
2203 PrepareCallCFunction(1, r3);
2204 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2205 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2206 PopSafepointRegisters();
2207 }
2208
2209 Label promote_scheduled_exception;
2210 Label exception_handled;
2211 Label delete_allocated_handles;
2212 Label leave_exit_frame;
2213 Label return_value_loaded;
2214
2215 // load value from ReturnValue
2216 LoadP(r3, return_value_operand);
2217 bind(&return_value_loaded);
2218 // No more valid handles (the result handle was the last one). Restore
2219 // previous handle scope.
2220 StoreP(r14, MemOperand(r17, kNextOffset));
2221 if (emit_debug_code()) {
2222 lwz(r4, MemOperand(r17, kLevelOffset));
2223 cmp(r4, r16);
2224 Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2225 }
2226 subi(r16, r16, Operand(1));
2227 stw(r16, MemOperand(r17, kLevelOffset));
2228 LoadP(ip, MemOperand(r17, kLimitOffset));
2229 cmp(r15, ip);
2230 bne(&delete_allocated_handles);
2231
2232 // Check if the function scheduled an exception.
2233 bind(&leave_exit_frame);
2234 LoadRoot(r14, Heap::kTheHoleValueRootIndex);
2235 mov(ip, Operand(ExternalReference::scheduled_exception_address(isolate())));
2236 LoadP(r15, MemOperand(ip));
2237 cmp(r14, r15);
2238 bne(&promote_scheduled_exception);
2239 bind(&exception_handled);
2240
2241 bool restore_context = context_restore_operand != NULL;
2242 if (restore_context) {
2243 LoadP(cp, *context_restore_operand);
2244 }
2245 // LeaveExitFrame expects unwind space to be in a register.
2246 mov(r14, Operand(stack_space));
2247 LeaveExitFrame(false, r14, !restore_context);
2248 blr();
2249
2250 bind(&promote_scheduled_exception);
2251 {
2252 FrameScope frame(this, StackFrame::INTERNAL);
2253 CallExternalReference(
2254 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
2255 }
2256 jmp(&exception_handled);
2257
2258 // HandleScope limit has changed. Delete allocated extensions.
2259 bind(&delete_allocated_handles);
2260 StoreP(r15, MemOperand(r17, kLimitOffset));
2261 mr(r14, r3);
2262 PrepareCallCFunction(1, r15);
2263 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2264 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
2265 1);
2266 mr(r3, r14);
2267 b(&leave_exit_frame);
2268 }
2269
2270
2271 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2272 return has_frame_ || !stub->SometimesSetsUpAFrame();
2273 }
2274
2275
2276 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2277 // If the hash field contains an array index, pick it out. The assert checks
2278 // that the constants for the maximum number of digits for an array index
2279 // cached in the hash field and the number of bits reserved for it do not
2280 // conflict.
2281 DCHECK(TenToThe(String::kMaxCachedArrayIndexLength) <
2282 (1 << String::kArrayIndexValueBits));
2283 DecodeFieldToSmi<String::ArrayIndexValueBits>(index, hash);
2284 }
2285
2286
2287 void MacroAssembler::SmiToDouble(DoubleRegister value, Register smi) {
2288 SmiUntag(ip, smi);
2289 ConvertIntToDouble(ip, value);
2290 }
2291
2292
2293 void MacroAssembler::TestDoubleIsInt32(DoubleRegister double_input,
2294 Register scratch1, Register scratch2,
2295 DoubleRegister double_scratch) {
2296 TryDoubleToInt32Exact(scratch1, double_input, scratch2, double_scratch);
2297 }
2298
2299
2300 void MacroAssembler::TryDoubleToInt32Exact(Register result,
2301 DoubleRegister double_input,
2302 Register scratch,
2303 DoubleRegister double_scratch) {
2304 Label done;
2305 DCHECK(!double_input.is(double_scratch));
2306
2307 ConvertDoubleToInt64(double_input,
2308 #if !V8_TARGET_ARCH_PPC64
2309 scratch,
2310 #endif
2311 result, double_scratch);
2312
2313 #if V8_TARGET_ARCH_PPC64
2314 TestIfInt32(result, scratch, r0);
2315 #else
2316 TestIfInt32(scratch, result, r0);
2317 #endif
2318 bne(&done);
2319
2320 // convert back and compare
2321 fcfid(double_scratch, double_scratch);
2322 fcmpu(double_scratch, double_input);
2323 bind(&done);
2324 }
2325
2326
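TryDoubleToInt32Exact converts to an integer, converts back with fcfid, and compares against the original value to learn whether the conversion was exact. A sketch of the equivalent check (range-checking first, since the hardware conversion saturates while the C++ cast would be undefined):

    #include <cstdint>

    bool DoubleToInt32Exact(double input, int32_t* result) {
      if (!(input >= INT32_MIN && input <= INT32_MAX)) return false;  // also rejects NaN
      int32_t as_int = static_cast<int32_t>(input);  // ConvertDoubleToInt64 + TestIfInt32
      *result = as_int;
      return static_cast<double>(as_int) == input;   // fcfid + fcmpu round trip
    }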
2327 void MacroAssembler::TryInt32Floor(Register result, DoubleRegister double_input,
2328 Register input_high, Register scratch,
2329 DoubleRegister double_scratch, Label* done,
2330 Label* exact) {
2331 DCHECK(!result.is(input_high));
2332 DCHECK(!double_input.is(double_scratch));
2333 Label exception;
2334
2335 MovDoubleHighToInt(input_high, double_input);
2336
2337 // Test for NaN/Inf
2338 ExtractBitMask(result, input_high, HeapNumber::kExponentMask);
2339 cmpli(result, Operand(0x7ff));
2340 beq(&exception);
2341
2342 // Convert (rounding to -Inf)
2343 ConvertDoubleToInt64(double_input,
2344 #if !V8_TARGET_ARCH_PPC64
2345 scratch,
2346 #endif
2347 result, double_scratch, kRoundToMinusInf);
2348
2349 // Test for overflow
2350 #if V8_TARGET_ARCH_PPC64
2351 TestIfInt32(result, scratch, r0);
2352 #else
2353 TestIfInt32(scratch, result, r0);
2354 #endif
2355 bne(&exception);
2356
2357 // Test for exactness
2358 fcfid(double_scratch, double_scratch);
2359 fcmpu(double_scratch, double_input);
2360 beq(exact);
2361 b(done);
2362
2363 bind(&exception);
2364 }
2365
2366
2367 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2368 DoubleRegister double_input,
2369 Label* done) {
2370 DoubleRegister double_scratch = kScratchDoubleReg;
2371 Register scratch = ip;
2372
2373 ConvertDoubleToInt64(double_input,
2374 #if !V8_TARGET_ARCH_PPC64
2375 scratch,
2376 #endif
2377 result, double_scratch);
2378
2379 // Test for overflow
2380 #if V8_TARGET_ARCH_PPC64
2381 TestIfInt32(result, scratch, r0);
2382 #else
2383 TestIfInt32(scratch, result, r0);
2384 #endif
2385 beq(done);
2386 }
2387
2388
2389 void MacroAssembler::TruncateDoubleToI(Register result,
2390 DoubleRegister double_input) {
2391 Label done;
2392
2393 TryInlineTruncateDoubleToI(result, double_input, &done);
2394
2395 // If we fell through then inline version didn't succeed - call stub instead.
2396 mflr(r0);
2397 push(r0);
2398 // Put input on stack.
2399 stfdu(double_input, MemOperand(sp, -kDoubleSize));
2400
2401 DoubleToIStub stub(isolate(), sp, result, 0, true, true);
2402 CallStub(&stub);
2403
2404 addi(sp, sp, Operand(kDoubleSize));
2405 pop(r0);
2406 mtlr(r0);
2407
2408 bind(&done);
2409 }
2410
2411
2412 void MacroAssembler::TruncateHeapNumberToI(Register result, Register object) {
2413 Label done;
2414 DoubleRegister double_scratch = kScratchDoubleReg;
2415 DCHECK(!result.is(object));
2416
2417 lfd(double_scratch, FieldMemOperand(object, HeapNumber::kValueOffset));
2418 TryInlineTruncateDoubleToI(result, double_scratch, &done);
2419
2420 // If we fell through then inline version didn't succeed - call stub instead.
2421 mflr(r0);
2422 push(r0);
2423 DoubleToIStub stub(isolate(), object, result,
2424 HeapNumber::kValueOffset - kHeapObjectTag, true, true);
2425 CallStub(&stub);
2426 pop(r0);
2427 mtlr(r0);
2428
2429 bind(&done);
2430 }
2431
2432
2433 void MacroAssembler::TruncateNumberToI(Register object, Register result,
2434 Register heap_number_map,
2435 Register scratch1, Label* not_number) {
2436 Label done;
2437 DCHECK(!result.is(object));
2438
2439 UntagAndJumpIfSmi(result, object, &done);
2440 JumpIfNotHeapNumber(object, heap_number_map, scratch1, not_number);
2441 TruncateHeapNumberToI(result, object);
2442
2443 bind(&done);
2444 }
2445
2446
2447 void MacroAssembler::GetLeastBitsFromSmi(Register dst, Register src,
2448 int num_least_bits) {
2449 #if V8_TARGET_ARCH_PPC64
2450 rldicl(dst, src, kBitsPerPointer - kSmiShift,
2451 kBitsPerPointer - num_least_bits);
2452 #else
2453 rlwinm(dst, src, kBitsPerPointer - kSmiShift,
2454 kBitsPerPointer - num_least_bits, 31);
2455 #endif
2456 }
2457
2458
2459 void MacroAssembler::GetLeastBitsFromInt32(Register dst, Register src,
2460 int num_least_bits) {
2461 rlwinm(dst, src, 0, 32 - num_least_bits, 31);
2462 }
2463
2464
2465 void MacroAssembler::CallRuntime(const Runtime::Function* f, int num_arguments,
2466 SaveFPRegsMode save_doubles) {
2467 // All parameters are on the stack. r3 has the return value after call.
2468
2469 // If the expected number of arguments of the runtime function is
2470 // constant, we check that the actual number of arguments match the
2471 // expectation.
2472 CHECK(f->nargs < 0 || f->nargs == num_arguments);
2473
2474 // TODO(1236192): Most runtime routines don't need the number of
2475 // arguments passed in because it is constant. At some point we
2476 // should remove this need and make the runtime routine entry code
2477 // smarter.
2478 mov(r3, Operand(num_arguments));
2479 mov(r4, Operand(ExternalReference(f, isolate())));
2480 CEntryStub stub(isolate(),
2481 #if V8_TARGET_ARCH_PPC64
2482 f->result_size,
2483 #else
2484 1,
2485 #endif
2486 save_doubles);
2487 CallStub(&stub);
2488 }
2489
2490
2491 void MacroAssembler::CallExternalReference(const ExternalReference& ext,
2492 int num_arguments) {
2493 mov(r3, Operand(num_arguments));
2494 mov(r4, Operand(ext));
2495
2496 CEntryStub stub(isolate(), 1);
2497 CallStub(&stub);
2498 }
2499
2500
2501 void MacroAssembler::TailCallExternalReference(const ExternalReference& ext,
2502 int num_arguments,
2503 int result_size) {
2504 // TODO(1236192): Most runtime routines don't need the number of
2505 // arguments passed in because it is constant. At some point we
2506 // should remove this need and make the runtime routine entry code
2507 // smarter.
2508 mov(r3, Operand(num_arguments));
2509 JumpToExternalReference(ext);
2510 }
2511
2512
2513 void MacroAssembler::TailCallRuntime(Runtime::FunctionId fid, int num_arguments,
2514 int result_size) {
2515 TailCallExternalReference(ExternalReference(fid, isolate()), num_arguments,
2516 result_size);
2517 }
2518
2519
2520 void MacroAssembler::JumpToExternalReference(const ExternalReference& builtin) {
2521 mov(r4, Operand(builtin));
2522 CEntryStub stub(isolate(), 1);
2523 Jump(stub.GetCode(), RelocInfo::CODE_TARGET);
2524 }
2525
2526
2527 void MacroAssembler::InvokeBuiltin(Builtins::JavaScript id, InvokeFlag flag,
2528 const CallWrapper& call_wrapper) {
2529 // You can't call a builtin without a valid frame.
2530 DCHECK(flag == JUMP_FUNCTION || has_frame());
2531
2532 GetBuiltinEntry(r5, id);
2533 if (flag == CALL_FUNCTION) {
2534 call_wrapper.BeforeCall(CallSize(r5));
2535 Call(r5);
2536 call_wrapper.AfterCall();
2537 } else {
2538 DCHECK(flag == JUMP_FUNCTION);
2539 Jump(r5);
2540 }
2541 }
2542
2543
2544 void MacroAssembler::GetBuiltinFunction(Register target,
2545 Builtins::JavaScript id) {
2546 // Load the builtins object into target register.
2547 LoadP(target,
2548 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2549 LoadP(target, FieldMemOperand(target, GlobalObject::kBuiltinsOffset));
2550 // Load the JavaScript builtin function from the builtins object.
2551 LoadP(target,
2552 FieldMemOperand(target, JSBuiltinsObject::OffsetOfFunctionWithId(id)),
2553 r0);
2554 }
2555
2556
2557 void MacroAssembler::GetBuiltinEntry(Register target, Builtins::JavaScript id) {
2558 DCHECK(!target.is(r4));
2559 GetBuiltinFunction(r4, id);
2560 // Load the code entry point from the builtins object.
2561 LoadP(target, FieldMemOperand(r4, JSFunction::kCodeEntryOffset));
2562 }
2563
2564
2565 void MacroAssembler::SetCounter(StatsCounter* counter, int value,
2566 Register scratch1, Register scratch2) {
2567 if (FLAG_native_code_counters && counter->Enabled()) {
2568 mov(scratch1, Operand(value));
2569 mov(scratch2, Operand(ExternalReference(counter)));
2570 stw(scratch1, MemOperand(scratch2));
2571 }
2572 }
2573
2574
2575 void MacroAssembler::IncrementCounter(StatsCounter* counter, int value,
2576 Register scratch1, Register scratch2) {
2577 DCHECK(value > 0);
2578 if (FLAG_native_code_counters && counter->Enabled()) {
2579 mov(scratch2, Operand(ExternalReference(counter)));
2580 lwz(scratch1, MemOperand(scratch2));
2581 addi(scratch1, scratch1, Operand(value));
2582 stw(scratch1, MemOperand(scratch2));
2583 }
2584 }
2585
2586
2587 void MacroAssembler::DecrementCounter(StatsCounter* counter, int value,
2588 Register scratch1, Register scratch2) {
2589 DCHECK(value > 0);
2590 if (FLAG_native_code_counters && counter->Enabled()) {
2591 mov(scratch2, Operand(ExternalReference(counter)));
2592 lwz(scratch1, MemOperand(scratch2));
2593 subi(scratch1, scratch1, Operand(value));
2594 stw(scratch1, MemOperand(scratch2));
2595 }
2596 }
2597
2598
2599 void MacroAssembler::Assert(Condition cond, BailoutReason reason,
2600 CRegister cr) {
2601 if (emit_debug_code()) Check(cond, reason, cr);
2602 }
2603
2604
2605 void MacroAssembler::AssertFastElements(Register elements) {
2606 if (emit_debug_code()) {
2607 DCHECK(!elements.is(ip));
2608 Label ok;
2609 push(elements);
2610 LoadP(elements, FieldMemOperand(elements, HeapObject::kMapOffset));
2611 LoadRoot(ip, Heap::kFixedArrayMapRootIndex);
2612 cmp(elements, ip);
2613 beq(&ok);
2614 LoadRoot(ip, Heap::kFixedDoubleArrayMapRootIndex);
2615 cmp(elements, ip);
2616 beq(&ok);
2617 LoadRoot(ip, Heap::kFixedCOWArrayMapRootIndex);
2618 cmp(elements, ip);
2619 beq(&ok);
2620 Abort(kJSObjectWithFastElementsMapHasSlowElements);
2621 bind(&ok);
2622 pop(elements);
2623 }
2624 }
2625
2626
2627 void MacroAssembler::Check(Condition cond, BailoutReason reason, CRegister cr) {
2628 Label L;
2629 b(cond, &L, cr);
2630 Abort(reason);
2631 // will not return here
2632 bind(&L);
2633 }
2634
2635
2636 void MacroAssembler::Abort(BailoutReason reason) {
2637 Label abort_start;
2638 bind(&abort_start);
2639 #ifdef DEBUG
2640 const char* msg = GetBailoutReason(reason);
2641 if (msg != NULL) {
2642 RecordComment("Abort message: ");
2643 RecordComment(msg);
2644 }
2645
2646 if (FLAG_trap_on_abort) {
2647 stop(msg);
2648 return;
2649 }
2650 #endif
2651
2652 LoadSmiLiteral(r0, Smi::FromInt(reason));
2653 push(r0);
2654 // Disable stub call restrictions to always allow calls to abort.
2655 if (!has_frame_) {
2656 // We don't actually want to generate a pile of code for this, so just
2657 // claim there is a stack frame, without generating one.
2658 FrameScope scope(this, StackFrame::NONE);
2659 CallRuntime(Runtime::kAbort, 1);
2660 } else {
2661 CallRuntime(Runtime::kAbort, 1);
2662 }
2663 // will not return here
2664 }
2665
2666
2667 void MacroAssembler::LoadContext(Register dst, int context_chain_length) {
2668 if (context_chain_length > 0) {
2669 // Move up the chain of contexts to the context containing the slot.
2670 LoadP(dst, MemOperand(cp, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2671 for (int i = 1; i < context_chain_length; i++) {
2672 LoadP(dst, MemOperand(dst, Context::SlotOffset(Context::PREVIOUS_INDEX)));
2673 }
2674 } else {
2675 // Slot is in the current function context. Move it into the
2676 // destination register in case we store into it (the write barrier
2677 // cannot be allowed to destroy the context in cp).
2678 mr(dst, cp);
2679 }
2680 }
2681
2682
2683 void MacroAssembler::LoadTransitionedArrayMapConditional(
2684 ElementsKind expected_kind, ElementsKind transitioned_kind,
2685 Register map_in_out, Register scratch, Label* no_map_match) {
2686 // Load the global or builtins object from the current context.
2687 LoadP(scratch,
2688 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2689 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2690
2691 // Check that the function's map is the same as the expected cached map.
2692 LoadP(scratch,
2693 MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2694 size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
2695 LoadP(ip, FieldMemOperand(scratch, offset));
2696 cmp(map_in_out, ip);
2697 bne(no_map_match);
2698
2699 // Use the transitioned cached map.
2700 offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
2701 LoadP(map_in_out, FieldMemOperand(scratch, offset));
2702 }
2703
2704
2705 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2706 // Load the global or builtins object from the current context.
2707 LoadP(function,
2708 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2709 // Load the native context from the global or builtins object.
2710 LoadP(function,
2711 FieldMemOperand(function, GlobalObject::kNativeContextOffset));
2712 // Load the function from the native context.
2713 LoadP(function, MemOperand(function, Context::SlotOffset(index)), r0);
2714 }
2715
2716
2717 void MacroAssembler::LoadGlobalFunctionInitialMap(Register function,
2718 Register map,
2719 Register scratch) {
2720 // Load the initial map. The global functions all have initial maps.
2721 LoadP(map,
2722 FieldMemOperand(function, JSFunction::kPrototypeOrInitialMapOffset));
2723 if (emit_debug_code()) {
2724 Label ok, fail;
2725 CheckMap(map, scratch, Heap::kMetaMapRootIndex, &fail, DO_SMI_CHECK);
2726 b(&ok);
2727 bind(&fail);
2728 Abort(kGlobalFunctionsMustHaveInitialMap);
2729 bind(&ok);
2730 }
2731 }
2732
2733
2734 void MacroAssembler::JumpIfNotPowerOfTwoOrZero(
2735 Register reg, Register scratch, Label* not_power_of_two_or_zero) {
2736 subi(scratch, reg, Operand(1));
2737 cmpi(scratch, Operand::Zero());
2738 blt(not_power_of_two_or_zero);
2739 and_(r0, scratch, reg, SetRC);
2740 bne(not_power_of_two_or_zero, cr0);
2741 }
2742
2743
2744 void MacroAssembler::JumpIfNotPowerOfTwoOrZeroAndNeg(Register reg,
2745 Register scratch,
2746 Label* zero_and_neg,
2747 Label* not_power_of_two) {
2748 subi(scratch, reg, Operand(1));
2749 cmpi(scratch, Operand::Zero());
2750 blt(zero_and_neg);
2751 and_(r0, scratch, reg, SetRC);
2752 bne(not_power_of_two, cr0);
2753 }
2754
2755 #if !V8_TARGET_ARCH_PPC64
2756 void MacroAssembler::SmiTagCheckOverflow(Register reg, Register overflow) {
2757 DCHECK(!reg.is(overflow));
2758 mr(overflow, reg); // Save original value.
2759 SmiTag(reg);
2760 xor_(overflow, overflow, reg, SetRC); // Overflow if (value ^ 2 * value) < 0.
2761 }
2762
2763
2764 void MacroAssembler::SmiTagCheckOverflow(Register dst, Register src,
2765 Register overflow) {
2766 if (dst.is(src)) {
2767 // Fall back to slower case.
2768 SmiTagCheckOverflow(dst, overflow);
2769 } else {
2770 DCHECK(!dst.is(src));
2771 DCHECK(!dst.is(overflow));
2772 DCHECK(!src.is(overflow));
2773 SmiTag(dst, src);
2774 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0.
2775 }
2776 }
2777 #endif
2778
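On 32-bit targets a smi is the value shifted left by one, so tagging overflows exactly when the shift changes the sign bit — that is what the xor_ with SetRC records. Sketch:

    #include <cstdint>

    bool SmiTagOverflows(int32_t value, int32_t* tagged) {
      int32_t smi = static_cast<int32_t>(static_cast<uint32_t>(value) << 1);
      *tagged = smi;
      return (value ^ smi) < 0;  // overflow if (value ^ 2 * value) < 0
    }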
2779 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2780 Label* on_not_both_smi) {
2781 STATIC_ASSERT(kSmiTag == 0);
2782 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2783 orx(r0, reg1, reg2, LeaveRC);
2784 JumpIfNotSmi(r0, on_not_both_smi);
2785 }
2786
2787
2788 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2789 Label* smi_case) {
2790 STATIC_ASSERT(kSmiTag == 0);
2791 STATIC_ASSERT(kSmiTagSize == 1);
2792 TestBit(src, 0, r0);
2793 SmiUntag(dst, src);
2794 beq(smi_case, cr0);
2795 }
2796
2797
2798 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2799 Label* non_smi_case) {
2800 STATIC_ASSERT(kSmiTag == 0);
2801 STATIC_ASSERT(kSmiTagSize == 1);
2802 TestBit(src, 0, r0);
2803 SmiUntag(dst, src);
2804 bne(non_smi_case, cr0);
2805 }
2806
2807
2808 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2809 Label* on_either_smi) {
2810 STATIC_ASSERT(kSmiTag == 0);
2811 JumpIfSmi(reg1, on_either_smi);
2812 JumpIfSmi(reg2, on_either_smi);
2813 }
2814
2815
2816 void MacroAssembler::AssertNotSmi(Register object) {
2817 if (emit_debug_code()) {
2818 STATIC_ASSERT(kSmiTag == 0);
2819 TestIfSmi(object, r0);
2820 Check(ne, kOperandIsASmi, cr0);
2821 }
2822 }
2823
2824
2825 void MacroAssembler::AssertSmi(Register object) {
2826 if (emit_debug_code()) {
2827 STATIC_ASSERT(kSmiTag == 0);
2828 TestIfSmi(object, r0);
2829 Check(eq, kOperandIsNotSmi, cr0);
2830 }
2831 }
2832
2833
2834 void MacroAssembler::AssertString(Register object) {
2835 if (emit_debug_code()) {
2836 STATIC_ASSERT(kSmiTag == 0);
2837 TestIfSmi(object, r0);
2838 Check(ne, kOperandIsASmiAndNotAString, cr0);
2839 push(object);
2840 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2841 CompareInstanceType(object, object, FIRST_NONSTRING_TYPE);
2842 pop(object);
2843 Check(lt, kOperandIsNotAString);
2844 }
2845 }
2846
2847
2848 void MacroAssembler::AssertName(Register object) {
2849 if (emit_debug_code()) {
2850 STATIC_ASSERT(kSmiTag == 0);
2851 TestIfSmi(object, r0);
2852 Check(ne, kOperandIsASmiAndNotAName, cr0);
2853 push(object);
2854 LoadP(object, FieldMemOperand(object, HeapObject::kMapOffset));
2855 CompareInstanceType(object, object, LAST_NAME_TYPE);
2856 pop(object);
2857 Check(le, kOperandIsNotAName);
2858 }
2859 }
2860
2861
2862 void MacroAssembler::AssertUndefinedOrAllocationSite(Register object,
2863 Register scratch) {
2864 if (emit_debug_code()) {
2865 Label done_checking;
2866 AssertNotSmi(object);
2867 CompareRoot(object, Heap::kUndefinedValueRootIndex);
2868 beq(&done_checking);
2869 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2870 CompareRoot(scratch, Heap::kAllocationSiteMapRootIndex);
2871 Assert(eq, kExpectedUndefinedOrCell);
2872 bind(&done_checking);
2873 }
2874 }
2875
2876
2877 void MacroAssembler::AssertIsRoot(Register reg, Heap::RootListIndex index) {
2878 if (emit_debug_code()) {
2879 CompareRoot(reg, index);
2880 Check(eq, kHeapNumberMapRegisterClobbered);
2881 }
2882 }
2883
2884
2885 void MacroAssembler::JumpIfNotHeapNumber(Register object,
2886 Register heap_number_map,
2887 Register scratch,
2888 Label* on_not_heap_number) {
2889 LoadP(scratch, FieldMemOperand(object, HeapObject::kMapOffset));
2890 AssertIsRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
2891 cmp(scratch, heap_number_map);
2892 bne(on_not_heap_number);
2893 }
2894
2895
2896 void MacroAssembler::LookupNumberStringCache(Register object, Register result,
2897 Register scratch1,
2898 Register scratch2,
2899 Register scratch3,
2900 Label* not_found) {
2901 // Use of registers. Register result is used as a temporary.
2902 Register number_string_cache = result;
2903 Register mask = scratch3;
2904
2905 // Load the number string cache.
2906 LoadRoot(number_string_cache, Heap::kNumberStringCacheRootIndex);
2907
2908 // Make the hash mask from the length of the number string cache. It
2909 // contains two elements (number and string) for each cache entry.
2910 LoadP(mask, FieldMemOperand(number_string_cache, FixedArray::kLengthOffset));
2911 // Divide length by two (length is a smi).
2912 ShiftRightArithImm(mask, mask, kSmiTagSize + kSmiShiftSize + 1);
2913 subi(mask, mask, Operand(1)); // Make mask.
2914
2915 // Calculate the entry in the number string cache. The hash value in the
2916 // number string cache for smis is just the smi value, and the hash for
2917 // doubles is the xor of the upper and lower words. See
2918 // Heap::GetNumberStringCache.
2919 Label is_smi;
2920 Label load_result_from_cache;
2921 JumpIfSmi(object, &is_smi);
2922 CheckMap(object, scratch1, Heap::kHeapNumberMapRootIndex, not_found,
2923 DONT_DO_SMI_CHECK);
2924
2925 STATIC_ASSERT(8 == kDoubleSize);
2926 lwz(scratch1, FieldMemOperand(object, HeapNumber::kExponentOffset));
2927 lwz(scratch2, FieldMemOperand(object, HeapNumber::kMantissaOffset));
2928 xor_(scratch1, scratch1, scratch2);
2929 and_(scratch1, scratch1, mask);
2930
2931 // Calculate address of entry in string cache: each entry consists
2932 // of two pointer sized fields.
2933 ShiftLeftImm(scratch1, scratch1, Operand(kPointerSizeLog2 + 1));
2934 add(scratch1, number_string_cache, scratch1);
2935
2936 Register probe = mask;
2937 LoadP(probe, FieldMemOperand(scratch1, FixedArray::kHeaderSize));
2938 JumpIfSmi(probe, not_found);
2939 lfd(d0, FieldMemOperand(object, HeapNumber::kValueOffset));
2940 lfd(d1, FieldMemOperand(probe, HeapNumber::kValueOffset));
2941 fcmpu(d0, d1);
2942 bne(not_found); // The cache did not contain this value.
2943 b(&load_result_from_cache);
2944
2945 bind(&is_smi);
2946 Register scratch = scratch1;
2947 SmiUntag(scratch, object);
2948 and_(scratch, mask, scratch);
2949 // Calculate address of entry in string cache: each entry consists
2950 // of two pointer sized fields.
2951 ShiftLeftImm(scratch, scratch, Operand(kPointerSizeLog2 + 1));
2952 add(scratch, number_string_cache, scratch);
2953
2954 // Check if the entry is the smi we are looking for.
2955 LoadP(probe, FieldMemOperand(scratch, FixedArray::kHeaderSize));
2956 cmp(object, probe);
2957 bne(not_found);
2958
2959 // Get the result from the cache.
2960 bind(&load_result_from_cache);
2961 LoadP(result,
2962 FieldMemOperand(scratch, FixedArray::kHeaderSize + kPointerSize));
2963 IncrementCounter(isolate()->counters()->number_to_string_native(), 1,
2964 scratch1, scratch2);
2965 }
2966
2967
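As the comment above notes, the cache probe hashes a heap number by xor-ing the upper and lower 32-bit words of its IEEE-754 representation and masking with the (power-of-two) entry count minus one. A sketch of the hash:

    #include <cstdint>
    #include <cstring>

    uint32_t NumberStringCacheHash(double value, uint32_t mask) {
      uint64_t bits;
      std::memcpy(&bits, &value, sizeof(bits));  // the kExponentOffset/kMantissaOffset loads
      uint32_t hash = static_cast<uint32_t>(bits) ^
                      static_cast<uint32_t>(bits >> 32);
      return hash & mask;  // mask = (cache length / 2) - 1
    }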
2968 void MacroAssembler::JumpIfNonSmisNotBothSequentialOneByteStrings(
2969 Register first, Register second, Register scratch1, Register scratch2,
2970 Label* failure) {
2971 // Test that both first and second are sequential one-byte strings.
2972 // Assume that they are non-smis.
2973 LoadP(scratch1, FieldMemOperand(first, HeapObject::kMapOffset));
2974 LoadP(scratch2, FieldMemOperand(second, HeapObject::kMapOffset));
2975 lbz(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset));
2976 lbz(scratch2, FieldMemOperand(scratch2, Map::kInstanceTypeOffset));
2977
2978 JumpIfBothInstanceTypesAreNotSequentialOneByte(scratch1, scratch2, scratch1,
2979 scratch2, failure);
2980 }
2981
2982 void MacroAssembler::JumpIfNotBothSequentialOneByteStrings(Register first,
2983 Register second,
2984 Register scratch1,
2985 Register scratch2,
2986 Label* failure) {
2987 // Check that neither is a smi.
2988 and_(scratch1, first, second);
2989 JumpIfSmi(scratch1, failure);
2990 JumpIfNonSmisNotBothSequentialOneByteStrings(first, second, scratch1,
2991 scratch2, failure);
2992 }
2993
2994
2995 void MacroAssembler::JumpIfNotUniqueName(Register reg, Label* not_unique_name) {
2996 STATIC_ASSERT(kInternalizedTag == 0 && kStringTag == 0);
2997 Label succeed;
2998 andi(r0, reg, Operand(kIsNotStringMask | kIsNotInternalizedMask));
2999 beq(&succeed, cr0);
3000 cmpi(reg, Operand(SYMBOL_TYPE));
3001 bne(not_unique_name);
3002
3003 bind(&succeed);
3004 }
3005
3006
3007 // Allocates a heap number or jumps to the gc_required label if the young space
3008 // is full and a scavenge is needed.
3009 void MacroAssembler::AllocateHeapNumber(Register result, Register scratch1,
3010 Register scratch2,
3011 Register heap_number_map,
3012 Label* gc_required,
3013 TaggingMode tagging_mode,
3014 MutableMode mode) {
3015 // Allocate an object in the heap for the heap number and tag it as a heap
3016 // object.
3017 Allocate(HeapNumber::kSize, result, scratch1, scratch2, gc_required,
3018 tagging_mode == TAG_RESULT ? TAG_OBJECT : NO_ALLOCATION_FLAGS);
3019
3020 Heap::RootListIndex map_index = mode == MUTABLE
3021 ? Heap::kMutableHeapNumberMapRootIndex
3022 : Heap::kHeapNumberMapRootIndex;
3023 AssertIsRoot(heap_number_map, map_index);
3024
3025 // Store heap number map in the allocated object.
3026 if (tagging_mode == TAG_RESULT) {
3027 StoreP(heap_number_map, FieldMemOperand(result, HeapObject::kMapOffset),
3028 r0);
3029 } else {
3030 StoreP(heap_number_map, MemOperand(result, HeapObject::kMapOffset));
3031 }
3032 }
3033
3034
3035 void MacroAssembler::AllocateHeapNumberWithValue(
3036 Register result, DoubleRegister value, Register scratch1, Register scratch2,
3037 Register heap_number_map, Label* gc_required) {
3038 AllocateHeapNumber(result, scratch1, scratch2, heap_number_map, gc_required);
3039 stfd(value, FieldMemOperand(result, HeapNumber::kValueOffset));
3040 }
3041
3042
3043 // Copies a fixed number of fields of heap objects from src to dst.
3044 void MacroAssembler::CopyFields(Register dst, Register src, RegList temps,
3045 int field_count) {
3046 // At least one bit set in the first 15 registers.
3047 DCHECK((temps & ((1 << 15) - 1)) != 0);
3048 DCHECK((temps & dst.bit()) == 0);
3049 DCHECK((temps & src.bit()) == 0);
3050 // Primitive implementation using only one temporary register.
3051
3052 Register tmp = no_reg;
3053 // Find a temp register in temps list.
3054 for (int i = 0; i < 15; i++) {
3055 if ((temps & (1 << i)) != 0) {
3056 tmp.set_code(i);
3057 break;
3058 }
3059 }
3060 DCHECK(!tmp.is(no_reg));
3061
3062 for (int i = 0; i < field_count; i++) {
3063 LoadP(tmp, FieldMemOperand(src, i * kPointerSize), r0);
3064 StoreP(tmp, FieldMemOperand(dst, i * kPointerSize), r0);
3065 }
3066 }
3067
3068
3069 void MacroAssembler::CopyBytes(Register src, Register dst, Register length,
3070 Register scratch) {
3071 Label align_loop, aligned, word_loop, byte_loop, byte_loop_1, done;
3072
3073 DCHECK(!scratch.is(r0));
3074
3075 cmpi(length, Operand::Zero());
3076 beq(&done);
3077
3078 // Check src alignment and length to see whether word_loop is possible
3079 andi(scratch, src, Operand(kPointerSize - 1));
3080 beq(&aligned, cr0);
3081 subfic(scratch, scratch, Operand(kPointerSize * 2));
3082 cmp(length, scratch);
3083 blt(&byte_loop);
3084
3085 // Align src before copying in word size chunks.
3086 subi(scratch, scratch, Operand(kPointerSize));
3087 mtctr(scratch);
3088 bind(&align_loop);
3089 lbz(scratch, MemOperand(src));
3090 addi(src, src, Operand(1));
3091 subi(length, length, Operand(1));
3092 stb(scratch, MemOperand(dst));
3093 addi(dst, dst, Operand(1));
3094 bdnz(&align_loop);
3095
3096 bind(&aligned);
3097
3098 // Copy bytes in word size chunks.
3099 if (emit_debug_code()) {
3100 andi(r0, src, Operand(kPointerSize - 1));
3101 Assert(eq, kExpectingAlignmentForCopyBytes, cr0);
3102 }
3103
3104 ShiftRightImm(scratch, length, Operand(kPointerSizeLog2));
3105 cmpi(scratch, Operand::Zero());
3106 beq(&byte_loop);
3107
3108 mtctr(scratch);
3109 bind(&word_loop);
3110 LoadP(scratch, MemOperand(src));
3111 addi(src, src, Operand(kPointerSize));
3112 subi(length, length, Operand(kPointerSize));
3113 if (CpuFeatures::IsSupported(UNALIGNED_ACCESSES)) {
3114 // Currently false for PPC, but a possible future optimization.
3115 StoreP(scratch, MemOperand(dst));
3116 addi(dst, dst, Operand(kPointerSize));
3117 } else {
3118 #if V8_TARGET_LITTLE_ENDIAN
3119 stb(scratch, MemOperand(dst, 0));
3120 ShiftRightImm(scratch, scratch, Operand(8));
3121 stb(scratch, MemOperand(dst, 1));
3122 ShiftRightImm(scratch, scratch, Operand(8));
3123 stb(scratch, MemOperand(dst, 2));
3124 ShiftRightImm(scratch, scratch, Operand(8));
3125 stb(scratch, MemOperand(dst, 3));
3126 #if V8_TARGET_ARCH_PPC64
3127 ShiftRightImm(scratch, scratch, Operand(8));
3128 stb(scratch, MemOperand(dst, 4));
3129 ShiftRightImm(scratch, scratch, Operand(8));
3130 stb(scratch, MemOperand(dst, 5));
3131 ShiftRightImm(scratch, scratch, Operand(8));
3132 stb(scratch, MemOperand(dst, 6));
3133 ShiftRightImm(scratch, scratch, Operand(8));
3134 stb(scratch, MemOperand(dst, 7));
3135 #endif
3136 #else
3137 #if V8_TARGET_ARCH_PPC64
3138 stb(scratch, MemOperand(dst, 7));
3139 ShiftRightImm(scratch, scratch, Operand(8));
3140 stb(scratch, MemOperand(dst, 6));
3141 ShiftRightImm(scratch, scratch, Operand(8));
3142 stb(scratch, MemOperand(dst, 5));
3143 ShiftRightImm(scratch, scratch, Operand(8));
3144 stb(scratch, MemOperand(dst, 4));
3145 ShiftRightImm(scratch, scratch, Operand(8));
3146 #endif
3147 stb(scratch, MemOperand(dst, 3));
3148 ShiftRightImm(scratch, scratch, Operand(8));
3149 stb(scratch, MemOperand(dst, 2));
3150 ShiftRightImm(scratch, scratch, Operand(8));
3151 stb(scratch, MemOperand(dst, 1));
3152 ShiftRightImm(scratch, scratch, Operand(8));
3153 stb(scratch, MemOperand(dst, 0));
3154 #endif
3155 addi(dst, dst, Operand(kPointerSize));
3156 }
3157 bdnz(&word_loop);
3158
3159 // Copy the last bytes if any left.
3160 cmpi(length, Operand::Zero());
3161 beq(&done);
3162
3163 bind(&byte_loop);
3164 mtctr(length);
3165 bind(&byte_loop_1);
3166 lbz(scratch, MemOperand(src));
3167 addi(src, src, Operand(1));
3168 stb(scratch, MemOperand(dst));
3169 addi(dst, dst, Operand(1));
3170 bdnz(&byte_loop_1);
3171
3172 bind(&done);
3173 }
3174
3175
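When unaligned accesses are not supported, the stb/ShiftRightImm ladder in CopyBytes writes each loaded word one byte at a time, choosing offsets so memory order matches the target's endianness. A sketch of the same idea:

    #include <cstdint>
    #include <cstddef>

    void StoreWordBytewise(uint8_t* dst, uintptr_t word, bool little_endian) {
      for (size_t i = 0; i < sizeof(word); i++) {
        size_t offset = little_endian ? i : sizeof(word) - 1 - i;
        dst[offset] = static_cast<uint8_t>(word >> (8 * i));  // stb of the low byte, then shift
      }
    }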
3176 void MacroAssembler::InitializeNFieldsWithFiller(Register start_offset,
3177 Register count,
3178 Register filler) {
3179 Label loop;
3180 mtctr(count);
3181 bind(&loop);
3182 StoreP(filler, MemOperand(start_offset));
3183 addi(start_offset, start_offset, Operand(kPointerSize));
3184 bdnz(&loop);
3185 }
3186
3187 void MacroAssembler::InitializeFieldsWithFiller(Register start_offset,
3188 Register end_offset,
3189 Register filler) {
3190 Label done;
3191 sub(r0, end_offset, start_offset, LeaveOE, SetRC);
3192 beq(&done, cr0);
3193 ShiftRightImm(r0, r0, Operand(kPointerSizeLog2));
3194 InitializeNFieldsWithFiller(start_offset, r0, filler);
3195 bind(&done);
3196 }
3197
3198
3199 void MacroAssembler::SaveFPRegs(Register location, int first, int count) {
3200 DCHECK(count > 0);
3201 int cur = first;
3202 subi(location, location, Operand(count * kDoubleSize));
3203 for (int i = 0; i < count; i++) {
3204 DoubleRegister reg = DoubleRegister::from_code(cur++);
3205 stfd(reg, MemOperand(location, i * kDoubleSize));
3206 }
3207 }
3208
3209
3210 void MacroAssembler::RestoreFPRegs(Register location, int first, int count) {
3211 DCHECK(count > 0);
3212 int cur = first + count - 1;
3213 for (int i = count - 1; i >= 0; i--) {
3214 DoubleRegister reg = DoubleRegister::from_code(cur--);
3215 lfd(reg, MemOperand(location, i * kDoubleSize));
3216 }
3217 addi(location, location, Operand(count * kDoubleSize));
3218 }
3219
3220
3221 void MacroAssembler::JumpIfBothInstanceTypesAreNotSequentialOneByte(
3222 Register first, Register second, Register scratch1, Register scratch2,
3223 Label* failure) {
3224 const int kFlatOneByteStringMask =
3225 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3226 const int kFlatOneByteStringTag =
3227 kStringTag | kOneByteStringTag | kSeqStringTag;
3228 andi(scratch1, first, Operand(kFlatOneByteStringMask));
3229 andi(scratch2, second, Operand(kFlatOneByteStringMask));
3230 cmpi(scratch1, Operand(kFlatOneByteStringTag));
3231 bne(failure);
3232 cmpi(scratch2, Operand(kFlatOneByteStringTag));
3233 bne(failure);
3234 }
3235
3236
3237 void MacroAssembler::JumpIfInstanceTypeIsNotSequentialOneByte(Register type,
3238 Register scratch,
3239 Label* failure) {
3240 const int kFlatOneByteStringMask =
3241 kIsNotStringMask | kStringEncodingMask | kStringRepresentationMask;
3242 const int kFlatOneByteStringTag =
3243 kStringTag | kOneByteStringTag | kSeqStringTag;
3244 andi(scratch, type, Operand(kFlatOneByteStringMask));
3245 cmpi(scratch, Operand(kFlatOneByteStringTag));
3246 bne(failure);
3247 }
3248
3249 static const int kRegisterPassedArguments = 8;
3250
3251
3252 int MacroAssembler::CalculateStackPassedWords(int num_reg_arguments,
3253 int num_double_arguments) {
3254 int stack_passed_words = 0;
3255 if (num_double_arguments > DoubleRegister::kNumRegisters) {
3256 stack_passed_words +=
3257 2 * (num_double_arguments - DoubleRegister::kNumRegisters);
3258 }
3259 // Up to 8 simple arguments are passed in registers r3..r10.
3260 if (num_reg_arguments > kRegisterPassedArguments) {
3261 stack_passed_words += num_reg_arguments - kRegisterPassedArguments;
3262 }
3263 return stack_passed_words;
3264 }
3265
3266
3267 void MacroAssembler::EmitSeqStringSetCharCheck(Register string, Register index,
3268 Register value,
3269 uint32_t encoding_mask) {
3270 Label is_object;
3271 TestIfSmi(string, r0);
3272 Check(ne, kNonObject, cr0);
3273
3274 LoadP(ip, FieldMemOperand(string, HeapObject::kMapOffset));
3275 lbz(ip, FieldMemOperand(ip, Map::kInstanceTypeOffset));
3276
3277 andi(ip, ip, Operand(kStringRepresentationMask | kStringEncodingMask));
3278 cmpi(ip, Operand(encoding_mask));
3279 Check(eq, kUnexpectedStringType);
3280
3281 // The index is assumed to be untagged coming in; tag it to compare with the
3282 // string length without using a temp register. It is restored at the end of
3283 // this function.
3284 #if !V8_TARGET_ARCH_PPC64
3285 Label index_tag_ok, index_tag_bad;
3286 JumpIfNotSmiCandidate(index, r0, &index_tag_bad);
3287 #endif
3288 SmiTag(index, index);
3289 #if !V8_TARGET_ARCH_PPC64
3290 b(&index_tag_ok);
3291 bind(&index_tag_bad);
3292 Abort(kIndexIsTooLarge);
3293 bind(&index_tag_ok);
3294 #endif
3295
3296 LoadP(ip, FieldMemOperand(string, String::kLengthOffset));
3297 cmp(index, ip);
3298 Check(lt, kIndexIsTooLarge);
3299
3300 DCHECK(Smi::FromInt(0) == 0);
3301 cmpi(index, Operand::Zero());
3302 Check(ge, kIndexIsNegative);
3303
3304 SmiUntag(index, index);
3305 }
3306
3307
3308 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3309 int num_double_arguments,
3310 Register scratch) {
3311 int frame_alignment = ActivationFrameAlignment();
3312 int stack_passed_arguments =
3313 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3314 int stack_space = kNumRequiredStackFrameSlots;
3315
3316 if (frame_alignment > kPointerSize) {
3317 // Make stack end at alignment and make room for stack arguments
3318 // -- preserving original value of sp.
3319 mr(scratch, sp);
3320 addi(sp, sp, Operand(-(stack_passed_arguments + 1) * kPointerSize));
3321 DCHECK(base::bits::IsPowerOfTwo32(frame_alignment));
3322 ClearRightImm(sp, sp, Operand(WhichPowerOf2(frame_alignment)));
3323 StoreP(scratch, MemOperand(sp, stack_passed_arguments * kPointerSize));
3324 } else {
3325 // Make room for stack arguments
3326 stack_space += stack_passed_arguments;
3327 }
3328
3329 // Allocate frame with required slots to make ABI work.
3330 li(r0, Operand::Zero());
3331 StorePU(r0, MemOperand(sp, -stack_space * kPointerSize));
3332 }
3333
3334
3335 void MacroAssembler::PrepareCallCFunction(int num_reg_arguments,
3336 Register scratch) {
3337 PrepareCallCFunction(num_reg_arguments, 0, scratch);
3338 }
3339
3340
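When the ABI's frame alignment exceeds the pointer size, PrepareCallCFunction saves the old sp, makes room for the stack-passed arguments, and clears the low bits of sp with ClearRightImm so the new frame is aligned. The address computation, as a sketch:

    #include <cstdint>

    uintptr_t AlignedSpForCCall(uintptr_t sp, int stack_passed_arguments,
                                int frame_alignment, int pointer_size) {
      // frame_alignment must be a power of two (DCHECK'd in the code above).
      uintptr_t new_sp = sp - (stack_passed_arguments + 1) * pointer_size;
      return new_sp & ~(static_cast<uintptr_t>(frame_alignment) - 1);
    }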
3341 void MacroAssembler::MovToFloatParameter(DoubleRegister src) { Move(d1, src); }
3342
3343
3344 void MacroAssembler::MovToFloatResult(DoubleRegister src) { Move(d1, src); }
3345
3346
3347 void MacroAssembler::MovToFloatParameters(DoubleRegister src1,
3348 DoubleRegister src2) {
3349 if (src2.is(d1)) {
3350 DCHECK(!src1.is(d2));
3351 Move(d2, src2);
3352 Move(d1, src1);
3353 } else {
3354 Move(d1, src1);
3355 Move(d2, src2);
3356 }
3357 }
3358
3359
3360 void MacroAssembler::CallCFunction(ExternalReference function,
3361 int num_reg_arguments,
3362 int num_double_arguments) {
3363 mov(ip, Operand(function));
3364 CallCFunctionHelper(ip, num_reg_arguments, num_double_arguments);
3365 }
3366
3367
3368 void MacroAssembler::CallCFunction(Register function, int num_reg_arguments,
3369 int num_double_arguments) {
3370 CallCFunctionHelper(function, num_reg_arguments, num_double_arguments);
3371 }
3372
3373
3374 void MacroAssembler::CallCFunction(ExternalReference function,
3375 int num_arguments) {
3376 CallCFunction(function, num_arguments, 0);
3377 }
3378
3379
3380 void MacroAssembler::CallCFunction(Register function, int num_arguments) {
3381 CallCFunction(function, num_arguments, 0);
3382 }
3383
3384
3385 void MacroAssembler::CallCFunctionHelper(Register function,
3386 int num_reg_arguments,
3387 int num_double_arguments) {
3388 DCHECK(has_frame());
3389 // Just call directly. The function called cannot cause a GC, or
3390 // allow preemption, so the return address in the link register
3391 // stays correct.
3392 #if ABI_USES_FUNCTION_DESCRIPTORS && !defined(USE_SIMULATOR)
3393   // AIX uses a function descriptor. When calling C code, be aware
3394   // of this descriptor and pick up values from it.
3395 LoadP(ToRegister(ABI_TOC_REGISTER), MemOperand(function, kPointerSize));
3396 LoadP(ip, MemOperand(function, 0));
3397 Register dest = ip;
3398 #elif ABI_TOC_ADDRESSABILITY_VIA_IP
3399 Move(ip, function);
3400 Register dest = ip;
3401 #else
3402 Register dest = function;
3403 #endif
3404
3405 Call(dest);
3406
3407   // Remove frame created in PrepareCallCFunction
3408 int stack_passed_arguments =
3409 CalculateStackPassedWords(num_reg_arguments, num_double_arguments);
3410 int stack_space = kNumRequiredStackFrameSlots + stack_passed_arguments;
3411 if (ActivationFrameAlignment() > kPointerSize) {
3412 LoadP(sp, MemOperand(sp, stack_space * kPointerSize));
3413 } else {
3414 addi(sp, sp, Operand(stack_space * kPointerSize));
3415 }
3416 }
3417
3418
3419 void MacroAssembler::FlushICache(Register address, size_t size,
3420 Register scratch) {
3421 if (CpuFeatures::IsSupported(INSTR_AND_DATA_CACHE_COHERENCY)) {
3422 sync();
3423 icbi(r0, address);
3424 isync();
3425 return;
3426 }
3427
3428 Label done;
3429
3430 dcbf(r0, address);
3431 sync();
3432 icbi(r0, address);
3433 isync();
3434
3435   // This code handles ranges which cross at most one cacheline boundary.
3436   // scratch holds the start of the last cacheline which intersects the range.
3437 const int kCacheLineSizeLog2 = WhichPowerOf2(CpuFeatures::cache_line_size());
3438
3439 DCHECK(size > 0 && size <= (size_t)(1 << kCacheLineSizeLog2));
3440 addi(scratch, address, Operand(size - 1));
3441 ClearRightImm(scratch, scratch, Operand(kCacheLineSizeLog2));
3442 cmpl(scratch, address);
3443 ble(&done);
3444
3445 dcbf(r0, scratch);
3446 sync();
3447 icbi(r0, scratch);
3448 isync();
3449
3450 bind(&done);
3451 }
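// --- Illustrative sketch (not part of the patch) ---------------------------
// The boundary test above: a second cacheline is flushed only when the last
// byte of [address, address + size) falls in a different line than the first.
// The 128-byte line size below is an assumption for illustration.
#include <cstdint>

static bool RangeCrossesCacheLine(uintptr_t address, size_t size) {
  const uintptr_t kCacheLineSize = 128;  // power of two, per cache_line_size()
  uintptr_t last_line = (address + size - 1) & ~(kCacheLineSize - 1);
  return last_line > address;  // mirrors the cmpl/ble pair above
}
// RangeCrossesCacheLine(0x100, 8) == false; RangeCrossesCacheLine(0x17C, 8) == true.
// ----------------------------------------------------------------------------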
3452
3453
3454 void MacroAssembler::SetRelocatedValue(Register location, Register scratch,
3455 Register new_value) {
3456 lwz(scratch, MemOperand(location));
3457
3458 #if V8_OOL_CONSTANT_POOL
3459 if (emit_debug_code()) {
3460 // Check that the instruction sequence is a load from the constant pool
3461 #if V8_TARGET_ARCH_PPC64
3462 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
3463 Cmpi(scratch, Operand(ADDI), r0);
3464 Check(eq, kTheInstructionShouldBeALi);
3465 lwz(scratch, MemOperand(location, kInstrSize));
3466 #endif
3467 ExtractBitMask(scratch, scratch, 0x1f * B16);
3468 cmpi(scratch, Operand(kConstantPoolRegister.code()));
3469 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3470 // Scratch was clobbered. Restore it.
3471 lwz(scratch, MemOperand(location));
3472 }
3473 // Get the address of the constant and patch it.
3474 andi(scratch, scratch, Operand(kImm16Mask));
3475 StorePX(new_value, MemOperand(kConstantPoolRegister, scratch));
3476 #else
3477 // This code assumes a FIXED_SEQUENCE for lis/ori
3478
3479 // At this point scratch is a lis instruction.
3480 if (emit_debug_code()) {
3481 And(scratch, scratch, Operand(kOpcodeMask | (0x1f * B16)));
3482 Cmpi(scratch, Operand(ADDIS), r0);
3483 Check(eq, kTheInstructionToPatchShouldBeALis);
3484 lwz(scratch, MemOperand(location));
3485 }
3486
3487 // insert new high word into lis instruction
3488 #if V8_TARGET_ARCH_PPC64
3489 srdi(ip, new_value, Operand(32));
3490 rlwimi(scratch, ip, 16, 16, 31);
3491 #else
3492 rlwimi(scratch, new_value, 16, 16, 31);
3493 #endif
3494
3495 stw(scratch, MemOperand(location));
3496
3497 lwz(scratch, MemOperand(location, kInstrSize));
3498 // scratch is now ori.
3499 if (emit_debug_code()) {
3500 And(scratch, scratch, Operand(kOpcodeMask));
3501 Cmpi(scratch, Operand(ORI), r0);
3502 Check(eq, kTheInstructionShouldBeAnOri);
3503 lwz(scratch, MemOperand(location, kInstrSize));
3504 }
3505
3506 // insert new low word into ori instruction
3507 #if V8_TARGET_ARCH_PPC64
3508 rlwimi(scratch, ip, 0, 16, 31);
3509 #else
3510 rlwimi(scratch, new_value, 0, 16, 31);
3511 #endif
3512 stw(scratch, MemOperand(location, kInstrSize));
3513
3514 #if V8_TARGET_ARCH_PPC64
3515 if (emit_debug_code()) {
3516 lwz(scratch, MemOperand(location, 2 * kInstrSize));
3517 // scratch is now sldi.
3518 And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
3519 Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
3520 Check(eq, kTheInstructionShouldBeASldi);
3521 }
3522
3523 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3524 // scratch is now ori.
3525 if (emit_debug_code()) {
3526 And(scratch, scratch, Operand(kOpcodeMask));
3527 Cmpi(scratch, Operand(ORIS), r0);
3528 Check(eq, kTheInstructionShouldBeAnOris);
3529 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3530 }
3531
3532 rlwimi(scratch, new_value, 16, 16, 31);
3533 stw(scratch, MemOperand(location, 3 * kInstrSize));
3534
3535 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3536 // scratch is now ori.
3537 if (emit_debug_code()) {
3538 And(scratch, scratch, Operand(kOpcodeMask));
3539 Cmpi(scratch, Operand(ORI), r0);
3540 Check(eq, kTheInstructionShouldBeAnOri);
3541 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3542 }
3543 rlwimi(scratch, new_value, 0, 16, 31);
3544 stw(scratch, MemOperand(location, 4 * kInstrSize));
3545 #endif
3546
3547   // Update the I-cache so the new lis and ori instructions can be executed.
3548 #if V8_TARGET_ARCH_PPC64
3549 FlushICache(location, 5 * kInstrSize, scratch);
3550 #else
3551 FlushICache(location, 2 * kInstrSize, scratch);
3552 #endif
3553 #endif
3554 }
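// --- Illustrative sketch (not part of the patch) ---------------------------
// The lis/ori patching above rewrites the 16-bit immediate fields of the two
// instructions. A minimal model of the split, and of the reassembly done by
// GetRelocatedValue below, for a 32-bit value (the 5-instruction 64-bit
// sequence is not modelled):
#include <cstdint>

struct LisOriImmediates { uint16_t hi, lo; };

static LisOriImmediates SplitForLisOri(uint32_t value) {
  return { static_cast<uint16_t>(value >> 16),       // lis immediate
           static_cast<uint16_t>(value & 0xFFFF) };  // ori immediate
}

static uint32_t ReassembleFromLisOri(LisOriImmediates imm) {
  return (static_cast<uint32_t>(imm.hi) << 16) | imm.lo;
}
// SplitForLisOri(0x12345678) == {0x1234, 0x5678}; reassembly round-trips.
// ----------------------------------------------------------------------------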
3555
3556
3557 void MacroAssembler::GetRelocatedValue(Register location, Register result,
3558 Register scratch) {
3559 lwz(result, MemOperand(location));
3560
3561 #if V8_OOL_CONSTANT_POOL
3562 if (emit_debug_code()) {
3563 // Check that the instruction sequence is a load from the constant pool
3564 #if V8_TARGET_ARCH_PPC64
3565 And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
3566 Cmpi(result, Operand(ADDI), r0);
3567 Check(eq, kTheInstructionShouldBeALi);
3568 lwz(result, MemOperand(location, kInstrSize));
3569 #endif
3570 ExtractBitMask(result, result, 0x1f * B16);
3571 cmpi(result, Operand(kConstantPoolRegister.code()));
3572 Check(eq, kTheInstructionToPatchShouldBeALoadFromConstantPool);
3573 lwz(result, MemOperand(location));
3574 }
3575 // Get the address of the constant and retrieve it.
3576 andi(result, result, Operand(kImm16Mask));
3577 LoadPX(result, MemOperand(kConstantPoolRegister, result));
3578 #else
3579 // This code assumes a FIXED_SEQUENCE for lis/ori
3580 if (emit_debug_code()) {
3581 And(result, result, Operand(kOpcodeMask | (0x1f * B16)));
3582 Cmpi(result, Operand(ADDIS), r0);
3583 Check(eq, kTheInstructionShouldBeALis);
3584 lwz(result, MemOperand(location));
3585 }
3586
3587 // result now holds a lis instruction. Extract the immediate.
3588 slwi(result, result, Operand(16));
3589
3590 lwz(scratch, MemOperand(location, kInstrSize));
3591 if (emit_debug_code()) {
3592 And(scratch, scratch, Operand(kOpcodeMask));
3593 Cmpi(scratch, Operand(ORI), r0);
3594 Check(eq, kTheInstructionShouldBeAnOri);
3595 lwz(scratch, MemOperand(location, kInstrSize));
3596 }
3597   // Copy the low 16 bits from the ori instruction into result
3598 rlwimi(result, scratch, 0, 16, 31);
3599
3600 #if V8_TARGET_ARCH_PPC64
3601 if (emit_debug_code()) {
3602 lwz(scratch, MemOperand(location, 2 * kInstrSize));
3603 // scratch is now sldi.
3604 And(scratch, scratch, Operand(kOpcodeMask | kExt5OpcodeMask));
3605 Cmpi(scratch, Operand(EXT5 | RLDICR), r0);
3606 Check(eq, kTheInstructionShouldBeASldi);
3607 }
3608
3609 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3610 // scratch is now ori.
3611 if (emit_debug_code()) {
3612 And(scratch, scratch, Operand(kOpcodeMask));
3613 Cmpi(scratch, Operand(ORIS), r0);
3614 Check(eq, kTheInstructionShouldBeAnOris);
3615 lwz(scratch, MemOperand(location, 3 * kInstrSize));
3616 }
3617 sldi(result, result, Operand(16));
3618 rldimi(result, scratch, 0, 48);
3619
3620 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3621 // scratch is now ori.
3622 if (emit_debug_code()) {
3623 And(scratch, scratch, Operand(kOpcodeMask));
3624 Cmpi(scratch, Operand(ORI), r0);
3625 Check(eq, kTheInstructionShouldBeAnOri);
3626 lwz(scratch, MemOperand(location, 4 * kInstrSize));
3627 }
3628 sldi(result, result, Operand(16));
3629 rldimi(result, scratch, 0, 48);
3630 #endif
3631 #endif
3632 }
3633
3634
3635 void MacroAssembler::CheckPageFlag(
3636 Register object,
3637 Register scratch, // scratch may be same register as object
3638 int mask, Condition cc, Label* condition_met) {
3639 DCHECK(cc == ne || cc == eq);
3640 ClearRightImm(scratch, object, Operand(kPageSizeBits));
3641 LoadP(scratch, MemOperand(scratch, MemoryChunk::kFlagsOffset));
3642
3643 And(r0, scratch, Operand(mask), SetRC);
3644
3645 if (cc == ne) {
3646 bne(condition_met, cr0);
3647 }
3648 if (cc == eq) {
3649 beq(condition_met, cr0);
3650 }
3651 }
3652
3653
3654 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Register scratch,
3655 Label* if_deprecated) {
3656 if (map->CanBeDeprecated()) {
3657 mov(scratch, Operand(map));
3658 lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3659 ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC);
3660 bne(if_deprecated, cr0);
3661 }
3662 }
3663
3664
3665 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3666 Register scratch1, Label* on_black) {
3667 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3668 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3669 }
3670
3671
3672 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3673 Register mask_scratch, Label* has_color,
3674 int first_bit, int second_bit) {
3675 DCHECK(!AreAliased(object, bitmap_scratch, mask_scratch, no_reg));
3676
3677 GetMarkBits(object, bitmap_scratch, mask_scratch);
3678
3679 Label other_color, word_boundary;
3680 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3681 // Test the first bit
3682 and_(r0, ip, mask_scratch, SetRC);
3683 b(first_bit == 1 ? eq : ne, &other_color, cr0);
3684 // Shift left 1
3685 // May need to load the next cell
3686 slwi(mask_scratch, mask_scratch, Operand(1), SetRC);
3687 beq(&word_boundary, cr0);
3688 // Test the second bit
3689 and_(r0, ip, mask_scratch, SetRC);
3690 b(second_bit == 1 ? ne : eq, has_color, cr0);
3691 b(&other_color);
3692
3693 bind(&word_boundary);
3694 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize + kIntSize));
3695 andi(r0, ip, Operand(1));
3696 b(second_bit == 1 ? ne : eq, has_color, cr0);
3697 bind(&other_color);
3698 }
3699
3700
3701 // Detect some, but not all, common pointer-free objects. This is used by the
3702 // incremental write barrier which doesn't care about oddballs (they are always
3703 // marked black immediately so this code is not hit).
3704 void MacroAssembler::JumpIfDataObject(Register value, Register scratch,
3705 Label* not_data_object) {
3706 Label is_data_object;
3707 LoadP(scratch, FieldMemOperand(value, HeapObject::kMapOffset));
3708 CompareRoot(scratch, Heap::kHeapNumberMapRootIndex);
3709 beq(&is_data_object);
3710 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3711 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3712 // If it's a string and it's not a cons string then it's an object containing
3713 // no GC pointers.
3714 lbz(scratch, FieldMemOperand(scratch, Map::kInstanceTypeOffset));
3715 STATIC_ASSERT((kIsIndirectStringMask | kIsNotStringMask) == 0x81);
3716 andi(scratch, scratch, Operand(kIsIndirectStringMask | kIsNotStringMask));
3717 bne(not_data_object, cr0);
3718 bind(&is_data_object);
3719 }
3720
3721
3722 void MacroAssembler::GetMarkBits(Register addr_reg, Register bitmap_reg,
3723 Register mask_reg) {
3724 DCHECK(!AreAliased(addr_reg, bitmap_reg, mask_reg, no_reg));
3725 DCHECK((~Page::kPageAlignmentMask & 0xffff) == 0);
3726 lis(r0, Operand((~Page::kPageAlignmentMask >> 16)));
3727 and_(bitmap_reg, addr_reg, r0);
3728 const int kLowBits = kPointerSizeLog2 + Bitmap::kBitsPerCellLog2;
3729 ExtractBitRange(mask_reg, addr_reg, kLowBits - 1, kPointerSizeLog2);
3730 ExtractBitRange(ip, addr_reg, kPageSizeBits - 1, kLowBits);
3731 ShiftLeftImm(ip, ip, Operand(Bitmap::kBytesPerCellLog2));
3732 add(bitmap_reg, bitmap_reg, ip);
3733 li(ip, Operand(1));
3734 slw(mask_reg, ip, mask_reg);
3735 }
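// --- Illustrative sketch (not part of the patch) ---------------------------
// The bit extraction above derives, from an object address, the marking
// bitmap cell that covers it and the single-bit mask within that cell. The
// concrete sizes below (8-byte pointers, 32-bit cells, 1MB pages) are
// assumptions for illustration.
#include <cstdint>

struct MarkBitLocation { uintptr_t cell_offset; uint32_t mask; };

static MarkBitLocation ComputeMarkBits(uintptr_t addr) {
  const int kPointerSizeLog2 = 3;   // 8-byte pointers
  const int kBitsPerCellLog2 = 5;   // 32 mark bits per cell
  const int kBytesPerCellLog2 = 2;  // 4 bytes per cell
  const int kPageSizeBits = 20;     // 1MB pages
  const int kLowBits = kPointerSizeLog2 + kBitsPerCellLog2;

  uintptr_t bit_in_cell =
      (addr >> kPointerSizeLog2) & ((1u << kBitsPerCellLog2) - 1);
  uintptr_t cell_index =
      (addr & ((uintptr_t{1} << kPageSizeBits) - 1)) >> kLowBits;
  return { cell_index << kBytesPerCellLog2, uint32_t{1} << bit_in_cell };
}
// The cell offset is added to the page start (the address with its page-offset
// bits cleared) plus the bitmap's header offset, exactly as done with ip above.
// ----------------------------------------------------------------------------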
3736
3737
3738 void MacroAssembler::EnsureNotWhite(Register value, Register bitmap_scratch,
3739 Register mask_scratch,
3740 Register load_scratch,
3741 Label* value_is_white_and_not_data) {
3742 DCHECK(!AreAliased(value, bitmap_scratch, mask_scratch, ip));
3743 GetMarkBits(value, bitmap_scratch, mask_scratch);
3744
3745 // If the value is black or grey we don't need to do anything.
3746 DCHECK(strcmp(Marking::kWhiteBitPattern, "00") == 0);
3747 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3748 DCHECK(strcmp(Marking::kGreyBitPattern, "11") == 0);
3749 DCHECK(strcmp(Marking::kImpossibleBitPattern, "01") == 0);
3750
3751 Label done;
3752
3753 // Since both black and grey have a 1 in the first position and white does
3754 // not have a 1 there we only need to check one bit.
3755 lwz(load_scratch, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3756 and_(r0, mask_scratch, load_scratch, SetRC);
3757 bne(&done, cr0);
3758
3759 if (emit_debug_code()) {
3760 // Check for impossible bit pattern.
3761 Label ok;
3762 // LSL may overflow, making the check conservative.
3763 slwi(r0, mask_scratch, Operand(1));
3764 and_(r0, load_scratch, r0, SetRC);
3765 beq(&ok, cr0);
3766 stop("Impossible marking bit pattern");
3767 bind(&ok);
3768 }
3769
3770 // Value is white. We check whether it is data that doesn't need scanning.
3771 // Currently only checks for HeapNumber and non-cons strings.
3772 Register map = load_scratch; // Holds map while checking type.
3773 Register length = load_scratch; // Holds length of object after testing type.
3774 Label is_data_object, maybe_string_object, is_string_object, is_encoded;
3775 #if V8_TARGET_ARCH_PPC64
3776 Label length_computed;
3777 #endif
3778
3779
3780 // Check for heap-number
3781 LoadP(map, FieldMemOperand(value, HeapObject::kMapOffset));
3782 CompareRoot(map, Heap::kHeapNumberMapRootIndex);
3783 bne(&maybe_string_object);
3784 li(length, Operand(HeapNumber::kSize));
3785 b(&is_data_object);
3786 bind(&maybe_string_object);
3787
3788 // Check for strings.
3789 DCHECK(kIsIndirectStringTag == 1 && kIsIndirectStringMask == 1);
3790 DCHECK(kNotStringTag == 0x80 && kIsNotStringMask == 0x80);
3791 // If it's a string and it's not a cons string then it's an object containing
3792 // no GC pointers.
3793 Register instance_type = load_scratch;
3794 lbz(instance_type, FieldMemOperand(map, Map::kInstanceTypeOffset));
3795 andi(r0, instance_type, Operand(kIsIndirectStringMask | kIsNotStringMask));
3796 bne(value_is_white_and_not_data, cr0);
3797 // It's a non-indirect (non-cons and non-slice) string.
3798 // If it's external, the length is just ExternalString::kSize.
3799 // Otherwise it's String::kHeaderSize + string->length() * (1 or 2).
3800 // External strings are the only ones with the kExternalStringTag bit
3801 // set.
3802 DCHECK_EQ(0, kSeqStringTag & kExternalStringTag);
3803 DCHECK_EQ(0, kConsStringTag & kExternalStringTag);
3804 andi(r0, instance_type, Operand(kExternalStringTag));
3805 beq(&is_string_object, cr0);
3806 li(length, Operand(ExternalString::kSize));
3807 b(&is_data_object);
3808 bind(&is_string_object);
3809
3810 // Sequential string, either Latin1 or UC16.
3811 // For Latin1 (char-size of 1) we untag the smi to get the length.
3812 // For UC16 (char-size of 2):
3813 // - (32-bit) we just leave the smi tag in place, thereby getting
3814 // the length multiplied by 2.
3815 // - (64-bit) we compute the offset in the 2-byte array
3816 DCHECK(kOneByteStringTag == 4 && kStringEncodingMask == 4);
3817 LoadP(ip, FieldMemOperand(value, String::kLengthOffset));
3818 andi(r0, instance_type, Operand(kStringEncodingMask));
3819 beq(&is_encoded, cr0);
3820 SmiUntag(ip);
3821 #if V8_TARGET_ARCH_PPC64
3822 b(&length_computed);
3823 #endif
3824 bind(&is_encoded);
3825 #if V8_TARGET_ARCH_PPC64
3826 SmiToShortArrayOffset(ip, ip);
3827 bind(&length_computed);
3828 #else
3829 DCHECK(kSmiShift == 1);
3830 #endif
3831 addi(length, ip, Operand(SeqString::kHeaderSize + kObjectAlignmentMask));
3832 li(r0, Operand(~kObjectAlignmentMask));
3833 and_(length, length, r0);
3834
3835 bind(&is_data_object);
3836 // Value is a data object, and it is white. Mark it black. Since we know
3837 // that the object is white we can make it black by flipping one bit.
3838 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3839 orx(ip, ip, mask_scratch);
3840 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kHeaderSize));
3841
3842 mov(ip, Operand(~Page::kPageAlignmentMask));
3843 and_(bitmap_scratch, bitmap_scratch, ip);
3844 lwz(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3845 add(ip, ip, length);
3846 stw(ip, MemOperand(bitmap_scratch, MemoryChunk::kLiveBytesOffset));
3847
3848 bind(&done);
3849 }
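// --- Illustrative sketch (not part of the patch) ---------------------------
// The color encoding asserted above, two bits per object (the first bit is
// the one selected by mask_scratch): white = 00, black = 10, grey = 11.
// Whiteness is therefore a single-bit test, and a white object is blackened
// by setting that one bit, as done with orx above.
#include <cstdint>

static bool IsWhite(uint32_t cell, uint32_t mask) { return (cell & mask) == 0; }
static uint32_t MarkBlack(uint32_t cell, uint32_t mask) { return cell | mask; }
// IsWhite(MarkBlack(cell, mask), mask) == false for any cell/mask pair.
// ----------------------------------------------------------------------------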
3850
3851
3852 // Saturate a value into 8-bit unsigned integer
3853 // if input_value < 0, output_value is 0
3854 // if input_value > 255, output_value is 255
3855 // otherwise output_value is the input_value
3856 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3857 Label done, negative_label, overflow_label;
3858 int satval = (1 << 8) - 1;
3859
3860 cmpi(input_reg, Operand::Zero());
3861 blt(&negative_label);
3862
3863 cmpi(input_reg, Operand(satval));
3864 bgt(&overflow_label);
3865 if (!output_reg.is(input_reg)) {
3866 mr(output_reg, input_reg);
3867 }
3868 b(&done);
3869
3870 bind(&negative_label);
3871 li(output_reg, Operand::Zero()); // set to 0 if negative
3872 b(&done);
3873
3874
3875 bind(&overflow_label); // set to satval if > satval
3876 li(output_reg, Operand(satval));
3877
3878 bind(&done);
3879 }
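// --- Illustrative sketch (not part of the patch) ---------------------------
// The branchy sequence above implements a plain saturating clamp; in C++:
#include <algorithm>
#include <cstdint>

static uint8_t ClampToUint8(int input_value) {
  return static_cast<uint8_t>(std::min(std::max(input_value, 0), 255));
}
// ClampToUint8(-5) == 0, ClampToUint8(300) == 255, ClampToUint8(42) == 42.
// ----------------------------------------------------------------------------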
3880
3881
3882 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
3883
3884
3885 void MacroAssembler::ResetRoundingMode() {
3886 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
3887 }
3888
3889
3890 void MacroAssembler::ClampDoubleToUint8(Register result_reg,
3891 DoubleRegister input_reg,
3892 DoubleRegister double_scratch) {
3893 Label above_zero;
3894 Label done;
3895 Label in_bounds;
3896
3897 LoadDoubleLiteral(double_scratch, 0.0, result_reg);
3898 fcmpu(input_reg, double_scratch);
3899 bgt(&above_zero);
3900
3901   // Double value is less than or equal to zero, or NaN; return 0.
3902 LoadIntLiteral(result_reg, 0);
3903 b(&done);
3904
3905   // Double value is greater than 255; return 255.
3906 bind(&above_zero);
3907 LoadDoubleLiteral(double_scratch, 255.0, result_reg);
3908 fcmpu(input_reg, double_scratch);
3909 ble(&in_bounds);
3910 LoadIntLiteral(result_reg, 255);
3911 b(&done);
3912
3913 // In 0-255 range, round and truncate.
3914 bind(&in_bounds);
3915
3916 // round to nearest (default rounding mode)
3917 fctiw(double_scratch, input_reg);
3918 MovDoubleLowToInt(result_reg, double_scratch);
3919 bind(&done);
3920 }
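// --- Illustrative sketch (not part of the patch) ---------------------------
// The double variant above clamps first and then rounds with the default
// round-to-nearest (ties-to-even) mode of fctiw. A C++ model using the
// current rounding mode via nearbyint:
#include <cmath>
#include <cstdint>

static uint8_t ClampDoubleToUint8(double value) {
  if (!(value > 0.0)) return 0;    // negative, zero or NaN
  if (value > 255.0) return 255;
  return static_cast<uint8_t>(std::nearbyint(value));  // ties to even
}
// ClampDoubleToUint8(0.5) == 0, ClampDoubleToUint8(1.5) == 2 (ties to even).
// ----------------------------------------------------------------------------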
3921
3922
3923 void MacroAssembler::LoadInstanceDescriptors(Register map,
3924 Register descriptors) {
3925 LoadP(descriptors, FieldMemOperand(map, Map::kDescriptorsOffset));
3926 }
3927
3928
3929 void MacroAssembler::NumberOfOwnDescriptors(Register dst, Register map) {
3930 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3931 DecodeField<Map::NumberOfOwnDescriptorsBits>(dst);
3932 }
3933
3934
3935 void MacroAssembler::EnumLength(Register dst, Register map) {
3936 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3937 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3938 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
3939 SmiTag(dst);
3940 }
3941
3942
3943 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3944 Register empty_fixed_array_value = r9;
3945 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3946 Label next, start;
3947 mr(r5, r3);
3948
3949 // Check if the enum length field is properly initialized, indicating that
3950 // there is an enum cache.
3951 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3952
3953 EnumLength(r6, r4);
3954 CmpSmiLiteral(r6, Smi::FromInt(kInvalidEnumCacheSentinel), r0);
3955 beq(call_runtime);
3956
3957 b(&start);
3958
3959 bind(&next);
3960 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3961
3962 // For all objects but the receiver, check that the cache is empty.
3963 EnumLength(r6, r4);
3964 CmpSmiLiteral(r6, Smi::FromInt(0), r0);
3965 bne(call_runtime);
3966
3967 bind(&start);
3968
3969 // Check that there are no elements. Register r5 contains the current JS
3970 // object we've reached through the prototype chain.
3971 Label no_elements;
3972 LoadP(r5, FieldMemOperand(r5, JSObject::kElementsOffset));
3973 cmp(r5, empty_fixed_array_value);
3974 beq(&no_elements);
3975
3976 // Second chance, the object may be using the empty slow element dictionary.
3977 CompareRoot(r5, Heap::kEmptySlowElementDictionaryRootIndex);
3978 bne(call_runtime);
3979
3980 bind(&no_elements);
3981 LoadP(r5, FieldMemOperand(r4, Map::kPrototypeOffset));
3982 cmp(r5, null_value);
3983 bne(&next);
3984 }
3985
3986
3987 ////////////////////////////////////////////////////////////////////////////////
3988 //
3989 // New MacroAssembler Interfaces added for PPC
3990 //
3991 ////////////////////////////////////////////////////////////////////////////////
3992 void MacroAssembler::LoadIntLiteral(Register dst, int value) {
3993 mov(dst, Operand(value));
3994 }
3995
3996
3997 void MacroAssembler::LoadSmiLiteral(Register dst, Smi* smi) {
3998 mov(dst, Operand(smi));
3999 }
4000
4001
4002 void MacroAssembler::LoadDoubleLiteral(DoubleRegister result, double value,
4003 Register scratch) {
4004 #if V8_OOL_CONSTANT_POOL
4005 // TODO(mbrandy): enable extended constant pool usage for doubles.
4006 // See ARM commit e27ab337 for a reference.
4007 if (is_constant_pool_available() && !is_constant_pool_full()) {
4008 RelocInfo rinfo(pc_, value);
4009 ConstantPoolAddEntry(rinfo);
4010 #if V8_TARGET_ARCH_PPC64
4011     // We use a 2-instruction sequence here for consistency with mov.
4012 li(scratch, Operand::Zero());
4013 lfdx(result, MemOperand(kConstantPoolRegister, scratch));
4014 #else
4015 lfd(result, MemOperand(kConstantPoolRegister, 0));
4016 #endif
4017 return;
4018 }
4019 #endif
4020
4021   // Avoid a gcc strict-aliasing error by using a union cast.
4022 union {
4023 double dval;
4024 #if V8_TARGET_ARCH_PPC64
4025 intptr_t ival;
4026 #else
4027 intptr_t ival[2];
4028 #endif
4029 } litVal;
4030
4031 litVal.dval = value;
4032
4033 #if V8_TARGET_ARCH_PPC64
4034 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4035 mov(scratch, Operand(litVal.ival));
4036 mtfprd(result, scratch);
4037 return;
4038 }
4039 #endif
4040
4041 addi(sp, sp, Operand(-kDoubleSize));
4042 #if V8_TARGET_ARCH_PPC64
4043 mov(scratch, Operand(litVal.ival));
4044 std(scratch, MemOperand(sp));
4045 #else
4046 LoadIntLiteral(scratch, litVal.ival[0]);
4047 stw(scratch, MemOperand(sp, 0));
4048 LoadIntLiteral(scratch, litVal.ival[1]);
4049 stw(scratch, MemOperand(sp, 4));
4050 #endif
4051 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4052 lfd(result, MemOperand(sp, 0));
4053 addi(sp, sp, Operand(kDoubleSize));
4054 }
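// --- Illustrative sketch (not part of the patch) ---------------------------
// The union above reinterprets the double literal as raw integer bits so it
// can be materialized through GPRs. The same reinterpretation, written with
// memcpy (which also sidesteps strict aliasing):
#include <cstdint>
#include <cstring>

static uint64_t DoubleBits(double value) {
  uint64_t bits;
  static_assert(sizeof(bits) == sizeof(value), "double must be 64-bit");
  std::memcpy(&bits, &value, sizeof(bits));
  return bits;
}
// DoubleBits(1.0) == 0x3FF0000000000000.
// ----------------------------------------------------------------------------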
4055
4056
4057 void MacroAssembler::MovIntToDouble(DoubleRegister dst, Register src,
4058 Register scratch) {
4059 // sign-extend src to 64-bit
4060 #if V8_TARGET_ARCH_PPC64
4061 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4062 mtfprwa(dst, src);
4063 return;
4064 }
4065 #endif
4066
4067 DCHECK(!src.is(scratch));
4068 subi(sp, sp, Operand(kDoubleSize));
4069 #if V8_TARGET_ARCH_PPC64
4070 extsw(scratch, src);
4071 std(scratch, MemOperand(sp, 0));
4072 #else
4073 srawi(scratch, src, 31);
4074 stw(scratch, MemOperand(sp, Register::kExponentOffset));
4075 stw(src, MemOperand(sp, Register::kMantissaOffset));
4076 #endif
4077 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4078 lfd(dst, MemOperand(sp, 0));
4079 addi(sp, sp, Operand(kDoubleSize));
4080 }
4081
4082
4083 void MacroAssembler::MovUnsignedIntToDouble(DoubleRegister dst, Register src,
4084 Register scratch) {
4085 // zero-extend src to 64-bit
4086 #if V8_TARGET_ARCH_PPC64
4087 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4088 mtfprwz(dst, src);
4089 return;
4090 }
4091 #endif
4092
4093 DCHECK(!src.is(scratch));
4094 subi(sp, sp, Operand(kDoubleSize));
4095 #if V8_TARGET_ARCH_PPC64
4096 clrldi(scratch, src, Operand(32));
4097 std(scratch, MemOperand(sp, 0));
4098 #else
4099 li(scratch, Operand::Zero());
4100 stw(scratch, MemOperand(sp, Register::kExponentOffset));
4101 stw(src, MemOperand(sp, Register::kMantissaOffset));
4102 #endif
4103 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4104 lfd(dst, MemOperand(sp, 0));
4105 addi(sp, sp, Operand(kDoubleSize));
4106 }
4107
4108
4109 void MacroAssembler::MovInt64ToDouble(DoubleRegister dst,
4110 #if !V8_TARGET_ARCH_PPC64
4111 Register src_hi,
4112 #endif
4113 Register src) {
4114 #if V8_TARGET_ARCH_PPC64
4115 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4116 mtfprd(dst, src);
4117 return;
4118 }
4119 #endif
4120
4121 subi(sp, sp, Operand(kDoubleSize));
4122 #if V8_TARGET_ARCH_PPC64
4123 std(src, MemOperand(sp, 0));
4124 #else
4125 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
4126 stw(src, MemOperand(sp, Register::kMantissaOffset));
4127 #endif
4128 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4129 lfd(dst, MemOperand(sp, 0));
4130 addi(sp, sp, Operand(kDoubleSize));
4131 }
4132
4133
4134 #if V8_TARGET_ARCH_PPC64
4135 void MacroAssembler::MovInt64ComponentsToDouble(DoubleRegister dst,
4136 Register src_hi,
4137 Register src_lo,
4138 Register scratch) {
4139 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4140 sldi(scratch, src_hi, Operand(32));
4141 rldimi(scratch, src_lo, 0, 32);
4142 mtfprd(dst, scratch);
4143 return;
4144 }
4145
4146 subi(sp, sp, Operand(kDoubleSize));
4147 stw(src_hi, MemOperand(sp, Register::kExponentOffset));
4148 stw(src_lo, MemOperand(sp, Register::kMantissaOffset));
4149 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4150 lfd(dst, MemOperand(sp));
4151 addi(sp, sp, Operand(kDoubleSize));
4152 }
4153 #endif
4154
4155
4156 void MacroAssembler::MovDoubleLowToInt(Register dst, DoubleRegister src) {
4157 #if V8_TARGET_ARCH_PPC64
4158 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4159 mffprwz(dst, src);
4160 return;
4161 }
4162 #endif
4163
4164 subi(sp, sp, Operand(kDoubleSize));
4165 stfd(src, MemOperand(sp));
4166 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4167 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
4168 addi(sp, sp, Operand(kDoubleSize));
4169 }
4170
4171
4172 void MacroAssembler::MovDoubleHighToInt(Register dst, DoubleRegister src) {
4173 #if V8_TARGET_ARCH_PPC64
4174 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4175 mffprd(dst, src);
4176 srdi(dst, dst, Operand(32));
4177 return;
4178 }
4179 #endif
4180
4181 subi(sp, sp, Operand(kDoubleSize));
4182 stfd(src, MemOperand(sp));
4183 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4184 lwz(dst, MemOperand(sp, Register::kExponentOffset));
4185 addi(sp, sp, Operand(kDoubleSize));
4186 }
4187
4188
4189 void MacroAssembler::MovDoubleToInt64(
4190 #if !V8_TARGET_ARCH_PPC64
4191 Register dst_hi,
4192 #endif
4193 Register dst, DoubleRegister src) {
4194 #if V8_TARGET_ARCH_PPC64
4195 if (CpuFeatures::IsSupported(FPR_GPR_MOV)) {
4196 mffprd(dst, src);
4197 return;
4198 }
4199 #endif
4200
4201 subi(sp, sp, Operand(kDoubleSize));
4202 stfd(src, MemOperand(sp));
4203 nop(GROUP_ENDING_NOP); // LHS/RAW optimization
4204 #if V8_TARGET_ARCH_PPC64
4205 ld(dst, MemOperand(sp, 0));
4206 #else
4207 lwz(dst_hi, MemOperand(sp, Register::kExponentOffset));
4208 lwz(dst, MemOperand(sp, Register::kMantissaOffset));
4209 #endif
4210 addi(sp, sp, Operand(kDoubleSize));
4211 }
4212
4213
4214 void MacroAssembler::Add(Register dst, Register src, intptr_t value,
4215 Register scratch) {
4216 if (is_int16(value)) {
4217 addi(dst, src, Operand(value));
4218 } else {
4219 mov(scratch, Operand(value));
4220 add(dst, src, scratch);
4221 }
4222 }
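// --- Illustrative sketch (not part of the patch) ---------------------------
// Add and the Cmp* helpers below all hinge on whether the immediate fits the
// 16-bit field of the corresponding d-form instruction; a minimal model of
// those checks:
#include <cstdint>

static bool FitsSigned16(intptr_t value) {
  return value >= -32768 && value <= 32767;  // addi/cmpi/cmpwi immediates
}
static bool FitsUnsigned16(intptr_t value) {
  return value >= 0 && value <= 65535;       // cmpli/cmplwi immediates
}
// FitsSigned16(-40000) == false, so Add() falls back to mov + add.
// ----------------------------------------------------------------------------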
4223
4224
4225 void MacroAssembler::Cmpi(Register src1, const Operand& src2, Register scratch,
4226 CRegister cr) {
4227 intptr_t value = src2.immediate();
4228 if (is_int16(value)) {
4229 cmpi(src1, src2, cr);
4230 } else {
4231 mov(scratch, src2);
4232 cmp(src1, scratch, cr);
4233 }
4234 }
4235
4236
4237 void MacroAssembler::Cmpli(Register src1, const Operand& src2, Register scratch,
4238 CRegister cr) {
4239 intptr_t value = src2.immediate();
4240 if (is_uint16(value)) {
4241 cmpli(src1, src2, cr);
4242 } else {
4243 mov(scratch, src2);
4244 cmpl(src1, scratch, cr);
4245 }
4246 }
4247
4248
4249 void MacroAssembler::Cmpwi(Register src1, const Operand& src2, Register scratch,
4250 CRegister cr) {
4251 intptr_t value = src2.immediate();
4252 if (is_int16(value)) {
4253 cmpwi(src1, src2, cr);
4254 } else {
4255 mov(scratch, src2);
4256 cmpw(src1, scratch, cr);
4257 }
4258 }
4259
4260
4261 void MacroAssembler::Cmplwi(Register src1, const Operand& src2,
4262 Register scratch, CRegister cr) {
4263 intptr_t value = src2.immediate();
4264 if (is_uint16(value)) {
4265 cmplwi(src1, src2, cr);
4266 } else {
4267 mov(scratch, src2);
4268 cmplw(src1, scratch, cr);
4269 }
4270 }
4271
4272
4273 void MacroAssembler::And(Register ra, Register rs, const Operand& rb,
4274 RCBit rc) {
4275 if (rb.is_reg()) {
4276 and_(ra, rs, rb.rm(), rc);
4277 } else {
4278 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == SetRC) {
4279 andi(ra, rs, rb);
4280 } else {
4281 // mov handles the relocation.
4282 DCHECK(!rs.is(r0));
4283 mov(r0, rb);
4284 and_(ra, rs, r0, rc);
4285 }
4286 }
4287 }
4288
4289
4290 void MacroAssembler::Or(Register ra, Register rs, const Operand& rb, RCBit rc) {
4291 if (rb.is_reg()) {
4292 orx(ra, rs, rb.rm(), rc);
4293 } else {
4294 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
4295 ori(ra, rs, rb);
4296 } else {
4297 // mov handles the relocation.
4298 DCHECK(!rs.is(r0));
4299 mov(r0, rb);
4300 orx(ra, rs, r0, rc);
4301 }
4302 }
4303 }
4304
4305
4306 void MacroAssembler::Xor(Register ra, Register rs, const Operand& rb,
4307 RCBit rc) {
4308 if (rb.is_reg()) {
4309 xor_(ra, rs, rb.rm(), rc);
4310 } else {
4311 if (is_uint16(rb.imm_) && RelocInfo::IsNone(rb.rmode_) && rc == LeaveRC) {
4312 xori(ra, rs, rb);
4313 } else {
4314 // mov handles the relocation.
4315 DCHECK(!rs.is(r0));
4316 mov(r0, rb);
4317 xor_(ra, rs, r0, rc);
4318 }
4319 }
4320 }
4321
4322
4323 void MacroAssembler::CmpSmiLiteral(Register src1, Smi* smi, Register scratch,
4324 CRegister cr) {
4325 #if V8_TARGET_ARCH_PPC64
4326 LoadSmiLiteral(scratch, smi);
4327 cmp(src1, scratch, cr);
4328 #else
4329 Cmpi(src1, Operand(smi), scratch, cr);
4330 #endif
4331 }
4332
4333
4334 void MacroAssembler::CmplSmiLiteral(Register src1, Smi* smi, Register scratch,
4335 CRegister cr) {
4336 #if V8_TARGET_ARCH_PPC64
4337 LoadSmiLiteral(scratch, smi);
4338 cmpl(src1, scratch, cr);
4339 #else
4340 Cmpli(src1, Operand(smi), scratch, cr);
4341 #endif
4342 }
4343
4344
4345 void MacroAssembler::AddSmiLiteral(Register dst, Register src, Smi* smi,
4346 Register scratch) {
4347 #if V8_TARGET_ARCH_PPC64
4348 LoadSmiLiteral(scratch, smi);
4349 add(dst, src, scratch);
4350 #else
4351 Add(dst, src, reinterpret_cast<intptr_t>(smi), scratch);
4352 #endif
4353 }
4354
4355
4356 void MacroAssembler::SubSmiLiteral(Register dst, Register src, Smi* smi,
4357 Register scratch) {
4358 #if V8_TARGET_ARCH_PPC64
4359 LoadSmiLiteral(scratch, smi);
4360 sub(dst, src, scratch);
4361 #else
4362 Add(dst, src, -(reinterpret_cast<intptr_t>(smi)), scratch);
4363 #endif
4364 }
4365
4366
4367 void MacroAssembler::AndSmiLiteral(Register dst, Register src, Smi* smi,
4368 Register scratch, RCBit rc) {
4369 #if V8_TARGET_ARCH_PPC64
4370 LoadSmiLiteral(scratch, smi);
4371 and_(dst, src, scratch, rc);
4372 #else
4373 And(dst, src, Operand(smi), rc);
4374 #endif
4375 }
4376
4377
4378 // Load a "pointer" sized value from the memory location
4379 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
4380 Register scratch) {
4381 int offset = mem.offset();
4382
4383 if (!scratch.is(no_reg) && !is_int16(offset)) {
4384 /* cannot use d-form */
4385 LoadIntLiteral(scratch, offset);
4386 #if V8_TARGET_ARCH_PPC64
4387 ldx(dst, MemOperand(mem.ra(), scratch));
4388 #else
4389 lwzx(dst, MemOperand(mem.ra(), scratch));
4390 #endif
4391 } else {
4392 #if V8_TARGET_ARCH_PPC64
4393 int misaligned = (offset & 3);
4394 if (misaligned) {
4395 // adjust base to conform to offset alignment requirements
4396       // TODO: enhance to use scratch if dst is unsuitable
4397 DCHECK(!dst.is(r0));
4398 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4399 ld(dst, MemOperand(dst, (offset & ~3) + 4));
4400 } else {
4401 ld(dst, mem);
4402 }
4403 #else
4404 lwz(dst, mem);
4405 #endif
4406 }
4407 }
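// --- Illustrative sketch (not part of the patch) ---------------------------
// On PPC64 the ld instruction is DS-form: its displacement must be a multiple
// of 4. The rewrite above biases the base by (offset & 3) - 4 and uses
// (offset & ~3) + 4 as the displacement; the effective address is unchanged
// and the new displacement is 4-aligned:
#include <cassert>
#include <cstdint>

static void CheckMisalignedRewrite(intptr_t base, int offset) {
  intptr_t biased_base = base + (offset & 3) - 4;
  int aligned_disp = (offset & ~3) + 4;
  assert(aligned_disp % 4 == 0);
  assert(biased_base + aligned_disp == base + offset);
}
// Holds for any offset with (offset & 3) != 0, e.g. offset == 7 or -13.
// ----------------------------------------------------------------------------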
4408
4409
4410 // Store a "pointer" sized value to the memory location
4411 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
4412 Register scratch) {
4413 int offset = mem.offset();
4414
4415 if (!scratch.is(no_reg) && !is_int16(offset)) {
4416 /* cannot use d-form */
4417 LoadIntLiteral(scratch, offset);
4418 #if V8_TARGET_ARCH_PPC64
4419 stdx(src, MemOperand(mem.ra(), scratch));
4420 #else
4421 stwx(src, MemOperand(mem.ra(), scratch));
4422 #endif
4423 } else {
4424 #if V8_TARGET_ARCH_PPC64
4425 int misaligned = (offset & 3);
4426 if (misaligned) {
4427 // adjust base to conform to offset alignment requirements
4428 // a suitable scratch is required here
4429 DCHECK(!scratch.is(no_reg));
4430 if (scratch.is(r0)) {
4431 LoadIntLiteral(scratch, offset);
4432 stdx(src, MemOperand(mem.ra(), scratch));
4433 } else {
4434 addi(scratch, mem.ra(), Operand((offset & 3) - 4));
4435 std(src, MemOperand(scratch, (offset & ~3) + 4));
4436 }
4437 } else {
4438 std(src, mem);
4439 }
4440 #else
4441 stw(src, mem);
4442 #endif
4443 }
4444 }
4445
4446 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
4447 Register scratch) {
4448 int offset = mem.offset();
4449
4450 if (!scratch.is(no_reg) && !is_int16(offset)) {
4451 /* cannot use d-form */
4452 LoadIntLiteral(scratch, offset);
4453 #if V8_TARGET_ARCH_PPC64
4454 // lwax(dst, MemOperand(mem.ra(), scratch));
4455 DCHECK(0); // lwax not yet implemented
4456 #else
4457 lwzx(dst, MemOperand(mem.ra(), scratch));
4458 #endif
4459 } else {
4460 #if V8_TARGET_ARCH_PPC64
4461 int misaligned = (offset & 3);
4462 if (misaligned) {
4463 // adjust base to conform to offset alignment requirements
4464 // Todo: enhance to use scratch if dst is unsuitable
4465 DCHECK(!dst.is(r0));
4466 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4467 lwa(dst, MemOperand(dst, (offset & ~3) + 4));
4468 } else {
4469 lwa(dst, mem);
4470 }
4471 #else
4472 lwz(dst, mem);
4473 #endif
4474 }
4475 }
4476
4477
4478 // Variable length depending on whether offset fits into immediate field
4479 // MemOperand currently only supports d-form
4480 void MacroAssembler::LoadWord(Register dst, const MemOperand& mem,
4481 Register scratch, bool updateForm) {
4482 Register base = mem.ra();
4483 int offset = mem.offset();
4484
4485 bool use_dform = true;
4486 if (!is_int16(offset)) {
4487 use_dform = false;
4488 LoadIntLiteral(scratch, offset);
4489 }
4490
4491 if (!updateForm) {
4492 if (use_dform) {
4493 lwz(dst, mem);
4494 } else {
4495 lwzx(dst, MemOperand(base, scratch));
4496 }
4497 } else {
4498 if (use_dform) {
4499 lwzu(dst, mem);
4500 } else {
4501 lwzux(dst, MemOperand(base, scratch));
4502 }
4503 }
4504 }
4505
4506
4507 // Variable length depending on whether offset fits into immediate field
4508 // MemOperand currently only supports d-form
4509 void MacroAssembler::StoreWord(Register src, const MemOperand& mem,
4510 Register scratch, bool updateForm) {
4511 Register base = mem.ra();
4512 int offset = mem.offset();
4513
4514 bool use_dform = true;
4515 if (!is_int16(offset)) {
4516 use_dform = false;
4517 LoadIntLiteral(scratch, offset);
4518 }
4519
4520 if (!updateForm) {
4521 if (use_dform) {
4522 stw(src, mem);
4523 } else {
4524 stwx(src, MemOperand(base, scratch));
4525 }
4526 } else {
4527 if (use_dform) {
4528 stwu(src, mem);
4529 } else {
4530 stwux(src, MemOperand(base, scratch));
4531 }
4532 }
4533 }
4534
4535
4536 // Variable length depending on whether offset fits into immediate field
4537 // MemOperand currently only supports d-form
4538 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
4539 Register scratch, bool updateForm) {
4540 Register base = mem.ra();
4541 int offset = mem.offset();
4542
4543 bool use_dform = true;
4544 if (!is_int16(offset)) {
4545 use_dform = false;
4546 LoadIntLiteral(scratch, offset);
4547 }
4548
4549 if (!updateForm) {
4550 if (use_dform) {
4551 lhz(dst, mem);
4552 } else {
4553 lhzx(dst, MemOperand(base, scratch));
4554 }
4555 } else {
4556 // If updateForm is ever true, then lhzu will
4557 // need to be implemented
4558 assert(0);
4559 #if 0 // LoadHalfWord w/ update not yet needed
4560 if (use_dform) {
4561 lhzu(dst, mem);
4562 } else {
4563 lhzux(dst, MemOperand(base, scratch));
4564 }
4565 #endif
4566 }
4567 }
4568
4569
4570 // Variable length depending on whether offset fits into immediate field
4571 // MemOperand currently only supports d-form
4572 void MacroAssembler::StoreHalfWord(Register src, const MemOperand& mem,
4573 Register scratch, bool updateForm) {
4574 Register base = mem.ra();
4575 int offset = mem.offset();
4576
4577 bool use_dform = true;
4578 if (!is_int16(offset)) {
4579 use_dform = false;
4580 LoadIntLiteral(scratch, offset);
4581 }
4582
4583 if (!updateForm) {
4584 if (use_dform) {
4585 sth(src, mem);
4586 } else {
4587 sthx(src, MemOperand(base, scratch));
4588 }
4589 } else {
4590 // If updateForm is ever true, then sthu will
4591 // need to be implemented
4592 assert(0);
4593 #if 0 // StoreHalfWord w/ update not yet needed
4594 if (use_dform) {
4595 sthu(src, mem);
4596 } else {
4597 sthux(src, MemOperand(base, scratch));
4598 }
4599 #endif
4600 }
4601 }
4602
4603
4604 // Variable length depending on whether offset fits into immediate field
4605 // MemOperand currently only supports d-form
4606 void MacroAssembler::LoadByte(Register dst, const MemOperand& mem,
4607 Register scratch, bool updateForm) {
4608 Register base = mem.ra();
4609 int offset = mem.offset();
4610
4611 bool use_dform = true;
4612 if (!is_int16(offset)) {
4613 use_dform = false;
4614 LoadIntLiteral(scratch, offset);
4615 }
4616
4617 if (!updateForm) {
4618 if (use_dform) {
4619 lbz(dst, mem);
4620 } else {
4621 lbzx(dst, MemOperand(base, scratch));
4622 }
4623 } else {
4624 // If updateForm is ever true, then lbzu will
4625 // need to be implemented
4626 assert(0);
4627 #if 0 // LoadByte w/ update not yet needed
4628 if (use_dform) {
4629 lbzu(dst, mem);
4630 } else {
4631 lbzux(dst, MemOperand(base, scratch));
4632 }
4633 #endif
4634 }
4635 }
4636
4637
4638 // Variable length depending on whether offset fits into immediate field
4639 // MemOperand currently only supports d-form
4640 void MacroAssembler::StoreByte(Register src, const MemOperand& mem,
4641 Register scratch, bool updateForm) {
4642 Register base = mem.ra();
4643 int offset = mem.offset();
4644
4645 bool use_dform = true;
4646 if (!is_int16(offset)) {
4647 use_dform = false;
4648 LoadIntLiteral(scratch, offset);
4649 }
4650
4651 if (!updateForm) {
4652 if (use_dform) {
4653 stb(src, mem);
4654 } else {
4655 stbx(src, MemOperand(base, scratch));
4656 }
4657 } else {
4658 // If updateForm is ever true, then stbu will
4659 // need to be implemented
4660 assert(0);
4661 #if 0 // StoreByte w/ update not yet needed
4662 if (use_dform) {
4663 stbu(src, mem);
4664 } else {
4665 stbux(src, MemOperand(base, scratch));
4666 }
4667 #endif
4668 }
4669 }
4670
4671
4672 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
4673 Representation r, Register scratch) {
4674 DCHECK(!r.IsDouble());
4675 if (r.IsInteger8()) {
4676 LoadByte(dst, mem, scratch);
4677 extsb(dst, dst);
4678 } else if (r.IsUInteger8()) {
4679 LoadByte(dst, mem, scratch);
4680 } else if (r.IsInteger16()) {
4681 LoadHalfWord(dst, mem, scratch);
4682 extsh(dst, dst);
4683 } else if (r.IsUInteger16()) {
4684 LoadHalfWord(dst, mem, scratch);
4685 #if V8_TARGET_ARCH_PPC64
4686 } else if (r.IsInteger32()) {
4687 LoadWord(dst, mem, scratch);
4688 #endif
4689 } else {
4690 LoadP(dst, mem, scratch);
4691 }
4692 }
4693
4694
4695 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
4696 Representation r, Register scratch) {
4697 DCHECK(!r.IsDouble());
4698 if (r.IsInteger8() || r.IsUInteger8()) {
4699 StoreByte(src, mem, scratch);
4700 } else if (r.IsInteger16() || r.IsUInteger16()) {
4701 StoreHalfWord(src, mem, scratch);
4702 #if V8_TARGET_ARCH_PPC64
4703 } else if (r.IsInteger32()) {
4704 StoreWord(src, mem, scratch);
4705 #endif
4706 } else {
4707 if (r.IsHeapObject()) {
4708 AssertNotSmi(src);
4709 } else if (r.IsSmi()) {
4710 AssertSmi(src);
4711 }
4712 StoreP(src, mem, scratch);
4713 }
4714 }
4715
4716
4717 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
4718 Register scratch_reg,
4719 Label* no_memento_found) {
4720 ExternalReference new_space_start =
4721 ExternalReference::new_space_start(isolate());
4722 ExternalReference new_space_allocation_top =
4723 ExternalReference::new_space_allocation_top_address(isolate());
4724 addi(scratch_reg, receiver_reg,
4725 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
4726 Cmpi(scratch_reg, Operand(new_space_start), r0);
4727 blt(no_memento_found);
4728 mov(ip, Operand(new_space_allocation_top));
4729 LoadP(ip, MemOperand(ip));
4730 cmp(scratch_reg, ip);
4731 bgt(no_memento_found);
4732 LoadP(scratch_reg, MemOperand(scratch_reg, -AllocationMemento::kSize));
4733 Cmpi(scratch_reg, Operand(isolate()->factory()->allocation_memento_map()),
4734 r0);
4735 }
4736
4737
4738 Register GetRegisterThatIsNotOneOf(Register reg1, Register reg2, Register reg3,
4739 Register reg4, Register reg5,
4740 Register reg6) {
4741 RegList regs = 0;
4742 if (reg1.is_valid()) regs |= reg1.bit();
4743 if (reg2.is_valid()) regs |= reg2.bit();
4744 if (reg3.is_valid()) regs |= reg3.bit();
4745 if (reg4.is_valid()) regs |= reg4.bit();
4746 if (reg5.is_valid()) regs |= reg5.bit();
4747 if (reg6.is_valid()) regs |= reg6.bit();
4748
4749 for (int i = 0; i < Register::NumAllocatableRegisters(); i++) {
4750 Register candidate = Register::FromAllocationIndex(i);
4751 if (regs & candidate.bit()) continue;
4752 return candidate;
4753 }
4754 UNREACHABLE();
4755 return no_reg;
4756 }
4757
4758
4759 void MacroAssembler::JumpIfDictionaryInPrototypeChain(Register object,
4760 Register scratch0,
4761 Register scratch1,
4762 Label* found) {
4763 DCHECK(!scratch1.is(scratch0));
4764 Factory* factory = isolate()->factory();
4765 Register current = scratch0;
4766 Label loop_again;
4767
4768 // scratch contained elements pointer.
4769 mr(current, object);
4770
4771 // Loop based on the map going up the prototype chain.
4772 bind(&loop_again);
4773 LoadP(current, FieldMemOperand(current, HeapObject::kMapOffset));
4774 lbz(scratch1, FieldMemOperand(current, Map::kBitField2Offset));
4775 DecodeField<Map::ElementsKindBits>(scratch1);
4776 cmpi(scratch1, Operand(DICTIONARY_ELEMENTS));
4777 beq(found);
4778 LoadP(current, FieldMemOperand(current, Map::kPrototypeOffset));
4779 Cmpi(current, Operand(factory->null_value()), r0);
4780 bne(&loop_again);
4781 }
4782
4783
4784 #ifdef DEBUG
4785 bool AreAliased(Register reg1, Register reg2, Register reg3, Register reg4,
4786 Register reg5, Register reg6, Register reg7, Register reg8) {
4787 int n_of_valid_regs = reg1.is_valid() + reg2.is_valid() + reg3.is_valid() +
4788 reg4.is_valid() + reg5.is_valid() + reg6.is_valid() +
4789 reg7.is_valid() + reg8.is_valid();
4790
4791 RegList regs = 0;
4792 if (reg1.is_valid()) regs |= reg1.bit();
4793 if (reg2.is_valid()) regs |= reg2.bit();
4794 if (reg3.is_valid()) regs |= reg3.bit();
4795 if (reg4.is_valid()) regs |= reg4.bit();
4796 if (reg5.is_valid()) regs |= reg5.bit();
4797 if (reg6.is_valid()) regs |= reg6.bit();
4798 if (reg7.is_valid()) regs |= reg7.bit();
4799 if (reg8.is_valid()) regs |= reg8.bit();
4800 int n_of_non_aliasing_regs = NumRegs(regs);
4801
4802 return n_of_valid_regs != n_of_non_aliasing_regs;
4803 }
4804 #endif
4805
4806
4807 CodePatcher::CodePatcher(byte* address, int instructions,
4808 FlushICache flush_cache)
4809 : address_(address),
4810 size_(instructions * Assembler::kInstrSize),
4811 masm_(NULL, address, size_ + Assembler::kGap),
4812 flush_cache_(flush_cache) {
4813 // Create a new macro assembler pointing to the address of the code to patch.
4814   // The size is adjusted with kGap in order for the assembler to generate
4815   // size bytes of instructions without failing with buffer size constraints.
4816 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4817 }
4818
4819
4820 CodePatcher::~CodePatcher() {
4821 // Indicate that code has changed.
4822 if (flush_cache_ == FLUSH) {
4823 CpuFeatures::FlushICache(address_, size_);
4824 }
4825
4826 // Check that the code was patched as expected.
4827 DCHECK(masm_.pc_ == address_ + size_);
4828 DCHECK(masm_.reloc_info_writer.pos() == address_ + size_ + Assembler::kGap);
4829 }
4830
4831
4832 void CodePatcher::Emit(Instr instr) { masm()->emit(instr); }
4833
4834
4835 void CodePatcher::EmitCondition(Condition cond) {
4836 Instr instr = Assembler::instr_at(masm_.pc_);
4837 switch (cond) {
4838 case eq:
4839 instr = (instr & ~kCondMask) | BT;
4840 break;
4841 case ne:
4842 instr = (instr & ~kCondMask) | BF;
4843 break;
4844 default:
4845 UNIMPLEMENTED();
4846 }
4847 masm_.emit(instr);
4848 }
4849
4850
4851 void MacroAssembler::TruncatingDiv(Register result, Register dividend,
4852 int32_t divisor) {
4853 DCHECK(!dividend.is(result));
4854 DCHECK(!dividend.is(r0));
4855 DCHECK(!result.is(r0));
4856 base::MagicNumbersForDivision<uint32_t> mag =
4857 base::SignedDivisionByConstant(static_cast<uint32_t>(divisor));
4858 mov(r0, Operand(mag.multiplier));
4859 mulhw(result, dividend, r0);
4860 bool neg = (mag.multiplier & (static_cast<uint32_t>(1) << 31)) != 0;
4861 if (divisor > 0 && neg) {
4862 add(result, result, dividend);
4863 }
4864 if (divisor < 0 && !neg && mag.multiplier > 0) {
4865 sub(result, result, dividend);
4866 }
4867 if (mag.shift > 0) srawi(result, result, mag.shift);
4868 ExtractBit(r0, dividend, 31);
4869 add(result, result, r0);
4870 }
4871 }
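// --- Illustrative sketch (not part of the patch) ---------------------------
// The sequence above is the standard signed-division-by-constant trick
// (multiply by a precomputed magic number, take the high 32 bits of the
// product, shift, then add the sign bit of the dividend). A worked C++
// instance for divisor == 3, whose magic pair per the standard construction
// (as computed by base::SignedDivisionByConstant) is multiplier 0x55555556
// and shift 0. An arithmetic right shift on the signed 64-bit product is
// assumed.
#include <cstdint>

static int32_t TruncatingDivBy3(int32_t dividend) {
  const int32_t kMultiplier = 0x55555556;
  const int kShift = 0;
  int32_t q = static_cast<int32_t>(
      (static_cast<int64_t>(dividend) * kMultiplier) >> 32);  // mulhw
  q >>= kShift;                                               // srawi
  if (dividend < 0) q += 1;  // add the sign bit, as ExtractBit/add do above
  return q;
}
// TruncatingDivBy3(7) == 2, TruncatingDivBy3(-7) == -2 (truncating, like C).
// ----------------------------------------------------------------------------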
4872 } // namespace v8::internal
4873
4874 #endif // V8_TARGET_ARCH_PPC