Chromium Code Reviews

Side by Side Diff: src/ppc/codegen-ppc.cc

Issue 422063005: Contribution of PowerPC port. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 4 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 //
3 // Copyright IBM Corp. 2012, 2013. All rights reserved.
4 //
5 // Use of this source code is governed by a BSD-style license that can be
6 // found in the LICENSE file.
7
8 #include "src/v8.h"
9
10 #if V8_TARGET_ARCH_PPC
11
12 #include "src/codegen.h"
13 #include "src/macro-assembler.h"
14 #include "src/ppc/simulator-ppc.h"
15
16 namespace v8 {
17 namespace internal {
18
19
20 #define __ masm.
21
22
23 #if defined(USE_SIMULATOR)
24 byte* fast_exp_ppc_machine_code = NULL;
25 double fast_exp_simulator(double x) {
26 return Simulator::current(Isolate::Current())->CallFPReturnsDouble(
27 fast_exp_ppc_machine_code, x, 0);
28 }
29 #endif
30
31
32 UnaryMathFunction CreateExpFunction() {
33 if (!FLAG_fast_math) return &std::exp;
34 size_t actual_size;
35 byte* buffer =
36 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
37 if (buffer == NULL) return &std::exp;
38 ExternalReference::InitializeMathExpData();
39
40 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
41
42 {
43 DoubleRegister input = d1;
44 DoubleRegister result = d2;
45 DoubleRegister double_scratch1 = d3;
46 DoubleRegister double_scratch2 = d4;
47 Register temp1 = r7;
48 Register temp2 = r8;
49 Register temp3 = r9;
50
51 // Called from C
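// On ABIs that use function descriptors (for example 64-bit big-endian PowerPC
// ELF and AIX), the callable entry point is a descriptor rather than raw code,
// so one is emitted here before the instructions that follow.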
52 #if ABI_USES_FUNCTION_DESCRIPTORS
53 __ function_descriptor();
54 #endif
55
56 __ Push(temp3, temp2, temp1);
57 MathExpGenerator::EmitMathExp(
58 &masm, input, result, double_scratch1, double_scratch2,
59 temp1, temp2, temp3);
60 __ Pop(temp3, temp2, temp1);
61 __ fmr(d1, result);
62 __ Ret();
63 }
64
65 CodeDesc desc;
66 masm.GetCode(&desc);
67 #if !ABI_USES_FUNCTION_DESCRIPTORS
68 ASSERT(!RelocInfo::RequiresRelocation(desc));
69 #endif
70
71 CpuFeatures::FlushICache(buffer, actual_size);
72 base::OS::ProtectCode(buffer, actual_size);
73
74 #if !defined(USE_SIMULATOR)
75 return FUNCTION_CAST<UnaryMathFunction>(buffer);
76 #else
77 fast_exp_ppc_machine_code = buffer;
78 return &fast_exp_simulator;
79 #endif
80 }
81
82
83 UnaryMathFunction CreateSqrtFunction() {
84 #if defined(USE_SIMULATOR)
85 return &std::sqrt;
86 #else
87 size_t actual_size;
88 byte* buffer =
89 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
90 if (buffer == NULL) return &std::sqrt;
91
92 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
93
94 // Called from C
95 #if ABI_USES_FUNCTION_DESCRIPTORS
96 __ function_descriptor();
97 #endif
98
99 __ MovFromFloatParameter(d1);
100 __ fsqrt(d1, d1);
101 __ MovToFloatResult(d1);
102 __ Ret();
103
104 CodeDesc desc;
105 masm.GetCode(&desc);
106 #if !ABI_USES_FUNCTION_DESCRIPTORS
107 ASSERT(!RelocInfo::RequiresRelocation(desc));
108 #endif
109
110 CpuFeatures::FlushICache(buffer, actual_size);
111 base::OS::ProtectCode(buffer, actual_size);
112 return FUNCTION_CAST<UnaryMathFunction>(buffer);
113 #endif
114 }
115
116 #undef __
117
118
119 // -------------------------------------------------------------------------
120 // Platform-specific RuntimeCallHelper functions.
121
122 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
123 masm->EnterFrame(StackFrame::INTERNAL);
124 ASSERT(!masm->has_frame());
125 masm->set_has_frame(true);
126 }
127
128
129 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
130 masm->LeaveFrame(StackFrame::INTERNAL);
131 ASSERT(masm->has_frame());
132 masm->set_has_frame(false);
133 }
134
135
136 // -------------------------------------------------------------------------
137 // Code generators
138
139 #define __ ACCESS_MASM(masm)
140
141 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
142 MacroAssembler* masm,
143 Register receiver,
144 Register key,
145 Register value,
146 Register target_map,
147 AllocationSiteMode mode,
148 Label* allocation_memento_found) {
149 Register scratch_elements = r7;
150 ASSERT(!AreAliased(receiver, key, value, target_map,
151 scratch_elements));
152
153 if (mode == TRACK_ALLOCATION_SITE) {
154 ASSERT(allocation_memento_found != NULL);
155 __ JumpIfJSArrayHasAllocationMemento(
156 receiver, scratch_elements, allocation_memento_found);
157 }
158
159 // Set transitioned map.
160 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
161 __ RecordWriteField(receiver,
162 HeapObject::kMapOffset,
163 target_map,
164 r11,
165 kLRHasNotBeenSaved,
166 kDontSaveFPRegs,
167 EMIT_REMEMBERED_SET,
168 OMIT_SMI_CHECK);
169 }
170
171
172 void ElementsTransitionGenerator::GenerateSmiToDouble(
173 MacroAssembler* masm,
174 Register receiver,
175 Register key,
176 Register value,
177 Register target_map,
178 AllocationSiteMode mode,
179 Label* fail) {
180 // lr contains the return address
181 Label loop, entry, convert_hole, gc_required, only_change_map, done;
182 Register elements = r7;
183 Register length = r8;
184 Register array = r9;
185 Register array_end = array;
186
187 // target_map parameter can be clobbered.
188 Register scratch1 = target_map;
189 Register scratch2 = r11;
190
191 // Verify input registers don't conflict with locals.
192 ASSERT(!AreAliased(receiver, key, value, target_map,
193 elements, length, array, scratch2));
194
195 if (mode == TRACK_ALLOCATION_SITE) {
196 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
197 }
198
199 // Check for empty arrays, which only require a map transition and no changes
200 // to the backing store.
201 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
202 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
203 __ beq(&only_change_map);
204
205 // Preserve lr and use r17 as a temporary register.
206 __ mflr(r0);
207 __ Push(r0);
208
209 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
210 // length: number of elements (smi-tagged)
211
212 // Allocate new FixedDoubleArray.
213 __ SmiToDoubleArrayOffset(r17, length);
214 __ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize));
215 __ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT);
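// array: destination FixedDoubleArray, not tagged as heap object
// (kHeapObjectTag is added below, just before it is stored into the receiver).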
216
217 // Set destination FixedDoubleArray's length and map.
218 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
219 __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
220 __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
221
222 // Update receiver's map.
223 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
224 __ RecordWriteField(receiver,
225 HeapObject::kMapOffset,
226 target_map,
227 scratch2,
228 kLRHasBeenSaved,
229 kDontSaveFPRegs,
230 OMIT_REMEMBERED_SET,
231 OMIT_SMI_CHECK);
232 // Replace receiver's backing store with newly created FixedDoubleArray.
233 __ addi(scratch1, array, Operand(kHeapObjectTag));
234 __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
235 __ RecordWriteField(receiver,
236 JSObject::kElementsOffset,
237 scratch1,
238 scratch2,
239 kLRHasBeenSaved,
240 kDontSaveFPRegs,
241 EMIT_REMEMBERED_SET,
242 OMIT_SMI_CHECK);
243
244 // Prepare for conversion loop.
245 __ addi(target_map, elements,
246 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
247 __ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize));
248 __ SmiToDoubleArrayOffset(array, length);
249 __ add(array_end, r10, array);
250 // Repurpose registers no longer in use.
251 #if V8_TARGET_ARCH_PPC64
252 Register hole_int64 = elements;
253 #else
254 Register hole_lower = elements;
255 Register hole_upper = length;
256 #endif
257 // scratch1: begin of source FixedArray element fields, not tagged
258 // hole_lower: kHoleNanLower32 OR hole_int64: kHoleNanInt64
259 // hole_upper: kHoleNanUpper32
260 // array_end: end of destination FixedDoubleArray, not tagged
261 // r10: begin of destination FixedDoubleArray element fields, not tagged
262
263 __ b(&entry);
264
265 __ bind(&only_change_map);
266 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
267 __ RecordWriteField(receiver,
268 HeapObject::kMapOffset,
269 target_map,
270 scratch2,
271 kLRHasNotBeenSaved,
272 kDontSaveFPRegs,
273 OMIT_REMEMBERED_SET,
274 OMIT_SMI_CHECK);
275 __ b(&done);
276
277 // Call into runtime if GC is required.
278 __ bind(&gc_required);
279 __ Pop(r0);
280 __ mtlr(r0);
281 __ b(fail);
282
283 // Convert and copy elements.
284 __ bind(&loop);
285 __ LoadP(r11, MemOperand(scratch1));
286 __ addi(scratch1, scratch1, Operand(kPointerSize));
287 // r11: current element
288 __ UntagAndJumpIfNotSmi(r11, r11, &convert_hole);
289
290 // Normal smi, convert to double and store.
291 __ ConvertIntToDouble(r11, d0);
292 __ stfd(d0, MemOperand(r10, 0));
293 __ addi(r10, r10, Operand(8));
294
295 __ b(&entry);
296
297 // Hole found, store the-hole NaN.
298 __ bind(&convert_hole);
299 if (FLAG_debug_code) {
300 // Restore a "smi-untagged" heap object.
301 __ LoadP(r11, MemOperand(r6, -kPointerSize));
302 __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
303 __ Assert(eq, kObjectFoundInSmiOnlyArray);
304 }
305 #if V8_TARGET_ARCH_PPC64
306 __ std(hole_int64, MemOperand(r10, 0));
307 #else
308 __ stw(hole_upper, MemOperand(r10, Register::kExponentOffset));
309 __ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset));
310 #endif
311 __ addi(r10, r10, Operand(8));
312
313 __ bind(&entry);
314 __ cmp(r10, array_end);
315 __ blt(&loop);
316
317 __ Pop(r0);
318 __ mtlr(r0);
319 __ bind(&done);
320 }
321
322
323 void ElementsTransitionGenerator::GenerateDoubleToObject(
324 MacroAssembler* masm,
325 Register receiver,
326 Register key,
327 Register value,
328 Register target_map,
329 AllocationSiteMode mode,
330 Label* fail) {
331 // Register lr contains the return address.
332 Label entry, loop, convert_hole, gc_required, only_change_map;
333 Register elements = r7;
334 Register array = r9;
335 Register length = r8;
336 Register scratch = r11;
337
338 // Verify input registers don't conflict with locals.
339 ASSERT(!AreAliased(receiver, key, value, target_map,
340 elements, array, length, scratch));
341
342 if (mode == TRACK_ALLOCATION_SITE) {
343 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
344 }
345
346 // Check for empty arrays, which only require a map transition and no changes
347 // to the backing store.
348 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
349 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
350 __ beq(&only_change_map);
351
352 __ Push(target_map, receiver, key, value);
353 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
354 // elements: source FixedDoubleArray
355 // length: number of elements (smi-tagged)
356
357 // Allocate new FixedArray.
358 // Re-use value and target_map registers, as they have been saved on the
359 // stack.
360 Register array_size = value;
361 Register allocate_scratch = target_map;
362 __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
363 __ SmiToPtrArrayOffset(r0, length);
364 __ add(array_size, array_size, r0);
365 __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
366 NO_ALLOCATION_FLAGS);
367 // array: destination FixedArray, not tagged as heap object
368 // Set destination FixedArray's length and map.
369 __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
370 __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
371 __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
372
373 // Prepare for conversion loop.
374 Register src_elements = elements;
375 Register dst_elements = target_map;
376 Register dst_end = length;
377 Register heap_number_map = scratch;
378 __ addi(src_elements, elements,
379 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
380 __ addi(dst_elements, array, Operand(FixedArray::kHeaderSize));
381 __ addi(array, array, Operand(kHeapObjectTag));
382 __ SmiToPtrArrayOffset(length, length);
383 __ add(dst_end, dst_elements, length);
384 __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
385 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
386 // Using offset addresses in src_elements to fully take advantage of
387 // post-indexing.
388 // dst_elements: begin of destination FixedArray element fields, not tagged
389 // src_elements: begin of source FixedDoubleArray element fields,
390 // not tagged, +4
391 // dst_end: end of destination FixedArray, not tagged
392 // array: destination FixedArray
393 // r10: the-hole pointer
394 // heap_number_map: heap number map
395 __ b(&entry);
396
397 // Call into runtime if GC is required.
398 __ bind(&gc_required);
399 __ Pop(target_map, receiver, key, value);
400 __ b(fail);
401
402 __ bind(&loop);
403 Register upper_bits = key;
404 __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
405 __ addi(src_elements, src_elements, Operand(kDoubleSize));
406 // upper_bits: current element's upper 32 bits
407 // src_elements: address of next element's upper 32 bits
408 __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
409 __ beq(&convert_hole);
410
411 // Non-hole double, copy value into a heap number.
412 Register heap_number = receiver;
413 Register scratch2 = value;
414 __ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map,
415 &gc_required);
416 // heap_number: new heap number
417 #if V8_TARGET_ARCH_PPC64
418 __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
419 __ addi(upper_bits, heap_number, Operand(-1)); // subtract tag for std
420 __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
421 #else
422 __ lwz(scratch2, MemOperand(src_elements,
423 Register::kMantissaOffset - kDoubleSize));
424 __ lwz(upper_bits, MemOperand(src_elements,
425 Register::kExponentOffset - kDoubleSize));
426 __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
427 __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
428 #endif
429 __ mr(scratch2, dst_elements);
430 __ StoreP(heap_number, MemOperand(dst_elements));
431 __ addi(dst_elements, dst_elements, Operand(kPointerSize));
432 __ RecordWrite(array,
433 scratch2,
434 heap_number,
435 kLRHasNotBeenSaved,
436 kDontSaveFPRegs,
437 EMIT_REMEMBERED_SET,
438 OMIT_SMI_CHECK);
439 __ b(&entry);
440
441 // Replace the-hole NaN with the-hole pointer.
442 __ bind(&convert_hole);
443 __ StoreP(r10, MemOperand(dst_elements));
444 __ addi(dst_elements, dst_elements, Operand(kPointerSize));
445
446 __ bind(&entry);
447 __ cmpl(dst_elements, dst_end);
448 __ blt(&loop);
449
450 __ Pop(target_map, receiver, key, value);
451 // Replace receiver's backing store with newly created and filled FixedArray.
452 __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
453 __ RecordWriteField(receiver,
454 JSObject::kElementsOffset,
455 array,
456 scratch,
457 kLRHasNotBeenSaved,
458 kDontSaveFPRegs,
459 EMIT_REMEMBERED_SET,
460 OMIT_SMI_CHECK);
461
462 __ bind(&only_change_map);
463 // Update receiver's map.
464 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
465 __ RecordWriteField(receiver,
466 HeapObject::kMapOffset,
467 target_map,
468 scratch,
469 kLRHasNotBeenSaved,
470 kDontSaveFPRegs,
471 OMIT_REMEMBERED_SET,
472 OMIT_SMI_CHECK);
473 }
474
475
476 // Assumes ip can be used as a scratch register below.
477 void StringCharLoadGenerator::Generate(MacroAssembler* masm,
478 Register string,
479 Register index,
480 Register result,
481 Label* call_runtime) {
482 // Fetch the instance type of the receiver into result register.
483 __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
484 __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
485
486 // We need special handling for indirect strings.
487 Label check_sequential;
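// andi. sets CR0, so the branches below test the masked bits via cr0.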
488 __ andi(r0, result, Operand(kIsIndirectStringMask));
489 __ beq(&check_sequential, cr0);
490
491 // Dispatch on the indirect string shape: slice or cons.
492 Label cons_string;
493 __ mov(ip, Operand(kSlicedNotConsMask));
494 __ and_(r0, result, ip, SetRC);
495 __ beq(&cons_string, cr0);
496
497 // Handle slices.
498 Label indirect_string_loaded;
499 __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
500 __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
501 __ SmiUntag(ip, result);
502 __ add(index, index, ip);
503 __ b(&indirect_string_loaded);
504
505 // Handle cons strings.
506 // Check whether the right hand side is the empty string (i.e. if
507 // this is really a flat string in a cons string). If that is not
508 // the case we would rather go to the runtime system now to flatten
509 // the string.
510 __ bind(&cons_string);
511 __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
512 __ CompareRoot(result, Heap::kempty_stringRootIndex);
513 __ bne(call_runtime);
514 // Get the first of the two strings and load its instance type.
515 __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
516
517 __ bind(&indirect_string_loaded);
518 __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
519 __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
520
521 // Distinguish sequential and external strings. Only these two string
522 // representations can reach here (slices and flat cons strings have been
523 // reduced to the underlying sequential or external string).
524 Label external_string, check_encoding;
525 __ bind(&check_sequential);
526 STATIC_ASSERT(kSeqStringTag == 0);
527 __ andi(r0, result, Operand(kStringRepresentationMask));
528 __ bne(&external_string, cr0);
529
530 // Prepare sequential strings
531 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
532 __ addi(string,
533 string,
534 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
535 __ b(&check_encoding);
536
537 // Handle external strings.
538 __ bind(&external_string);
539 if (FLAG_debug_code) {
540 // Assert that we do not have a cons or slice (indirect strings) here.
541 // Sequential strings have already been ruled out.
542 __ andi(r0, result, Operand(kIsIndirectStringMask));
543 __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
544 }
545 // Rule out short external strings.
546 STATIC_ASSERT(kShortExternalStringTag != 0);
547 __ andi(r0, result, Operand(kShortExternalStringMask));
548 __ bne(call_runtime, cr0);
549 __ LoadP(string,
550 FieldMemOperand(string, ExternalString::kResourceDataOffset));
551
552 Label ascii, done;
553 __ bind(&check_encoding);
554 STATIC_ASSERT(kTwoByteStringTag == 0);
555 __ andi(r0, result, Operand(kStringEncodingMask));
556 __ bne(&ascii, cr0);
557 // Two-byte string.
558 __ ShiftLeftImm(result, index, Operand(1));
559 __ lhzx(result, MemOperand(string, result));
560 __ b(&done);
561 __ bind(&ascii);
562 // Ascii string.
563 __ lbzx(result, MemOperand(string, index));
564 __ bind(&done);
565 }
566
567
568 static MemOperand ExpConstant(int index, Register base) {
569 return MemOperand(base, index * kDoubleSize);
570 }
571
572
573 void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
574 DoubleRegister input,
575 DoubleRegister result,
576 DoubleRegister double_scratch1,
577 DoubleRegister double_scratch2,
578 Register temp1,
579 Register temp2,
580 Register temp3) {
581 ASSERT(!input.is(result));
582 ASSERT(!input.is(double_scratch1));
583 ASSERT(!input.is(double_scratch2));
584 ASSERT(!result.is(double_scratch1));
585 ASSERT(!result.is(double_scratch2));
586 ASSERT(!double_scratch1.is(double_scratch2));
587 ASSERT(!temp1.is(temp2));
588 ASSERT(!temp1.is(temp3));
589 ASSERT(!temp2.is(temp3));
590 ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);
591
592 Label zero, infinity, done;
593
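// Range handling: a NaN input falls through the unordered branch with the input
// itself as result, inputs <= ExpConstant(0) take the zero path, and inputs >=
// ExpConstant(1) take the infinity path, which loads ExpConstant(2).  In-range
// inputs are evaluated with the polynomial and lookup table prepared by
// ExternalReference::InitializeMathExpData().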
594 __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
595
596 __ lfd(double_scratch1, ExpConstant(0, temp3));
597 __ fcmpu(double_scratch1, input);
598 __ fmr(result, input);
599 __ bunordered(&done);
600 __ bge(&zero);
601
602 __ lfd(double_scratch2, ExpConstant(1, temp3));
603 __ fcmpu(input, double_scratch2);
604 __ bge(&infinity);
605
606 __ lfd(double_scratch1, ExpConstant(3, temp3));
607 __ lfd(result, ExpConstant(4, temp3));
608 __ fmul(double_scratch1, double_scratch1, input);
609 __ fadd(double_scratch1, double_scratch1, result);
610
611 // Move low word of double_scratch1 to temp2
612 __ subi(sp, sp, Operand(kDoubleSize));
613 __ stfd(double_scratch1, MemOperand(sp));
614 __ nop(); // LHS/RAW optimization
615 __ lwz(temp2, MemOperand(sp, Register::kMantissaOffset));
616
617 __ fsub(double_scratch1, double_scratch1, result);
618 __ lfd(result, ExpConstant(6, temp3));
619 __ lfd(double_scratch2, ExpConstant(5, temp3));
620 __ fmul(double_scratch1, double_scratch1, double_scratch2);
621 __ fsub(double_scratch1, double_scratch1, input);
622 __ fsub(result, result, double_scratch1);
623 __ fmul(double_scratch2, double_scratch1, double_scratch1);
624 __ fmul(result, result, double_scratch2);
625 __ lfd(double_scratch2, ExpConstant(7, temp3));
626 __ fmul(result, result, double_scratch2);
627 __ fsub(result, result, double_scratch1);
628 __ lfd(double_scratch2, ExpConstant(8, temp3));
629 __ fadd(result, result, double_scratch2);
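// temp2 holds the low word of double_scratch1.  Its low 11 bits (masked with
// 0x7ff) index the 2048-entry math_exp_log_table; the remaining bits, after
// adding the IEEE-754 double exponent bias 0x3ff, become the exponent of the
// result assembled on the stack below.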
630 __ srwi(temp1, temp2, Operand(11));
631 __ andi(temp2, temp2, Operand(0x7ff));
632 __ addi(temp1, temp1, Operand(0x3ff));
633
634 // Must not call ExpConstant() after overwriting temp3!
635 __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
636 __ slwi(temp2, temp2, Operand(3));
637 #if V8_TARGET_ARCH_PPC64
638 __ ldx(temp2, MemOperand(temp3, temp2));
639 __ sldi(temp1, temp1, Operand(52));
640 __ orx(temp2, temp1, temp2);
641 __ std(temp2, MemOperand(sp, 0));
642 #else
643 __ add(ip, temp3, temp2);
644 __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
645 __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
646 __ slwi(temp1, temp1, Operand(20));
647 __ orx(temp3, temp1, temp3);
648 __ stw(temp3, MemOperand(sp, Register::kExponentOffset));
649 __ stw(temp2, MemOperand(sp, Register::kMantissaOffset));
650 #endif
651 __ nop(); // LHS/RAW optimization
652 __ lfd(double_scratch1, MemOperand(sp, 0));
653 __ addi(sp, sp, Operand(kDoubleSize));
654
655 __ fmul(result, result, double_scratch1);
656 __ b(&done);
657
658 __ bind(&zero);
659 __ fmr(result, kDoubleRegZero);
660 __ b(&done);
661
662 __ bind(&infinity);
663 __ lfd(result, ExpConstant(2, temp3));
664
665 __ bind(&done);
666 }
667
668 #undef __
669
670 #ifdef DEBUG
671 // mflr ip
672 static const uint32_t kCodeAgePatchFirstInstruction = 0x7d8802a6;
673 #endif
674
675 CodeAgingHelper::CodeAgingHelper() {
676 ASSERT(young_sequence_.length() == kNoCodeAgeSequenceLength);
677 // Since patcher is a large object, allocate it dynamically when needed,
678 // to avoid overloading the stack in stress conditions.
679 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
680 // the process, before the simulator ICache is set up.
681 SmartPointer<CodePatcher> patcher(
682 new CodePatcher(young_sequence_.start(),
683 young_sequence_.length() / Assembler::kInstrSize,
684 CodePatcher::DONT_FLUSH));
685 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
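// Emit the canonical young-code prologue: push a fixed frame, point fp at it,
// and pad with nops.  PatchPlatformCodeAge() copies this sequence back when
// code is marked young again, and IsYoung()/IsOld() compare against it.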
686 patcher->masm()->PushFixedFrame(r4);
687 patcher->masm()->addi(
688 fp, sp, Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
689 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
690 patcher->masm()->nop();
691 }
692 }
693
694
695 #ifdef DEBUG
696 bool CodeAgingHelper::IsOld(byte* candidate) const {
697 return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
698 }
699 #endif
700
701
702 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
703 bool result = isolate->code_aging_helper()->IsYoung(sequence);
704 ASSERT(result || isolate->code_aging_helper()->IsOld(sequence));
705 return result;
706 }
707
708
709 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
710 MarkingParity* parity) {
711 if (IsYoungSequence(isolate, sequence)) {
712 *age = kNoAgeCodeAge;
713 *parity = NO_MARKING_PARITY;
714 } else {
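// An aged sequence calls a code-age stub (see PatchPlatformCodeAge below);
// recover the stub from the embedded call target and read its age and parity.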
715 ConstantPoolArray *constant_pool = NULL;
716 Address target_address = Assembler::target_address_at(
717 sequence + kCodeAgingTargetDelta, constant_pool);
718 Code* stub = GetCodeFromTargetAddress(target_address);
719 GetCodeAgeAndParity(stub, age, parity);
720 }
721 }
722
723
724 void Code::PatchPlatformCodeAge(Isolate* isolate,
725 byte* sequence,
726 Code::Age age,
727 MarkingParity parity) {
728 uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
729 if (age == kNoAgeCodeAge) {
730 isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
731 CpuFeatures::FlushICache(sequence, young_length);
732 } else {
733 // FIXED_SEQUENCE
734 Code* stub = GetCodeAgeStub(isolate, age, parity);
735 CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
736 Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
737 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
738 // We use Call to compute the address of this patch sequence.
739 // Preserve lr since it will be clobbered. See
740 // GenerateMakeCodeYoungAgainCommon for the stub code.
741 patcher.masm()->mflr(ip);
742 patcher.masm()->mov(r3, Operand(target));
743 patcher.masm()->Call(r3);
744 for (int i = 0; i < kCodeAgingSequenceNops; i++) {
745 patcher.masm()->nop();
746 }
747 }
748 }
749
750
751 } } // namespace v8::internal
752
753 #endif // V8_TARGET_ARCH_PPC