Chromium Code Reviews

Side by Side Diff: src/ppc/codegen-ppc.cc

Issue 422063005: Contribution of PowerPC port. (Closed) Base URL: http://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: re-upload - catch up to 8/19 level | Created 6 years, 3 months ago
1 // Copyright 2012 the V8 project authors. All rights reserved.
2 //
3 // Copyright IBM Corp. 2012, 2013. All rights reserved.
4 //
5 // Use of this source code is governed by a BSD-style license that can be
6 // found in the LICENSE file.
7
8 #include "src/v8.h"
9
10 #if V8_TARGET_ARCH_PPC
11
12 #include "src/codegen.h"
13 #include "src/macro-assembler.h"
14 #include "src/ppc/simulator-ppc.h"
15
16 namespace v8 {
17 namespace internal {
18
19
20 #define __ masm.
21
22
23 #if defined(USE_SIMULATOR)
24 byte* fast_exp_ppc_machine_code = NULL;
25 double fast_exp_simulator(double x) {
26 return Simulator::current(Isolate::Current())
27 ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
28 }
29 #endif
30
31
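// CreateExpFunction() below emits a native fast path for Math.exp when
// --fast_math is enabled; if the flag is off or code-buffer allocation
// fails it falls back to std::exp. A minimal usage sketch, assuming
// UnaryMathFunction is the double(*)(double) typedef implied by the
// &std::exp fallback:
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double y = fast_exp(1.0);  // ~2.718281828, via the generated stub
//
// Under USE_SIMULATOR the returned pointer is fast_exp_simulator() above,
// which routes the call through the PPC simulator.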
32 UnaryMathFunction CreateExpFunction() {
33 if (!FLAG_fast_math) return &std::exp;
34 size_t actual_size;
35 byte* buffer =
36 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
37 if (buffer == NULL) return &std::exp;
38 ExternalReference::InitializeMathExpData();
39
40 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
41
42 {
43 DoubleRegister input = d1;
44 DoubleRegister result = d2;
45 DoubleRegister double_scratch1 = d3;
46 DoubleRegister double_scratch2 = d4;
47 Register temp1 = r7;
48 Register temp2 = r8;
49 Register temp3 = r9;
50
51 // Called from C
52 #if ABI_USES_FUNCTION_DESCRIPTORS
53 __ function_descriptor();
54 #endif
55
56 __ Push(temp3, temp2, temp1);
57 MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
58 double_scratch2, temp1, temp2, temp3);
59 __ Pop(temp3, temp2, temp1);
60 __ fmr(d1, result);
61 __ Ret();
62 }
63
64 CodeDesc desc;
65 masm.GetCode(&desc);
66 #if !ABI_USES_FUNCTION_DESCRIPTORS
67 DCHECK(!RelocInfo::RequiresRelocation(desc));
68 #endif
69
70 CpuFeatures::FlushICache(buffer, actual_size);
71 base::OS::ProtectCode(buffer, actual_size);
72
73 #if !defined(USE_SIMULATOR)
74 return FUNCTION_CAST<UnaryMathFunction>(buffer);
75 #else
76 fast_exp_ppc_machine_code = buffer;
77 return &fast_exp_simulator;
78 #endif
79 }
80
81
82 UnaryMathFunction CreateSqrtFunction() {
83 #if defined(USE_SIMULATOR)
84 return &std::sqrt;
85 #else
86 size_t actual_size;
87 byte* buffer =
88 static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
89 if (buffer == NULL) return &std::sqrt;
90
91 MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
92
93 // Called from C
94 #if ABI_USES_FUNCTION_DESCRIPTORS
95 __ function_descriptor();
96 #endif
97
98 __ MovFromFloatParameter(d1);
99 __ fsqrt(d1, d1);
100 __ MovToFloatResult(d1);
101 __ Ret();
102
103 CodeDesc desc;
104 masm.GetCode(&desc);
105 #if !ABI_USES_FUNCTION_DESCRIPTORS
106 DCHECK(!RelocInfo::RequiresRelocation(desc));
107 #endif
108
109 CpuFeatures::FlushICache(buffer, actual_size);
110 base::OS::ProtectCode(buffer, actual_size);
111 return FUNCTION_CAST<UnaryMathFunction>(buffer);
112 #endif
113 }
114
115 #undef __
116
117
118 // -------------------------------------------------------------------------
119 // Platform-specific RuntimeCallHelper functions.
120
121 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
122 masm->EnterFrame(StackFrame::INTERNAL);
123 DCHECK(!masm->has_frame());
124 masm->set_has_frame(true);
125 }
126
127
128 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
129 masm->LeaveFrame(StackFrame::INTERNAL);
130 DCHECK(masm->has_frame());
131 masm->set_has_frame(false);
132 }
133
134
135 // -------------------------------------------------------------------------
136 // Code generators
137
138 #define __ ACCESS_MASM(masm)
139
140 void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
141 MacroAssembler* masm, Register receiver, Register key, Register value,
142 Register target_map, AllocationSiteMode mode,
143 Label* allocation_memento_found) {
144 Register scratch_elements = r7;
145 DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
146
147 if (mode == TRACK_ALLOCATION_SITE) {
148 DCHECK(allocation_memento_found != NULL);
149 __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
150 allocation_memento_found);
151 }
152
153 // Set transitioned map.
154 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
155 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
156 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
157 OMIT_SMI_CHECK);
158 }
159
160
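// GenerateSmiToDouble below switches a JSObject's backing store from a
// FixedArray of smis to a freshly allocated FixedDoubleArray: it allocates
// the new array, rewrites the receiver's map and elements pointer with the
// required write barriers, then loops over the source, untagging each smi
// and storing it as a double; slots containing the hole are written as the
// canonical hole NaN bit pattern instead.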
161 void ElementsTransitionGenerator::GenerateSmiToDouble(
162 MacroAssembler* masm, Register receiver, Register key, Register value,
163 Register target_map, AllocationSiteMode mode, Label* fail) {
164 // lr contains the return address
165 Label loop, entry, convert_hole, gc_required, only_change_map, done;
166 Register elements = r7;
167 Register length = r8;
168 Register array = r9;
169 Register array_end = array;
170
171 // target_map parameter can be clobbered.
172 Register scratch1 = target_map;
173 Register scratch2 = r11;
174
175 // Verify input registers don't conflict with locals.
176 DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
177 scratch2));
178
179 if (mode == TRACK_ALLOCATION_SITE) {
180 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
181 }
182
183 // Check for empty arrays, which only require a map transition and no changes
184 // to the backing store.
185 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
186 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
187 __ beq(&only_change_map);
188
189 // Preserve lr and use r17 as a temporary register.
190 __ mflr(r0);
191 __ Push(r0);
192
193 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
194 // length: number of elements (smi-tagged)
195
196 // Allocate new FixedDoubleArray.
197 __ SmiToDoubleArrayOffset(r17, length);
198 __ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize));
199 __ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT);
200
201 // Set destination FixedDoubleArray's length and map.
202 __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
203 __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
204 // Update receiver's map.
205 __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
206
207 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
208 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
209 kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
210 OMIT_SMI_CHECK);
211 // Replace receiver's backing store with newly created FixedDoubleArray.
212 __ addi(scratch1, array, Operand(kHeapObjectTag));
213 __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
214 __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
215 kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
216 OMIT_SMI_CHECK);
217
218 // Prepare for conversion loop.
219 __ addi(target_map, elements,
220 Operand(FixedArray::kHeaderSize - kHeapObjectTag));
221 __ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize));
222 __ SmiToDoubleArrayOffset(array, length);
223 __ add(array_end, r10, array);
224 // Repurpose registers no longer in use.
225 #if V8_TARGET_ARCH_PPC64
226 Register hole_int64 = elements;
227 #else
228 Register hole_lower = elements;
229 Register hole_upper = length;
230 #endif
231 // scratch1: begin of source FixedArray element fields, not tagged
232 // hole_lower: kHoleNanLower32 OR hole_int64
233 // hole_upper: kHoleNanUpper32
234 // array_end: end of destination FixedDoubleArray, not tagged
235 // scratch2: begin of FixedDoubleArray element fields, not tagged
236
237 __ b(&entry);
238
239 __ bind(&only_change_map);
240 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
241 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
242 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
243 OMIT_SMI_CHECK);
244 __ b(&done);
245
246 // Call into runtime if GC is required.
247 __ bind(&gc_required);
248 __ Pop(r0);
249 __ mtlr(r0);
250 __ b(fail);
251
252 // Convert and copy elements.
253 __ bind(&loop);
254 __ LoadP(r11, MemOperand(scratch1));
255 __ addi(scratch1, scratch1, Operand(kPointerSize));
256 // r11: current element
257 __ UntagAndJumpIfNotSmi(r11, r11, &convert_hole);
258
259 // Normal smi, convert to double and store.
260 __ ConvertIntToDouble(r11, d0);
261 __ stfd(d0, MemOperand(scratch2, 0));
262 __ addi(r10, r10, Operand(8));
263
264 __ b(&entry);
265
266 // Hole found, store the-hole NaN.
267 __ bind(&convert_hole);
268 if (FLAG_debug_code) {
269 // Restore a "smi-untagged" heap object.
270 __ LoadP(r11, MemOperand(r6, -kPointerSize));
271 __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
272 __ Assert(eq, kObjectFoundInSmiOnlyArray);
273 }
274 #if V8_TARGET_ARCH_PPC64
275 __ std(hole_int64, MemOperand(r10, 0));
276 #else
277 __ stw(hole_upper, MemOperand(r10, Register::kExponentOffset));
278 __ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset));
279 #endif
280 __ addi(r10, r10, Operand(8));
281
282 __ bind(&entry);
283 __ cmp(r10, array_end);
284 __ blt(&loop);
285
286 __ Pop(r0);
287 __ mtlr(r0);
288 __ bind(&done);
289 }
290
291
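// GenerateDoubleToObject below performs the reverse transition: it
// allocates a FixedArray, walks the source FixedDoubleArray boxing each
// non-hole double into a new HeapNumber (bailing out to the runtime if
// allocation would require a GC), writes the-hole pointer for hole NaN
// entries, and finally installs the new array as the receiver's backing
// store.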
292 void ElementsTransitionGenerator::GenerateDoubleToObject(
293 MacroAssembler* masm, Register receiver, Register key, Register value,
294 Register target_map, AllocationSiteMode mode, Label* fail) {
295 // Register lr contains the return address.
296 Label entry, loop, convert_hole, gc_required, only_change_map;
297 Register elements = r7;
298 Register array = r9;
299 Register length = r8;
300 Register scratch = r11;
301
302 // Verify input registers don't conflict with locals.
303 DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
304 scratch));
305
306 if (mode == TRACK_ALLOCATION_SITE) {
307 __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
308 }
309
310 // Check for empty arrays, which only require a map transition and no changes
311 // to the backing store.
312 __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
313 __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
314 __ beq(&only_change_map);
315
316 __ Push(target_map, receiver, key, value);
317 __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
318 // elements: source FixedDoubleArray
319 // length: number of elements (smi-tagged)
320
321 // Allocate new FixedArray.
322 // Re-use value and target_map registers, as they have been saved on the
323 // stack.
324 Register array_size = value;
325 Register allocate_scratch = target_map;
326 __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
327 __ SmiToPtrArrayOffset(r0, length);
328 __ add(array_size, array_size, r0);
329 __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
330 NO_ALLOCATION_FLAGS);
331 // array: destination FixedArray, not tagged as heap object
332 // Set destination FixedDoubleArray's length and map.
333 __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
334 __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
335 __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
336
337 // Prepare for conversion loop.
338 Register src_elements = elements;
339 Register dst_elements = target_map;
340 Register dst_end = length;
341 Register heap_number_map = scratch;
342 __ addi(src_elements, elements,
343 Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
344 __ addi(dst_elements, array, Operand(FixedArray::kHeaderSize));
345 __ addi(array, array, Operand(kHeapObjectTag));
346 __ SmiToPtrArrayOffset(length, length);
347 __ add(dst_end, dst_elements, length);
348 __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);
349 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
350 // Using offset addresses in src_elements to fully take advantage of
351 // post-indexing.
352 // dst_elements: begin of destination FixedArray element fields, not tagged
353 // src_elements: begin of source FixedDoubleArray element fields,
354 // not tagged, +4
355 // dst_end: end of destination FixedArray, not tagged
356 // array: destination FixedArray
357 // r10: the-hole pointer
358 // heap_number_map: heap number map
359 __ b(&entry);
360
361 // Call into runtime if GC is required.
362 __ bind(&gc_required);
363 __ Pop(target_map, receiver, key, value);
364 __ b(fail);
365
366 __ bind(&loop);
367 Register upper_bits = key;
368 __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
369 __ addi(src_elements, src_elements, Operand(kDoubleSize));
370 // upper_bits: current element's upper 32 bits
371 // src_elements: address of next element's upper 32 bits
372 __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
373 __ beq(&convert_hole);
374
375 // Non-hole double, copy value into a heap number.
376 Register heap_number = receiver;
377 Register scratch2 = value;
378 __ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map,
379 &gc_required);
380 // heap_number: new heap number
381 #if V8_TARGET_ARCH_PPC64
382 __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
383 __ addi(upper_bits, heap_number, Operand(-1)); // subtract tag for std
384 __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
385 #else
386 __ lwz(scratch2,
387 MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
388 __ lwz(upper_bits,
389 MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
390 __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
391 __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
392 #endif
393 __ mr(scratch2, dst_elements);
394 __ StoreP(heap_number, MemOperand(dst_elements));
395 __ addi(dst_elements, dst_elements, Operand(kPointerSize));
396 __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
397 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
398 __ b(&entry);
399
400 // Replace the-hole NaN with the-hole pointer.
401 __ bind(&convert_hole);
402 __ StoreP(r10, MemOperand(dst_elements));
403 __ addi(dst_elements, dst_elements, Operand(kPointerSize));
404
405 __ bind(&entry);
406 __ cmpl(dst_elements, dst_end);
407 __ blt(&loop);
408
409 __ Pop(target_map, receiver, key, value);
410 // Replace receiver's backing store with newly created and filled FixedArray.
411 __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
412 __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
413 kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
414 OMIT_SMI_CHECK);
415
416 __ bind(&only_change_map);
417 // Update receiver's map.
418 __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
419 __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
420 kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
421 OMIT_SMI_CHECK);
422 }
423
424
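// StringCharLoadGenerator::Generate loads the character at |index| of
// |string| into |result|, dispatching on the string representation:
// slices and cons strings are first reduced to the underlying string,
// external strings are dereferenced through their resource data, and the
// resulting sequential data is read with a one- or two-byte load depending
// on the encoding. Short external strings and non-flat cons strings bail
// out to |call_runtime|.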
425 // assume ip can be used as a scratch register below
426 void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
427 Register index, Register result,
428 Label* call_runtime) {
429 // Fetch the instance type of the receiver into result register.
430 __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
431 __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
432
433 // We need special handling for indirect strings.
434 Label check_sequential;
435 __ andi(r0, result, Operand(kIsIndirectStringMask));
436 __ beq(&check_sequential, cr0);
437
438 // Dispatch on the indirect string shape: slice or cons.
439 Label cons_string;
440 __ mov(ip, Operand(kSlicedNotConsMask));
441 __ and_(r0, result, ip, SetRC);
442 __ beq(&cons_string, cr0);
443
444 // Handle slices.
445 Label indirect_string_loaded;
446 __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
447 __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
448 __ SmiUntag(ip, result);
449 __ add(index, index, ip);
450 __ b(&indirect_string_loaded);
451
452 // Handle cons strings.
453 // Check whether the right hand side is the empty string (i.e. if
454 // this is really a flat string in a cons string). If that is not
455 // the case we would rather go to the runtime system now to flatten
456 // the string.
457 __ bind(&cons_string);
458 __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
459 __ CompareRoot(result, Heap::kempty_stringRootIndex);
460 __ bne(call_runtime);
461 // Get the first of the two strings and load its instance type.
462 __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));
463
464 __ bind(&indirect_string_loaded);
465 __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
466 __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
467
468 // Distinguish sequential and external strings. Only these two string
469 // representations can reach here (slices and flat cons strings have been
470 // reduced to the underlying sequential or external string).
471 Label external_string, check_encoding;
472 __ bind(&check_sequential);
473 STATIC_ASSERT(kSeqStringTag == 0);
474 __ andi(r0, result, Operand(kStringRepresentationMask));
475 __ bne(&external_string, cr0);
476
477 // Prepare sequential strings
478 STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
479 __ addi(string, string,
480 Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
481 __ b(&check_encoding);
482
483 // Handle external strings.
484 __ bind(&external_string);
485 if (FLAG_debug_code) {
486 // Assert that we do not have a cons or slice (indirect strings) here.
487 // Sequential strings have already been ruled out.
488 __ andi(r0, result, Operand(kIsIndirectStringMask));
489 __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
490 }
491 // Rule out short external strings.
492 STATIC_ASSERT(kShortExternalStringTag != 0);
493 __ andi(r0, result, Operand(kShortExternalStringMask));
494 __ bne(call_runtime, cr0);
495 __ LoadP(string,
496 FieldMemOperand(string, ExternalString::kResourceDataOffset));
497
498 Label ascii, done;
499 __ bind(&check_encoding);
500 STATIC_ASSERT(kTwoByteStringTag == 0);
501 __ andi(r0, result, Operand(kStringEncodingMask));
502 __ bne(&ascii, cr0);
503 // Two-byte string.
504 __ ShiftLeftImm(result, index, Operand(1));
505 __ lhzx(result, MemOperand(string, result));
506 __ b(&done);
507 __ bind(&ascii);
508 // Ascii string.
509 __ lbzx(result, MemOperand(string, index));
510 __ bind(&done);
511 }
512
513
514 static MemOperand ExpConstant(int index, Register base) {
515 return MemOperand(base, index * kDoubleSize);
516 }
517
518
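// Sketch of the approximation EmitMathExp below implements, inferred from
// the constants and bit manipulation in the emitted code: the scaled input
// is split so that its low 11 bits (masked with 0x7ff) index an entry of
// math_exp_log_table supplying the mantissa bits of a power-of-two factor,
// while the remaining high bits plus the 0x3ff bias are shifted into the
// IEEE-754 exponent field (bit 52, or bit 20 of the upper word on 32-bit
// targets). That factor is multiplied by a short polynomial correction to
// yield exp(x). Inputs outside the range constants take the zero and
// infinity shortcuts, and NaN input is returned unchanged.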
519 void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
520 DoubleRegister result,
521 DoubleRegister double_scratch1,
522 DoubleRegister double_scratch2,
523 Register temp1, Register temp2,
524 Register temp3) {
525 DCHECK(!input.is(result));
526 DCHECK(!input.is(double_scratch1));
527 DCHECK(!input.is(double_scratch2));
528 DCHECK(!result.is(double_scratch1));
529 DCHECK(!result.is(double_scratch2));
530 DCHECK(!double_scratch1.is(double_scratch2));
531 DCHECK(!temp1.is(temp2));
532 DCHECK(!temp1.is(temp3));
533 DCHECK(!temp2.is(temp3));
534 DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
535 DCHECK(!masm->serializer_enabled()); // External references not serializable.
536
537 Label zero, infinity, done;
538
539 __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
540
541 __ lfd(double_scratch1, ExpConstant(0, temp3));
542 __ fcmpu(double_scratch1, input);
543 __ fmr(result, input);
544 __ bunordered(&done);
545 __ bge(&zero);
546
547 __ lfd(double_scratch2, ExpConstant(1, temp3));
548 __ fcmpu(input, double_scratch2);
549 __ bge(&infinity);
550
551 __ lfd(double_scratch1, ExpConstant(3, temp3));
552 __ lfd(result, ExpConstant(4, temp3));
553 __ fmul(double_scratch1, double_scratch1, input);
554 __ fadd(double_scratch1, double_scratch1, result);
555 __ MovDoubleLowToInt(temp2, double_scratch1);
556 __ fsub(double_scratch1, double_scratch1, result);
557 __ lfd(result, ExpConstant(6, temp3));
558 __ lfd(double_scratch2, ExpConstant(5, temp3));
559 __ fmul(double_scratch1, double_scratch1, double_scratch2);
560 __ fsub(double_scratch1, double_scratch1, input);
561 __ fsub(result, result, double_scratch1);
562 __ fmul(double_scratch2, double_scratch1, double_scratch1);
563 __ fmul(result, result, double_scratch2);
564 __ lfd(double_scratch2, ExpConstant(7, temp3));
565 __ fmul(result, result, double_scratch2);
566 __ fsub(result, result, double_scratch1);
567 __ lfd(double_scratch2, ExpConstant(8, temp3));
568 __ fadd(result, result, double_scratch2);
569 __ srwi(temp1, temp2, Operand(11));
570 __ andi(temp2, temp2, Operand(0x7ff));
571 __ addi(temp1, temp1, Operand(0x3ff));
572
573 // Must not call ExpConstant() after overwriting temp3!
574 __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
575 __ slwi(temp2, temp2, Operand(3));
576 #if V8_TARGET_ARCH_PPC64
577 __ ldx(temp2, MemOperand(temp3, temp2));
578 __ sldi(temp1, temp1, Operand(52));
579 __ orx(temp2, temp1, temp2);
580 __ MovInt64ToDouble(double_scratch1, temp2);
581 #else
582 __ add(ip, temp3, temp2);
583 __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
584 __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
585 __ slwi(temp1, temp1, Operand(20));
586 __ orx(temp3, temp1, temp3);
587 __ MovInt64ToDouble(double_scratch1, temp3, temp2);
588 #endif
589
590 __ fmul(result, result, double_scratch1);
591 __ b(&done);
592
593 __ bind(&zero);
594 __ fmr(result, kDoubleRegZero);
595 __ b(&done);
596
597 __ bind(&infinity);
598 __ lfd(result, ExpConstant(2, temp3));
599
600 __ bind(&done);
601 }
602
603 #undef __
604
605 #ifdef DEBUG
606 // mflr ip
607 static const uint32_t kCodeAgePatchFirstInstruction = 0x7d8802a6;
608 #endif
609
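// Code aging works by patching the function prologue. Young code begins
// with the standard PushFixedFrame/addi frame setup generated below,
// padded with nops up to kNoCodeAgeSequenceLength. PatchPlatformCodeAge()
// replaces that sequence in aged code with
//   mflr ip              // 0x7d8802a6, kCodeAgePatchFirstInstruction
//   mov  r3, <stub address>
//   Call r3
// so the code-age stub can recover the start of the sequence from the
// saved lr; IsOld() recognizes aged code by that first instruction word.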
610 CodeAgingHelper::CodeAgingHelper() {
611 DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
612 // Since patcher is a large object, allocate it dynamically when needed,
613 // to avoid overloading the stack in stress conditions.
614 // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
615 // the process, before the PPC simulator ICache is set up.
616 SmartPointer<CodePatcher> patcher(new CodePatcher(
617 young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
618 CodePatcher::DONT_FLUSH));
619 PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
620 patcher->masm()->PushFixedFrame(r4);
621 patcher->masm()->addi(fp, sp,
622 Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
623 for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
624 patcher->masm()->nop();
625 }
626 }
627
628
629 #ifdef DEBUG
630 bool CodeAgingHelper::IsOld(byte* candidate) const {
631 return Memory::uint32_at(candidate) == kCodeAgePatchFirstInstruction;
632 }
633 #endif
634
635
636 bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
637 bool result = isolate->code_aging_helper()->IsYoung(sequence);
638 DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
639 return result;
640 }
641
642
643 void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
644 MarkingParity* parity) {
645 if (IsYoungSequence(isolate, sequence)) {
646 *age = kNoAgeCodeAge;
647 *parity = NO_MARKING_PARITY;
648 } else {
649 ConstantPoolArray* constant_pool = NULL;
650 Address target_address = Assembler::target_address_at(
651 sequence + kCodeAgingTargetDelta, constant_pool);
652 Code* stub = GetCodeFromTargetAddress(target_address);
653 GetCodeAgeAndParity(stub, age, parity);
654 }
655 }
656
657
658 void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
659 MarkingParity parity) {
660 uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
661 if (age == kNoAgeCodeAge) {
662 isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
663 CpuFeatures::FlushICache(sequence, young_length);
664 } else {
665 // FIXED_SEQUENCE
666 Code* stub = GetCodeAgeStub(isolate, age, parity);
667 CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
668 Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
669 intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
670 // We use Call to compute the address of this patch sequence.
671 // Preserve lr since it will be clobbered. See
672 // GenerateMakeCodeYoungAgainCommon for the stub code.
673 patcher.masm()->mflr(ip);
674 patcher.masm()->mov(r3, Operand(target));
675 patcher.masm()->Call(r3);
676 for (int i = 0; i < kCodeAgingSequenceNops; i++) {
677 patcher.masm()->nop();
678 }
679 }
680 }
681 }
682 } // namespace v8::internal
683
684 #endif // V8_TARGET_ARCH_PPC