Chromium Code Reviews

Side by Side Diff: src/ppc/codegen-ppc.cc

Issue 714093002: PowerPC specific sub-directories. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Created 6 years, 1 month ago

// Copyright 2014 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "src/v8.h"

#if V8_TARGET_ARCH_PPC

#include "src/codegen.h"
#include "src/macro-assembler.h"
#include "src/ppc/simulator-ppc.h"

namespace v8 {
namespace internal {


#define __ masm.


#if defined(USE_SIMULATOR)
byte* fast_exp_ppc_machine_code = NULL;
double fast_exp_simulator(double x) {
  return Simulator::current(Isolate::Current())
      ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
}
#endif
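
// For illustration (not part of the stub): under USE_SIMULATOR the generated
// PPC code cannot execute natively on the host, so callers receive this C++
// trampoline instead, which runs the stub's instructions inside the
// simulator:
//
//   double y = fast_exp_simulator(1.0);  // executes the generated stub
//                                        // under the PPC simulator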


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;
  ExternalReference::InitializeMathExpData();

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  {
    DoubleRegister input = d1;
    DoubleRegister result = d2;
    DoubleRegister double_scratch1 = d3;
    DoubleRegister double_scratch2 = d4;
    Register temp1 = r7;
    Register temp2 = r8;
    Register temp3 = r9;

    // Called from C
#if ABI_USES_FUNCTION_DESCRIPTORS
    __ function_descriptor();
#endif

    __ Push(temp3, temp2, temp1);
    MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
                                  double_scratch2, temp1, temp2, temp3);
    __ Pop(temp3, temp2, temp1);
    __ fmr(d1, result);
    __ Ret();
  }

  CodeDesc desc;
  masm.GetCode(&desc);
#if !ABI_USES_FUNCTION_DESCRIPTORS
  DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_ppc_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}
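
// Usage sketch (hedged, not part of this file): the returned pointer is
// called like any C function taking and returning a double. Whether it is
// the generated stub, the simulator trampoline, or plain std::exp depends
// on FLAG_fast_math, allocation success, and USE_SIMULATOR:
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double e = fast_exp(1.0);  // ~2.718281828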


UnaryMathFunction CreateSqrtFunction() {
#if defined(USE_SIMULATOR)
  return &std::sqrt;
#else
  size_t actual_size;
  byte* buffer =
      static_cast<byte*>(base::OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::sqrt;

  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));

  // Called from C
#if ABI_USES_FUNCTION_DESCRIPTORS
  __ function_descriptor();
#endif

  __ MovFromFloatParameter(d1);
  __ fsqrt(d1, d1);
  __ MovToFloatResult(d1);
  __ Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
#if !ABI_USES_FUNCTION_DESCRIPTORS
  DCHECK(!RelocInfo::RequiresRelocation(desc));
#endif

  CpuFeatures::FlushICache(buffer, actual_size);
  base::OS::ProtectCode(buffer, actual_size);
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#endif
}
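
// The two generators above share one code-buffer lifecycle, summarized here
// for reference (names as used above; this is a recap, not new API):
//
//   1. base::OS::Allocate(...)             -- get a writable buffer
//   2. MacroAssembler masm(...); __ ...    -- emit instructions into it
//   3. masm.GetCode(&desc)                 -- finalize the code descriptor
//   4. CpuFeatures::FlushICache(...)       -- sync I-cache with D-cache
//   5. base::OS::ProtectCode(...)          -- make the buffer executable
//   6. FUNCTION_CAST<UnaryMathFunction>()  -- call it as a C function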

#undef __


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  DCHECK(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  DCHECK(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

#define __ ACCESS_MASM(masm)

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  Register scratch_elements = r7;
  DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));

  if (mode == TRACK_ALLOCATION_SITE) {
    DCHECK(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, scratch_elements,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
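
// What the stub above does, as a hedged C++-level sketch (helper names here
// are illustrative, not V8 API): a map-change transition only swaps the
// hidden class; the backing store is untouched, so all that is needed is
// the map store plus a write barrier for the new map pointer:
//
//   receiver->set_map(target_map);       // the StoreP above
//   WriteBarrier(receiver, target_map);  // the RecordWriteField above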


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // lr contains the return address
  Label loop, entry, convert_hole, gc_required, only_change_map, done;
  Register elements = r7;
  Register length = r8;
  Register array = r9;
  Register array_end = array;

  // target_map parameter can be clobbered.
  Register scratch1 = target_map;
  Register scratch2 = r11;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
                     scratch2));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  // Preserve lr and use r17 as a temporary register.
  __ mflr(r0);
  __ Push(r0);

  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // length: number of elements (smi-tagged)

  // Allocate new FixedDoubleArray.
  __ SmiToDoubleArrayOffset(r17, length);
  __ addi(r17, r17, Operand(FixedDoubleArray::kHeaderSize));
  __ Allocate(r17, array, r10, scratch2, &gc_required, DOUBLE_ALIGNMENT);

  // Set destination FixedDoubleArray's length and map.
  __ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));

  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ addi(scratch1, array, Operand(kHeapObjectTag));
  __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  __ addi(target_map, elements,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ addi(r10, array, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToDoubleArrayOffset(array, length);
  __ add(array_end, r10, array);
  // Repurpose registers no longer in use and load the hole NaN pattern.
#if V8_TARGET_ARCH_PPC64
  Register hole_int64 = elements;
  __ mov(hole_int64, Operand(kHoleNanInt64));
#else
  Register hole_lower = elements;
  Register hole_upper = length;
  __ mov(hole_lower, Operand(kHoleNanLower32));
  __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
  // scratch1: begin of source FixedArray element fields, not tagged
  // hole_lower: kHoleNanLower32 (hole_int64 holds kHoleNanInt64 on PPC64)
  // hole_upper: kHoleNanUpper32
  // array_end: end of destination FixedDoubleArray, not tagged
  // r10: begin of destination FixedDoubleArray element fields, not tagged

  __ b(&entry);

  __ bind(&only_change_map);
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ b(&done);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(r0);
  __ mtlr(r0);
  __ b(fail);

  // Convert and copy elements.
  __ bind(&loop);
  __ LoadP(r11, MemOperand(scratch1));
  __ addi(scratch1, scratch1, Operand(kPointerSize));
  // r11: current element
  __ UntagAndJumpIfNotSmi(r11, r11, &convert_hole);

  // Normal smi, convert to double and store.
  __ ConvertIntToDouble(r11, d0);
  __ stfd(d0, MemOperand(r10, 0));
  __ addi(r10, r10, Operand(8));

  __ b(&entry);

  // Hole found, store the-hole NaN.
  __ bind(&convert_hole);
  if (FLAG_debug_code) {
    // Restore a "smi-untagged" heap object.
    __ LoadP(r11, MemOperand(scratch1, -kPointerSize));
    __ CompareRoot(r11, Heap::kTheHoleValueRootIndex);
    __ Assert(eq, kObjectFoundInSmiOnlyArray);
  }
#if V8_TARGET_ARCH_PPC64
  __ std(hole_int64, MemOperand(r10, 0));
#else
  __ stw(hole_upper, MemOperand(r10, Register::kExponentOffset));
  __ stw(hole_lower, MemOperand(r10, Register::kMantissaOffset));
#endif
  __ addi(r10, r10, Operand(8));

  __ bind(&entry);
  __ cmp(r10, array_end);
  __ blt(&loop);

  __ Pop(r0);
  __ mtlr(r0);
  __ bind(&done);
}
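
// Hedged sketch of the value-level conversion the loop performs (plain C++,
// illustrative only; V8's real smi helpers live elsewhere). On 32-bit
// targets a smi stores the integer in the upper 31 bits over a 0 tag bit,
// so untagging is an arithmetic shift right by 1 before the int-to-double
// conversion:
//
//   static const intptr_t kIllustrativeSmiTagMask = 1;
//   bool IsSmiSlot(intptr_t slot) {
//     return (slot & kIllustrativeSmiTagMask) == 0;
//   }
//   double SmiSlotToDouble(intptr_t slot) {
//     return static_cast<double>(slot >> 1);  // untag, then int -> double
//   }
//
// A non-smi slot in a smi-only array can only be the-hole, which is what
// the convert_hole path asserts and then encodes as the hole NaN.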


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, Register receiver, Register key, Register value,
    Register target_map, AllocationSiteMode mode, Label* fail) {
  // Register lr contains the return address.
  Label loop, convert_hole, gc_required, only_change_map;
  Register elements = r7;
  Register array = r9;
  Register length = r8;
  Register scratch = r11;

  // Verify input registers don't conflict with locals.
  DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
                     scratch));

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, elements, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  __ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
  __ beq(&only_change_map);

  __ Push(target_map, receiver, key, value);
  __ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
  // elements: source FixedDoubleArray
  // length: number of elements (smi-tagged)

  // Allocate new FixedArray.
  // Re-use value and target_map registers, as they have been saved on the
  // stack.
  Register array_size = value;
  Register allocate_scratch = target_map;
  __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
  __ SmiToPtrArrayOffset(r0, length);
  __ add(array_size, array_size, r0);
  __ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
              NO_ALLOCATION_FLAGS);
  // array: destination FixedArray, not tagged as heap object
  // Set destination FixedArray's length and map.
  __ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
  __ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
  __ addi(array, array, Operand(kHeapObjectTag));

  // Prepare for conversion loop.
  Register src_elements = elements;
  Register dst_elements = target_map;
  Register dst_end = length;
  Register heap_number_map = scratch;
  __ addi(src_elements, elements,
          Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
  __ SmiToPtrArrayOffset(length, length);
  __ LoadRoot(r10, Heap::kTheHoleValueRootIndex);

  Label initialization_loop, loop_done;
  __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
  __ beq(&loop_done, cr0);

  // Allocating heap numbers in the loop below can fail and cause a jump to
  // gc_required. We can't leave a partly initialized FixedArray behind,
  // so pessimistically fill it with holes now.
  __ mtctr(r0);
  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
  __ bind(&initialization_loop);
  __ StorePU(r10, MemOperand(dst_elements, kPointerSize));
  __ bdnz(&initialization_loop);

  __ addi(dst_elements, array,
          Operand(FixedArray::kHeaderSize - kHeapObjectTag));
  __ add(dst_end, dst_elements, length);
  __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
  // Using offsetted addresses in src_elements to fully take advantage of
  // post-indexing.
  // dst_elements: begin of destination FixedArray element fields, not tagged
  // src_elements: begin of source FixedDoubleArray element fields,
  //               not tagged, +4
  // dst_end: end of destination FixedArray, not tagged
  // array: destination FixedArray
  // r10: the-hole pointer
  // heap_number_map: heap number map
  __ b(&loop);

  // Call into runtime if GC is required.
  __ bind(&gc_required);
  __ Pop(target_map, receiver, key, value);
  __ b(fail);

  // Replace the-hole NaN with the-hole pointer.
  __ bind(&convert_hole);
  __ StoreP(r10, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ cmpl(dst_elements, dst_end);
  __ bge(&loop_done);

  __ bind(&loop);
  Register upper_bits = key;
  __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
  __ addi(src_elements, src_elements, Operand(kDoubleSize));
  // upper_bits: current element's upper 32 bit
  // src_elements: address of next element's upper 32 bit
  __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
  __ beq(&convert_hole);

  // Non-hole double, copy value into a heap number.
  Register heap_number = receiver;
  Register scratch2 = value;
  __ AllocateHeapNumber(heap_number, scratch2, r11, heap_number_map,
                        &gc_required);
  // heap_number: new heap number
#if V8_TARGET_ARCH_PPC64
  __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
  // subtract tag for std
  __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
  __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
  __ lwz(scratch2,
         MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
  __ lwz(upper_bits,
         MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
  __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
  __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
  __ mr(scratch2, dst_elements);
  __ StoreP(heap_number, MemOperand(dst_elements));
  __ addi(dst_elements, dst_elements, Operand(kPointerSize));
  __ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
                 kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
  __ cmpl(dst_elements, dst_end);
  __ blt(&loop);
  __ bind(&loop_done);

  __ Pop(target_map, receiver, key, value);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  __ bind(&only_change_map);
  // Update receiver's map.
  __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}
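
// Hedged sketch (illustrative, not V8 API): the loop above identifies the
// hole by comparing only the upper 32 bits of each double's bit pattern
// against kHoleNanUpper32. This works because the hole is a NaN bit pattern
// V8 reserves for itself; real NaNs are canonicalized to a different
// pattern before being stored in a double array. In plain C++ terms:
//
//   bool IsHoleNan(uint64_t double_bits, uint32_t hole_nan_upper32) {
//     return static_cast<uint32_t>(double_bits >> 32) == hole_nan_upper32;
//   }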


// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
                                       Register index, Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ andi(r0, result, Operand(kIsIndirectStringMask));
  __ beq(&check_sequential, cr0);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ mov(ip, Operand(kSlicedNotConsMask));
  __ and_(r0, result, ip, SetRC);
  __ beq(&cons_string, cr0);

  // Handle slices.
  Label indirect_string_loaded;
  __ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
  __ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ SmiUntag(ip, result);
  __ add(index, index, ip);
  __ b(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ bind(&cons_string);
  __ LoadP(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ CompareRoot(result, Heap::kempty_stringRootIndex);
  __ bne(call_runtime);
  // Get the first of the two strings and load its instance type.
  __ LoadP(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ bind(&indirect_string_loaded);
  __ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ andi(r0, result, Operand(kStringRepresentationMask));
  __ bne(&external_string, cr0);

  // Prepare sequential strings.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ addi(string, string,
          Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
  __ b(&check_encoding);

  // Handle external strings.
  __ bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ andi(r0, result, Operand(kIsIndirectStringMask));
    __ Assert(eq, kExternalStringExpectedButNotFound, cr0);
  }
  // Rule out short external strings.
  STATIC_ASSERT(kShortExternalStringTag != 0);
  __ andi(r0, result, Operand(kShortExternalStringMask));
  __ bne(call_runtime, cr0);
  __ LoadP(string,
           FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label one_byte, done;
  __ bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ andi(r0, result, Operand(kStringEncodingMask));
  __ bne(&one_byte, cr0);
  // Two-byte string.
  __ ShiftLeftImm(result, index, Operand(1));
  __ lhzx(result, MemOperand(string, result));
  __ b(&done);
  __ bind(&one_byte);
  // One-byte string.
  __ lbzx(result, MemOperand(string, index));
  __ bind(&done);
}
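
// The dispatch above, summarized as hedged C++ pseudocode (control flow
// only; the mask names are V8's real instance-type bits, the accessor
// shapes are illustrative):
//
//   if (type & kIsIndirectStringMask) {
//     if (type & kSlicedNotConsMask) {       // slice: adjust the index and
//       index += slice->offset();            // continue on the parent
//       string = slice->parent();
//     } else {                               // cons: flat iff the second
//       if (cons->second() != empty_string)  // part is empty, else bail to
//         return CallRuntime();              // the runtime to flatten
//       string = cons->first();
//     }
//     type = string->map()->instance_type();
//   }
//   // Now sequential or external; pick one- vs two-byte element access.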


static MemOperand ExpConstant(int index, Register base) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_scratch1,
                                   DoubleRegister double_scratch2,
                                   Register temp1, Register temp2,
                                   Register temp3) {
  DCHECK(!input.is(result));
  DCHECK(!input.is(double_scratch1));
  DCHECK(!input.is(double_scratch2));
  DCHECK(!result.is(double_scratch1));
  DCHECK(!result.is(double_scratch2));
  DCHECK(!double_scratch1.is(double_scratch2));
  DCHECK(!temp1.is(temp2));
  DCHECK(!temp1.is(temp3));
  DCHECK(!temp2.is(temp3));
  DCHECK(ExternalReference::math_exp_constants(0).address() != NULL);
  DCHECK(!masm->serializer_enabled());  // External references not serializable.

  Label zero, infinity, done;

  __ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));

  __ lfd(double_scratch1, ExpConstant(0, temp3));
  __ fcmpu(double_scratch1, input);
  __ fmr(result, input);
  __ bunordered(&done);
  __ bge(&zero);

  __ lfd(double_scratch2, ExpConstant(1, temp3));
  __ fcmpu(input, double_scratch2);
  __ bge(&infinity);

  __ lfd(double_scratch1, ExpConstant(3, temp3));
  __ lfd(result, ExpConstant(4, temp3));
  __ fmul(double_scratch1, double_scratch1, input);
  __ fadd(double_scratch1, double_scratch1, result);
  __ MovDoubleLowToInt(temp2, double_scratch1);
  __ fsub(double_scratch1, double_scratch1, result);
  __ lfd(result, ExpConstant(6, temp3));
  __ lfd(double_scratch2, ExpConstant(5, temp3));
  __ fmul(double_scratch1, double_scratch1, double_scratch2);
  __ fsub(double_scratch1, double_scratch1, input);
  __ fsub(result, result, double_scratch1);
  __ fmul(double_scratch2, double_scratch1, double_scratch1);
  __ fmul(result, result, double_scratch2);
  __ lfd(double_scratch2, ExpConstant(7, temp3));
  __ fmul(result, result, double_scratch2);
  __ fsub(result, result, double_scratch1);
  __ lfd(double_scratch2, ExpConstant(8, temp3));
  __ fadd(result, result, double_scratch2);
  __ srwi(temp1, temp2, Operand(11));
  __ andi(temp2, temp2, Operand(0x7ff));
  __ addi(temp1, temp1, Operand(0x3ff));

  // Must not call ExpConstant() after overwriting temp3!
  __ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
  __ slwi(temp2, temp2, Operand(3));
#if V8_TARGET_ARCH_PPC64
  __ ldx(temp2, MemOperand(temp3, temp2));
  __ sldi(temp1, temp1, Operand(52));
  __ orx(temp2, temp1, temp2);
  __ MovInt64ToDouble(double_scratch1, temp2);
#else
  __ add(ip, temp3, temp2);
  __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
  __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
  __ slwi(temp1, temp1, Operand(20));
  __ orx(temp3, temp1, temp3);
  __ MovInt64ToDouble(double_scratch1, temp3, temp2);
#endif

  __ fmul(result, result, double_scratch1);
  __ b(&done);

  __ bind(&zero);
  __ fmr(result, kDoubleRegZero);
  __ b(&done);

  __ bind(&infinity);
  __ lfd(result, ExpConstant(2, temp3));

  __ bind(&done);
}
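
// Hedged summary of the table-driven algorithm above (based on the
// constants prepared by ExternalReference::InitializeMathExpData; the
// arithmetic shown is illustrative, not a spec):
//
//   exp(x) = 2^(x / ln 2)
//          = 2^k * 2^(f / 2048),  where round(x * 2048 / ln 2) = k*2048 + f
//
// The scaled input is rounded by adding a magic constant of the form
// 3 * 2^51 (ExpConstant(4)) so the rounded integer lands in the double's
// low word (read back via MovDoubleLowToInt). The low 11 bits f index a
// 2048-entry table of 2^(i/2048) mantissas, the high bits k plus the IEEE
// bias 0x3ff are shifted into the exponent field (the sldi/slwi by 52/20
// above), and a short polynomial corrects the remaining error.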

#undef __

CodeAgingHelper::CodeAgingHelper() {
  DCHECK(young_sequence_.length() == kNoCodeAgeSequenceLength);
  // Since patcher is a large object, allocate it dynamically when needed,
  // to avoid overloading the stack in stress conditions.
  // DONT_FLUSH is used because the CodeAgingHelper is initialized early in
  // the process, before the simulator ICache is setup.
  SmartPointer<CodePatcher> patcher(new CodePatcher(
      young_sequence_.start(), young_sequence_.length() / Assembler::kInstrSize,
      CodePatcher::DONT_FLUSH));
  PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
  patcher->masm()->PushFixedFrame(r4);
  patcher->masm()->addi(fp, sp,
                        Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
  for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
    patcher->masm()->nop();
  }
}


#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
  return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif


bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
  bool result = isolate->code_aging_helper()->IsYoung(sequence);
  DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
  return result;
}


void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(isolate, sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    ConstantPoolArray* constant_pool = NULL;
    Address target_address = Assembler::target_address_at(
        sequence + kCodeAgingTargetDelta, constant_pool);
    Code* stub = GetCodeFromTargetAddress(target_address);
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
                                MarkingParity parity) {
  uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
  if (age == kNoAgeCodeAge) {
    isolate->code_aging_helper()->CopyYoungSequenceTo(sequence);
    CpuFeatures::FlushICache(sequence, young_length);
  } else {
    // FIXED_SEQUENCE
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    CodePatcher patcher(sequence, young_length / Assembler::kInstrSize);
    Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
    intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
    // Don't use Call -- we need to preserve ip and lr.
    // GenerateMakeCodeYoungAgainCommon for the stub code.
    patcher.masm()->nop();  // marker to detect sequence (see IsOld)
    patcher.masm()->mov(r3, Operand(target));
    patcher.masm()->Jump(r3);
    for (int i = 0; i < kCodeAgingSequenceNops; i++) {
      patcher.masm()->nop();
    }
  }
}
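
// Hedged sketch of the two sequences this file toggles between (layout
// inferred from the code above; exact encodings live in the assembler):
//
//   young (emitted by CodeAgingHelper):   old (patched in above):
//     PushFixedFrame(r4)                    nop          <-- IsOld marker
//     addi fp, sp, <frame size>             mov r3, <age stub entry>
//     nop * kNoCodeAgeSequenceNops          Jump r3
//                                           nop * kCodeAgingSequenceNops
//
// IsOld() only has to test whether the first instruction is a nop, since
// the young prologue never starts with one.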
}
}  // namespace v8::internal

#endif  // V8_TARGET_ARCH_PPC