Chromium Code Reviews

Side by Side Diff: src/a64/codegen-a64.cc

Issue 148293020: Merge experimental/a64 to bleeding_edge. (Closed) Base URL: https://v8.googlecode.com/svn/branches/bleeding_edge
Patch Set: Remove ARM from OWNERS (created 6 years, 10 months ago)
// Copyright 2013 the V8 project authors. All rights reserved.
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
//     * Redistributions of source code must retain the above copyright
//       notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
//       copyright notice, this list of conditions and the following
//       disclaimer in the documentation and/or other materials provided
//       with the distribution.
//     * Neither the name of Google Inc. nor the names of its
//       contributors may be used to endorse or promote products derived
//       from this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

#include "v8.h"

#if V8_TARGET_ARCH_A64

#include "codegen.h"
#include "macro-assembler.h"
#include "simulator-a64.h"

namespace v8 {
namespace internal {

#define __ ACCESS_MASM(masm)

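// Under USE_SIMULATOR builds, generated A64 code cannot be entered through an
// ordinary C++ function pointer. The trampoline below stashes the generated
// buffer in a global and forwards calls through Simulator::CallDouble.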
#if defined(USE_SIMULATOR)
byte* fast_exp_a64_machine_code = NULL;
double fast_exp_simulator(double x) {
  Simulator* simulator = Simulator::current(Isolate::Current());
  return simulator->CallDouble(fast_exp_a64_machine_code,
                               Simulator::CallArgument(x),
                               Simulator::CallArgument::End());
}
#endif


UnaryMathFunction CreateExpFunction() {
  if (!FLAG_fast_math) return &std::exp;

  // Use the Math.exp implementation in MathExpGenerator::EmitMathExp() to
  // create an AAPCS64-compliant exp() function. This will be faster than the
  // C library's exp() function, but probably less accurate.
  size_t actual_size;
  byte* buffer = static_cast<byte*>(OS::Allocate(1 * KB, &actual_size, true));
  if (buffer == NULL) return &std::exp;

  ExternalReference::InitializeMathExpData();
  MacroAssembler masm(NULL, buffer, static_cast<int>(actual_size));
  masm.SetStackPointer(csp);

  // The argument will be in d0 on entry.
  DoubleRegister input = d0;
  // Use other caller-saved registers for all other values.
  DoubleRegister result = d1;
  DoubleRegister double_temp1 = d2;
  DoubleRegister double_temp2 = d3;
  Register temp1 = x10;
  Register temp2 = x11;
  Register temp3 = x12;

  MathExpGenerator::EmitMathExp(&masm, input, result,
                                double_temp1, double_temp2,
                                temp1, temp2, temp3);
  // Move the result to the return register.
  masm.Fmov(d0, result);
  masm.Ret();

  CodeDesc desc;
  masm.GetCode(&desc);
  ASSERT(!RelocInfo::RequiresRelocation(desc));

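  // The code was emitted through the data side of the cache; flush the
  // instruction cache so the CPU will fetch the new code, then make the
  // buffer executable (and no longer writable) before handing out a pointer.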
  CPU::FlushICache(buffer, actual_size);
  OS::ProtectCode(buffer, actual_size);

#if !defined(USE_SIMULATOR)
  return FUNCTION_CAST<UnaryMathFunction>(buffer);
#else
  fast_exp_a64_machine_code = buffer;
  return &fast_exp_simulator;
#endif
}

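// A minimal usage sketch (illustrative, not part of this patch): the returned
// UnaryMathFunction is an ordinary double (*)(double), so a caller could do:
//
//   UnaryMathFunction fast_exp = CreateExpFunction();
//   double e = fast_exp(1.0);  // ~2.71828, via the generated code when
//                              // --fast-math is enabled.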

UnaryMathFunction CreateSqrtFunction() {
  return &std::sqrt;
}


// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.

void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
  masm->EnterFrame(StackFrame::INTERNAL);
  ASSERT(!masm->has_frame());
  masm->set_has_frame(true);
}


void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
  masm->LeaveFrame(StackFrame::INTERNAL);
  ASSERT(masm->has_frame());
  masm->set_has_frame(false);
}


// -------------------------------------------------------------------------
// Code generators

void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
    MacroAssembler* masm, AllocationSiteMode mode,
    Label* allocation_memento_found) {
  // ----------- S t a t e -------------
  //  -- x2    : receiver
  //  -- x3    : target map
  // -----------------------------------
  Register receiver = x2;
  Register map = x3;

  if (mode == TRACK_ALLOCATION_SITE) {
    ASSERT(allocation_memento_found != NULL);
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11,
                                         allocation_memento_found);
  }

  // Set transitioned map.
  __ Str(map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver,
                      HeapObject::kMapOffset,
                      map,
                      x10,
                      kLRHasNotBeenSaved,
                      kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}


void ElementsTransitionGenerator::GenerateSmiToDouble(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateSmiToDouble");
  // ----------- S t a t e -------------
  //  -- lr    : return address
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- x3    : target map, scratch for subsequent call
  // -----------------------------------
  Register receiver = x2;
  Register target_map = x3;

  Label gc_required, only_change_map;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Register elements = x4;
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  Register length = x5;
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedDoubleArray.
  Register array_size = x6;
  Register array = x7;
  __ Lsl(array_size, length, kDoubleSizeLog2);
  __ Add(array_size, array_size, FixedDoubleArray::kHeaderSize);
  __ Allocate(array_size, array, x10, x11, &gc_required, DOUBLE_ALIGNMENT);
  // Register array is non-tagged heap object.

  // Set the destination FixedDoubleArray's length and map.
  Register map_root = x6;
  __ LoadRoot(map_root, Heap::kFixedDoubleArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));

  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
                      kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);

  // Replace receiver's backing store with newly created FixedDoubleArray.
  __ Add(x10, array, kHeapObjectTag);
  __ Str(x10, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, x10,
                      x6, kLRHasBeenSaved, kDontSaveFPRegs,
                      EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements, FixedArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedDoubleArray::kHeaderSize);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kDoubleSizeLog2));

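  // FixedDoubleArray represents "the hole" with a dedicated NaN bit pattern
  // (kHoleNanInt64) rather than a tagged pointer, so keep a copy of it in a
  // double register for the loop below.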
  FPRegister nan_d = d1;
  __ Fmov(nan_d, rawbits_to_double(kHoleNanInt64));

  Label entry, done;
  __ B(&entry);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x6,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ B(&done);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(lr);
  __ B(fail);

  // Iterate over the array, copying and converting smis to doubles. If an
  // element is non-smi, write a hole to the destination.
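  // Note that the loop body is branch-free: each element is speculatively
  // untagged and converted, and the Fcsel then picks either the converted
  // double or the hole NaN depending on the smi-tag test.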
  {
    Label loop;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ SmiUntagToDouble(d0, x13, kSpeculativeUntag);
    __ Tst(x13, kSmiTagMask);
    __ Fcsel(d0, d0, nan_d, eq);
    __ Str(d0, MemOperand(dst_elements, kDoubleSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(lr);
  __ Bind(&done);
}


void ElementsTransitionGenerator::GenerateDoubleToObject(
    MacroAssembler* masm, AllocationSiteMode mode, Label* fail) {
  ASM_LOCATION("ElementsTransitionGenerator::GenerateDoubleToObject");
  // ----------- S t a t e -------------
  //  -- x0    : value
  //  -- x1    : key
  //  -- x2    : receiver
  //  -- lr    : return address
  //  -- x3    : target map, scratch for subsequent call
  //  -- x4    : scratch (elements)
  // -----------------------------------
  Register value = x0;
  Register key = x1;
  Register receiver = x2;
  Register target_map = x3;

  if (mode == TRACK_ALLOCATION_SITE) {
    __ JumpIfJSArrayHasAllocationMemento(receiver, x10, x11, fail);
  }

  // Check for empty arrays, which only require a map transition and no changes
  // to the backing store.
  Label only_change_map;
  Register elements = x4;
  __ Ldr(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ JumpIfRoot(elements, Heap::kEmptyFixedArrayRootIndex, &only_change_map);

  __ Push(lr);
  // TODO(all): These registers may not need to be pushed. Examine
  // RecordWriteStub and check whether they are actually needed.
  __ Push(target_map, receiver, key, value);
  Register length = x5;
  __ Ldrsw(length, UntagSmiFieldMemOperand(elements,
                                           FixedArray::kLengthOffset));

  // Allocate new FixedArray.
  Register array_size = x6;
  Register array = x7;
  Label gc_required;
  __ Mov(array_size, FixedDoubleArray::kHeaderSize);
  __ Add(array_size, array_size, Operand(length, LSL, kPointerSizeLog2));
  __ Allocate(array_size, array, x10, x11, &gc_required, NO_ALLOCATION_FLAGS);

  // Set the destination FixedArray's length and map.
  Register map_root = x6;
  __ LoadRoot(map_root, Heap::kFixedArrayMapRootIndex);
  __ SmiTag(x11, length);
  __ Str(x11, MemOperand(array, FixedDoubleArray::kLengthOffset));
  __ Str(map_root, MemOperand(array, HeapObject::kMapOffset));

  // Prepare for conversion loop.
  Register src_elements = x10;
  Register dst_elements = x11;
  Register dst_end = x12;
  __ Add(src_elements, elements,
         FixedDoubleArray::kHeaderSize - kHeapObjectTag);
  __ Add(dst_elements, array, FixedArray::kHeaderSize);
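  // Tag the new array now: dst_elements above was computed from the untagged
  // pointer, but the RecordWrite in the copy loop expects a tagged object.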
  __ Add(array, array, kHeapObjectTag);
  __ Add(dst_end, dst_elements, Operand(length, LSL, kPointerSizeLog2));

  Register the_hole = x14;
  Register heap_num_map = x15;
  __ LoadRoot(the_hole, Heap::kTheHoleValueRootIndex);
  __ LoadRoot(heap_num_map, Heap::kHeapNumberMapRootIndex);

  Label entry;
  __ B(&entry);

  // Call into runtime if GC is required.
  __ Bind(&gc_required);
  __ Pop(value, key, receiver, target_map);
  __ Pop(lr);
  __ B(fail);

  {
    Label loop, convert_hole;
    __ Bind(&loop);
    __ Ldr(x13, MemOperand(src_elements, kPointerSize, PostIndex));
    __ Cmp(x13, kHoleNanInt64);
    __ B(eq, &convert_hole);

    // Non-hole double, copy value into a heap number.
    Register heap_num = x5;
    __ AllocateHeapNumber(heap_num, &gc_required, x6, x4, heap_num_map);
    __ Str(x13, FieldMemOperand(heap_num, HeapNumber::kValueOffset));
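    // Preserve the address of the slot being written: the post-indexed store
    // advances dst_elements, and RecordWrite below needs the slot address.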
    __ Mov(x13, dst_elements);
    __ Str(heap_num, MemOperand(dst_elements, kPointerSize, PostIndex));
    __ RecordWrite(array, x13, heap_num, kLRHasBeenSaved, kDontSaveFPRegs,
                   EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);

    __ B(&entry);

    // Replace the-hole NaN with the-hole pointer.
    __ Bind(&convert_hole);
    __ Str(the_hole, MemOperand(dst_elements, kPointerSize, PostIndex));

    __ Bind(&entry);
    __ Cmp(dst_elements, dst_end);
    __ B(lt, &loop);
  }

  __ Pop(value, key, receiver, target_map);
  // Replace receiver's backing store with newly created and filled FixedArray.
  __ Str(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
  __ RecordWriteField(receiver, JSObject::kElementsOffset, array, x13,
                      kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
  __ Pop(lr);

  __ Bind(&only_change_map);
  __ Str(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
  __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, x13,
                      kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
                      OMIT_SMI_CHECK);
}

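// Code aging support. On A64 every function begins with a fixed-length
// prologue sequence. While the code is "young" this is the ordinary
// frame-setup sequence; to age it, PatchPlatformCodeAge replaces the sequence
// with a call to an age-specific stub, from which GetCodeAgeAndParity can
// later recover the age and parity.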
bool Code::IsYoungSequence(byte* sequence) {
  return MacroAssembler::IsYoungSequence(sequence);
}


void Code::GetCodeAgeAndParity(byte* sequence, Age* age,
                               MarkingParity* parity) {
  if (IsYoungSequence(sequence)) {
    *age = kNoAgeCodeAge;
    *parity = NO_MARKING_PARITY;
  } else {
    byte* target = sequence + kCodeAgeStubEntryOffset;
    Code* stub = GetCodeFromTargetAddress(Memory::Address_at(target));
    GetCodeAgeAndParity(stub, age, parity);
  }
}


void Code::PatchPlatformCodeAge(Isolate* isolate,
                                byte* sequence,
                                Code::Age age,
                                MarkingParity parity) {
  PatchingAssembler patcher(sequence, kCodeAgeSequenceSize / kInstructionSize);
  if (age == kNoAgeCodeAge) {
    MacroAssembler::EmitFrameSetupForCodeAgePatching(&patcher);
  } else {
    Code* stub = GetCodeAgeStub(isolate, age, parity);
    MacroAssembler::EmitCodeAgeSequence(&patcher, stub);
  }
}


void StringCharLoadGenerator::Generate(MacroAssembler* masm,
                                       Register string,
                                       Register index,
                                       Register result,
                                       Label* call_runtime) {
  // Fetch the instance type of the receiver into result register.
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // We need special handling for indirect strings.
  Label check_sequential;
  __ TestAndBranchIfAllClear(result, kIsIndirectStringMask, &check_sequential);

  // Dispatch on the indirect string shape: slice or cons.
  Label cons_string;
  __ TestAndBranchIfAllClear(result, kSlicedNotConsMask, &cons_string);

  // Handle slices.
  Label indirect_string_loaded;
  __ Ldrsw(result,
           UntagSmiFieldMemOperand(string, SlicedString::kOffsetOffset));
  __ Ldr(string, FieldMemOperand(string, SlicedString::kParentOffset));
  __ Add(index, index, result);
  __ B(&indirect_string_loaded);

  // Handle cons strings.
  // Check whether the right hand side is the empty string (i.e. if
  // this is really a flat string in a cons string). If that is not
  // the case we would rather go to the runtime system now to flatten
  // the string.
  __ Bind(&cons_string);
  __ Ldr(result, FieldMemOperand(string, ConsString::kSecondOffset));
  __ JumpIfNotRoot(result, Heap::kempty_stringRootIndex, call_runtime);
  // Get the first of the two strings and load its instance type.
  __ Ldr(string, FieldMemOperand(string, ConsString::kFirstOffset));

  __ Bind(&indirect_string_loaded);
  __ Ldr(result, FieldMemOperand(string, HeapObject::kMapOffset));
  __ Ldrb(result, FieldMemOperand(result, Map::kInstanceTypeOffset));

  // Distinguish sequential and external strings. Only these two string
  // representations can reach here (slices and flat cons strings have been
  // reduced to the underlying sequential or external string).
  Label external_string, check_encoding;
  __ Bind(&check_sequential);
  STATIC_ASSERT(kSeqStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringRepresentationMask, &external_string);
  // Prepare sequential strings: skip past the header so that 'string' points
  // at the character data.
  STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
  __ Add(string, string, SeqTwoByteString::kHeaderSize - kHeapObjectTag);
  __ B(&check_encoding);

  // Handle external strings.
  __ Bind(&external_string);
  if (FLAG_debug_code) {
    // Assert that we do not have a cons or slice (indirect strings) here.
    // Sequential strings have already been ruled out.
    __ Tst(result, kIsIndirectStringMask);
    __ Assert(eq, kExternalStringExpectedButNotFound);
  }
  // Rule out short external strings.
  STATIC_CHECK(kShortExternalStringTag != 0);
  // TestAndBranchIfAnySet can emit Tbnz. Do not use it because call_runtime
  // can be bound far away in deferred code.
  __ Tst(result, kShortExternalStringMask);
  __ B(ne, call_runtime);
  __ Ldr(string, FieldMemOperand(string, ExternalString::kResourceDataOffset));

  Label ascii, done;
  __ Bind(&check_encoding);
  STATIC_ASSERT(kTwoByteStringTag == 0);
  __ TestAndBranchIfAnySet(result, kStringEncodingMask, &ascii);
  // Two-byte string.
  __ Ldrh(result, MemOperand(string, index, LSL, 1));
  __ B(&done);
  __ Bind(&ascii);
  // Ascii string.
  __ Ldrb(result, MemOperand(string, index));
  __ Bind(&done);
}


static MemOperand ExpConstant(Register base, int index) {
  return MemOperand(base, index * kDoubleSize);
}


void MathExpGenerator::EmitMathExp(MacroAssembler* masm,
                                   DoubleRegister input,
                                   DoubleRegister result,
                                   DoubleRegister double_temp1,
                                   DoubleRegister double_temp2,
                                   Register temp1,
                                   Register temp2,
                                   Register temp3) {
  // TODO(jbramley): There are several instances where fnmsub could be used
  // instead of fmul and fsub. Doing this changes the result, but since this is
  // an estimation anyway, does it matter?

  ASSERT(!AreAliased(input, result,
                     double_temp1, double_temp2,
                     temp1, temp2, temp3));
  ASSERT(ExternalReference::math_exp_constants(0).address() != NULL);

  Label done;
  DoubleRegister double_temp3 = result;
  Register constants = temp3;

  // The algorithm used relies on some magic constants which are initialized in
  // ExternalReference::InitializeMathExpData().
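  //
  // Roughly, as can be inferred from the register usage below (see
  // InitializeMathExpData() for the authoritative constants): after range
  // checking, the input is scaled and rounded using the add-a-magic-constant
  // trick so that the low 11 bits of the fixed-point result index a
  // 2048-entry table (the And with 0x7ff), while the remaining high bits
  // (the LSR by 11) supply the binary exponent of the result; a short
  // polynomial in the rounding error then refines the final product.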

  // Load the address of the start of the array.
  __ Mov(constants, Operand(ExternalReference::math_exp_constants(0)));

  // We have to do a four-way split here:
  //  - If input <= about -708.4, the output always rounds to zero.
  //  - If input >= about 709.8, the output always rounds to +infinity.
  //  - If the input is NaN, the output is NaN.
  //  - Otherwise, the result needs to be calculated.
  Label result_is_finite_non_zero;
  // Assert that we can load offset 0 (the small input threshold) and offset 1
  // (the large input threshold) with a single ldp.
  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 1).offset() -
                              ExpConstant(constants, 0).offset()));
  __ Ldp(double_temp1, double_temp2, ExpConstant(constants, 0));

  __ Fcmp(input, double_temp1);
  __ Fccmp(input, double_temp2, NoFlag, hi);
  // At this point, the condition flags can be in one of five states:
  //   NZCV
  //   1000      -708.4 < input < 709.8    result = exp(input)
  //   0110      input == 709.8            result = +infinity
  //   0010      input > 709.8             result = +infinity
  //   0011      input is NaN              result = input
  //   0000      input <= -708.4           result = +0.0

  // Continue the common case first. 'mi' tests N == 1.
  __ B(&result_is_finite_non_zero, mi);

  // TODO(jbramley): Add (and use) a zero D register for A64.
  // TODO(jbramley): Consider adding a +infinity register for A64.
  __ Ldr(double_temp2, ExpConstant(constants, 2));    // Synthesize +infinity.
  __ Fsub(double_temp1, double_temp1, double_temp1);  // Synthesize +0.0.

  // Select between +0.0 and +infinity. 'lo' tests C == 0.
  __ Fcsel(result, double_temp1, double_temp2, lo);
  // Select between {+0.0 or +infinity} and input. 'vc' tests V == 0.
  __ Fcsel(result, result, input, vc);
  __ B(&done);

  // The rest is magic, as described in InitializeMathExpData().
  __ Bind(&result_is_finite_non_zero);

  // Assert that we can load offset 3 and offset 4 with a single ldp.
  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 4).offset() -
                              ExpConstant(constants, 3).offset()));
  __ Ldp(double_temp1, double_temp3, ExpConstant(constants, 3));
  __ Fmadd(double_temp1, double_temp1, input, double_temp3);
  __ Fmov(temp2.W(), double_temp1.S());
  __ Fsub(double_temp1, double_temp1, double_temp3);

  // Assert that we can load offset 5 and offset 6 with a single ldp.
  ASSERT(kDRegSizeInBytes == (ExpConstant(constants, 6).offset() -
                              ExpConstant(constants, 5).offset()));
  __ Ldp(double_temp2, double_temp3, ExpConstant(constants, 5));
  // TODO(jbramley): Consider using Fnmsub here.
  __ Fmul(double_temp1, double_temp1, double_temp2);
  __ Fsub(double_temp1, double_temp1, input);

  __ Fmul(double_temp2, double_temp1, double_temp1);
  __ Fsub(double_temp3, double_temp3, double_temp1);
  __ Fmul(double_temp3, double_temp3, double_temp2);

  __ Mov(temp1.W(), Operand(temp2.W(), LSR, 11));

  __ Ldr(double_temp2, ExpConstant(constants, 7));
  // TODO(jbramley): Consider using Fnmsub here.
  __ Fmul(double_temp3, double_temp3, double_temp2);
  __ Fsub(double_temp3, double_temp3, double_temp1);

  // The 8th constant is 1.0, so use an immediate move rather than a load.
  // We can't generate a runtime assertion here as we would need to call Abort
  // in the runtime and we don't have an Isolate when we generate this code.
  __ Fmov(double_temp2, 1.0);
  __ Fadd(double_temp3, double_temp3, double_temp2);

  __ And(temp2, temp2, 0x7ff);
  __ Add(temp1, temp1, 0x3ff);

  // Do the final table lookup.
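  // The Ldp fetches the table entry as two 32-bit halves; the Orr merges the
  // biased exponent from temp1 into the upper word (LSL 20 places it in the
  // exponent field of an IEEE-754 double), and the Bfi reassembles the two
  // words into a 64-bit value that Fmov transfers to a D register.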
  __ Mov(temp3, Operand(ExternalReference::math_exp_log_table()));

  __ Add(temp3, temp3, Operand(temp2, LSL, kDRegSizeInBytesLog2));
  __ Ldp(temp2.W(), temp3.W(), MemOperand(temp3));
  __ Orr(temp1.W(), temp3.W(), Operand(temp1.W(), LSL, 20));
  __ Bfi(temp2, temp1, 32, 32);
  __ Fmov(double_temp1, temp2);

  __ Fmul(result, double_temp3, double_temp1);

  __ Bind(&done);
}

#undef __

} }  // namespace v8::internal

#endif  // V8_TARGET_ARCH_A64
