Chromium Code Reviews

Unified Diff: src/s390/codegen-s390.cc

Issue 1725243004: S390: Initial impl of S390 asm, masm, code-stubs,... (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Updated BUILD.gn + cpu-s390.cc to address @jochen's comments. Created 4 years, 10 months ago
Index: src/s390/codegen-s390.cc
diff --git a/src/ppc/codegen-ppc.cc b/src/s390/codegen-s390.cc
similarity index 66%
copy from src/ppc/codegen-ppc.cc
copy to src/s390/codegen-s390.cc
index d6d86b0fccb70a45c84e8c6917e7a8db761d4662..a91716c6130f32a52004fba8d6d1b7f00e95f030 100644
--- a/src/ppc/codegen-ppc.cc
+++ b/src/s390/codegen-s390.cc
@@ -1,31 +1,28 @@
-// Copyright 2014 the V8 project authors. All rights reserved.
+// Copyright 2015 the V8 project authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.
-#include "src/ppc/codegen-ppc.h"
+#include "src/s390/codegen-s390.h"
-#if V8_TARGET_ARCH_PPC
+#if V8_TARGET_ARCH_S390
#include "src/codegen.h"
#include "src/macro-assembler.h"
-#include "src/ppc/simulator-ppc.h"
+#include "src/s390/simulator-s390.h"
namespace v8 {
namespace internal {
-
#define __ masm.
-
#if defined(USE_SIMULATOR)
-byte* fast_exp_ppc_machine_code = nullptr;
+byte* fast_exp_s390_machine_code = nullptr;
double fast_exp_simulator(double x, Isolate* isolate) {
- return Simulator::current(isolate)
- ->CallFPReturnsDouble(fast_exp_ppc_machine_code, x, 0);
+ return Simulator::current(isolate)->CallFPReturnsDouble(
+ fast_exp_s390_machine_code, x, 0);
}
#endif
-
UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
size_t actual_size;
byte* buffer =
@@ -37,22 +34,19 @@ UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
CodeObjectRequired::kNo);
{
- DoubleRegister input = d1;
+ DoubleRegister input = d0;
DoubleRegister result = d2;
DoubleRegister double_scratch1 = d3;
DoubleRegister double_scratch2 = d4;
- Register temp1 = r7;
- Register temp2 = r8;
- Register temp3 = r9;
-
-// Called from C
- __ function_descriptor();
+ Register temp1 = r6;
+ Register temp2 = r7;
+ Register temp3 = r8;
__ Push(temp3, temp2, temp1);
MathExpGenerator::EmitMathExp(&masm, input, result, double_scratch1,
double_scratch2, temp1, temp2, temp3);
__ Pop(temp3, temp2, temp1);
- __ fmr(d1, result);
+ __ ldr(d0, result);
__ Ret();
}
@@ -66,12 +60,11 @@ UnaryMathFunctionWithIsolate CreateExpFunction(Isolate* isolate) {
#if !defined(USE_SIMULATOR)
return FUNCTION_CAST<UnaryMathFunctionWithIsolate>(buffer);
#else
- fast_exp_ppc_machine_code = buffer;
+ fast_exp_s390_machine_code = buffer;
return &fast_exp_simulator;
#endif
}
-
UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#if defined(USE_SIMULATOR)
return nullptr;
@@ -84,12 +77,9 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
MacroAssembler masm(isolate, buffer, static_cast<int>(actual_size),
CodeObjectRequired::kNo);
-// Called from C
- __ function_descriptor();
-
- __ MovFromFloatParameter(d1);
- __ fsqrt(d1, d1);
- __ MovToFloatResult(d1);
+ __ MovFromFloatParameter(d0);
+ __ sqdbr(d0, d0);
+ __ MovToFloatResult(d0);
__ Ret();
CodeDesc desc;
@@ -104,7 +94,6 @@ UnaryMathFunctionWithIsolate CreateSqrtFunction(Isolate* isolate) {
#undef __
-
// -------------------------------------------------------------------------
// Platform-specific RuntimeCallHelper functions.
@@ -114,14 +103,12 @@ void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const {
masm->set_has_frame(true);
}
-
void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const {
masm->LeaveFrame(StackFrame::INTERNAL);
DCHECK(masm->has_frame());
masm->set_has_frame(false);
}
-
// -------------------------------------------------------------------------
// Code generators
@@ -131,7 +118,7 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode,
Label* allocation_memento_found) {
- Register scratch_elements = r7;
+ Register scratch_elements = r6;
DCHECK(!AreAliased(receiver, key, value, target_map, scratch_elements));
if (mode == TRACK_ALLOCATION_SITE) {
@@ -141,28 +128,25 @@ void ElementsTransitionGenerator::GenerateMapChangeElementsTransition(
}
// Set transitioned map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
- __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r11,
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
+ __ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, r1,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
-
void ElementsTransitionGenerator::GenerateSmiToDouble(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// lr contains the return address
- Label loop, entry, convert_hole, only_change_map, done;
- Register elements = r7;
- Register length = r8;
- Register array = r9;
+ Label loop, entry, convert_hole, gc_required, only_change_map, done;
+ Register elements = r6;
+ Register length = r7;
+ Register array = r8;
Register array_end = array;
// target_map parameter can be clobbered.
Register scratch1 = target_map;
- Register scratch2 = r10;
- Register scratch3 = r11;
- Register scratch4 = r14;
+ Register scratch2 = r1;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, length, array,
@@ -176,17 +160,18 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// to the backing store.
__ LoadP(elements, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ CompareRoot(elements, Heap::kEmptyFixedArrayRootIndex);
- __ beq(&only_change_map);
+ __ beq(&only_change_map, Label::kNear);
+
+ // Preserve lr and use r14 as a temporary register.
+ __ push(r14);
__ LoadP(length, FieldMemOperand(elements, FixedArray::kLengthOffset));
// length: number of elements (smi-tagged)
// Allocate new FixedDoubleArray.
- __ SmiToDoubleArrayOffset(scratch3, length);
- __ addi(scratch3, scratch3, Operand(FixedDoubleArray::kHeaderSize));
- __ Allocate(scratch3, array, scratch4, scratch2, fail, DOUBLE_ALIGNMENT);
- // array: destination FixedDoubleArray, not tagged as heap object.
- // elements: source FixedArray.
+ __ SmiToDoubleArrayOffset(r14, length);
+ __ AddP(r14, Operand(FixedDoubleArray::kHeaderSize));
+ __ Allocate(r14, array, r9, scratch2, &gc_required, DOUBLE_ALIGNMENT);
// Set destination FixedDoubleArray's length and map.
__ LoadRoot(scratch2, Heap::kFixedDoubleArrayMapRootIndex);
@@ -194,32 +179,29 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// Update receiver's map.
__ StoreP(scratch2, MemOperand(array, HeapObject::kMapOffset));
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
+ kLRHasBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Replace receiver's backing store with newly created FixedDoubleArray.
- __ addi(scratch1, array, Operand(kHeapObjectTag));
- __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
+ __ AddP(scratch1, array, Operand(kHeapObjectTag));
+ __ StoreP(scratch1, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, scratch1, scratch2,
- kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
+ kLRHasBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
// Prepare for conversion loop.
- __ addi(scratch1, elements,
+ __ AddP(target_map, elements,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ addi(scratch2, array, Operand(FixedDoubleArray::kHeaderSize));
- __ SmiToDoubleArrayOffset(array_end, length);
- __ add(array_end, scratch2, array_end);
+ __ AddP(r9, array, Operand(FixedDoubleArray::kHeaderSize));
+ __ SmiToDoubleArrayOffset(array, length);
+ __ AddP(array_end, r9, array);
// Repurpose registers no longer in use.
-#if V8_TARGET_ARCH_PPC64
+#if V8_TARGET_ARCH_S390X
Register hole_int64 = elements;
- __ mov(hole_int64, Operand(kHoleNanInt64));
#else
Register hole_lower = elements;
Register hole_upper = length;
- __ mov(hole_lower, Operand(kHoleNanLower32));
- __ mov(hole_upper, Operand(kHoleNanUpper32));
#endif
// scratch1: begin of source FixedArray element fields, not tagged
// hole_lower: kHoleNanLower32 OR hole_int64
@@ -227,62 +209,67 @@ void ElementsTransitionGenerator::GenerateSmiToDouble(
// array_end: end of destination FixedDoubleArray, not tagged
// scratch2: begin of FixedDoubleArray element fields, not tagged
- __ b(&entry);
+ __ b(&entry, Label::kNear);
__ bind(&only_change_map);
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch2,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
- __ b(&done);
+ __ b(&done, Label::kNear);
+
+ // Call into runtime if GC is required.
+ __ bind(&gc_required);
+ __ pop(r14);
+ __ b(fail);
// Convert and copy elements.
__ bind(&loop);
- __ LoadP(scratch3, MemOperand(scratch1));
- __ addi(scratch1, scratch1, Operand(kPointerSize));
- // scratch3: current element
- __ UntagAndJumpIfNotSmi(scratch3, scratch3, &convert_hole);
+ __ LoadP(r14, MemOperand(scratch1));
+ __ la(scratch1, MemOperand(scratch1, kPointerSize));
+ // r14: current element
+ __ UntagAndJumpIfNotSmi(r14, r14, &convert_hole);
// Normal smi, convert to double and store.
- __ ConvertIntToDouble(scratch3, d0);
- __ stfd(d0, MemOperand(scratch2, 0));
- __ addi(scratch2, scratch2, Operand(8));
- __ b(&entry);
+ __ ConvertIntToDouble(r14, d0);
+ __ StoreDouble(d0, MemOperand(r9, 0));
+ __ la(r9, MemOperand(r9, 8));
+
+ __ b(&entry, Label::kNear);
// Hole found, store the-hole NaN.
__ bind(&convert_hole);
if (FLAG_debug_code) {
- __ LoadP(scratch3, MemOperand(scratch1, -kPointerSize));
- __ CompareRoot(scratch3, Heap::kTheHoleValueRootIndex);
+ // Restore a "smi-untagged" heap object.
+ __ LoadP(r1, MemOperand(r5, -kPointerSize));
+ __ CompareRoot(r1, Heap::kTheHoleValueRootIndex);
__ Assert(eq, kObjectFoundInSmiOnlyArray);
}
-#if V8_TARGET_ARCH_PPC64
- __ std(hole_int64, MemOperand(scratch2, 0));
+#if V8_TARGET_ARCH_S390X
+ __ stg(hole_int64, MemOperand(r9, 0));
#else
- __ stw(hole_upper, MemOperand(scratch2, Register::kExponentOffset));
- __ stw(hole_lower, MemOperand(scratch2, Register::kMantissaOffset));
+ __ StoreW(hole_upper, MemOperand(r9, Register::kExponentOffset));
+ __ StoreW(hole_lower, MemOperand(r9, Register::kMantissaOffset));
#endif
- __ addi(scratch2, scratch2, Operand(8));
+ __ AddP(r9, Operand(8));
__ bind(&entry);
- __ cmp(scratch2, array_end);
+ __ CmpP(r9, array_end);
__ blt(&loop);
+ __ pop(r14);
__ bind(&done);
}
-
void ElementsTransitionGenerator::GenerateDoubleToObject(
MacroAssembler* masm, Register receiver, Register key, Register value,
Register target_map, AllocationSiteMode mode, Label* fail) {
// Register lr contains the return address.
Label loop, convert_hole, gc_required, only_change_map;
- Register elements = r7;
- Register array = r9;
- Register length = r8;
- Register scratch = r10;
- Register scratch3 = r11;
- Register hole_value = r14;
+ Register elements = r6;
+ Register array = r8;
+ Register length = r7;
+ Register scratch = r1;
// Verify input registers don't conflict with locals.
DCHECK(!AreAliased(receiver, key, value, target_map, elements, array, length,
@@ -308,9 +295,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// stack.
Register array_size = value;
Register allocate_scratch = target_map;
- __ li(array_size, Operand(FixedDoubleArray::kHeaderSize));
+ __ LoadImmP(array_size, Operand(FixedDoubleArray::kHeaderSize));
__ SmiToPtrArrayOffset(r0, length);
- __ add(array_size, array_size, r0);
+ __ AddP(array_size, r0);
__ Allocate(array_size, array, allocate_scratch, scratch, &gc_required,
NO_ALLOCATION_FLAGS);
// array: destination FixedArray, not tagged as heap object
@@ -318,35 +305,35 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
__ LoadRoot(scratch, Heap::kFixedArrayMapRootIndex);
__ StoreP(length, MemOperand(array, FixedDoubleArray::kLengthOffset));
__ StoreP(scratch, MemOperand(array, HeapObject::kMapOffset));
- __ addi(array, array, Operand(kHeapObjectTag));
+ __ AddP(array, Operand(kHeapObjectTag));
// Prepare for conversion loop.
Register src_elements = elements;
Register dst_elements = target_map;
Register dst_end = length;
Register heap_number_map = scratch;
- __ addi(src_elements, elements,
+ __ AddP(src_elements,
Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag));
__ SmiToPtrArrayOffset(length, length);
- __ LoadRoot(hole_value, Heap::kTheHoleValueRootIndex);
+ __ LoadRoot(r9, Heap::kTheHoleValueRootIndex);
Label initialization_loop, loop_done;
- __ ShiftRightImm(r0, length, Operand(kPointerSizeLog2), SetRC);
- __ beq(&loop_done, cr0);
+ __ ShiftRightP(r0, length, Operand(kPointerSizeLog2));
+ __ beq(&loop_done, Label::kNear /*, cr0*/);
// Allocating heap numbers in the loop below can fail and cause a jump to
// gc_required. We can't leave a partly initialized FixedArray behind,
// so pessimistically fill it with holes now.
- __ mtctr(r0);
- __ addi(dst_elements, array,
+ __ AddP(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag - kPointerSize));
__ bind(&initialization_loop);
- __ StorePU(hole_value, MemOperand(dst_elements, kPointerSize));
- __ bdnz(&initialization_loop);
+ __ StoreP(r9, MemOperand(dst_elements, kPointerSize));
+ __ lay(dst_elements, MemOperand(dst_elements, kPointerSize));
+ __ BranchOnCount(r0, &initialization_loop);
- __ addi(dst_elements, array,
+ __ AddP(dst_elements, array,
Operand(FixedArray::kHeaderSize - kHeapObjectTag));
- __ add(dst_end, dst_elements, length);
+ __ AddP(dst_end, dst_elements, length);
__ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex);
// Using offsetted addresses in src_elements to fully take advantage of
// post-indexing.
@@ -355,9 +342,9 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// not tagged, +4
// dst_end: end of destination FixedArray, not tagged
// array: destination FixedArray
- // hole_value: the-hole pointer
+ // r9: the-hole pointer
// heap_number_map: heap number map
- __ b(&loop);
+ __ b(&loop, Label::kNear);
// Call into runtime if GC is required.
__ bind(&gc_required);
@@ -366,90 +353,93 @@ void ElementsTransitionGenerator::GenerateDoubleToObject(
// Replace the-hole NaN with the-hole pointer.
__ bind(&convert_hole);
- __ StoreP(hole_value, MemOperand(dst_elements));
- __ addi(dst_elements, dst_elements, Operand(kPointerSize));
- __ cmpl(dst_elements, dst_end);
+ __ StoreP(r9, MemOperand(dst_elements));
+ __ AddP(dst_elements, Operand(kPointerSize));
+ __ CmpLogicalP(dst_elements, dst_end);
__ bge(&loop_done);
__ bind(&loop);
Register upper_bits = key;
- __ lwz(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
- __ addi(src_elements, src_elements, Operand(kDoubleSize));
+ __ LoadlW(upper_bits, MemOperand(src_elements, Register::kExponentOffset));
+ __ AddP(src_elements, Operand(kDoubleSize));
// upper_bits: current element's upper 32 bit
// src_elements: address of next element's upper 32 bit
- __ Cmpi(upper_bits, Operand(kHoleNanUpper32), r0);
- __ beq(&convert_hole);
+ __ Cmp32(upper_bits, Operand(kHoleNanUpper32));
+ __ beq(&convert_hole, Label::kNear);
// Non-hole double, copy value into a heap number.
Register heap_number = receiver;
Register scratch2 = value;
- __ AllocateHeapNumber(heap_number, scratch2, scratch3, heap_number_map,
+ __ AllocateHeapNumber(heap_number, scratch2, r1, heap_number_map,
&gc_required);
- // heap_number: new heap number
-#if V8_TARGET_ARCH_PPC64
- __ ld(scratch2, MemOperand(src_elements, -kDoubleSize));
+// heap_number: new heap number
+#if V8_TARGET_ARCH_S390X
+ __ lg(scratch2, MemOperand(src_elements, -kDoubleSize));
// subtract tag for std
- __ addi(upper_bits, heap_number, Operand(-kHeapObjectTag));
- __ std(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
+ __ AddP(upper_bits, heap_number, Operand(-kHeapObjectTag));
+ __ stg(scratch2, MemOperand(upper_bits, HeapNumber::kValueOffset));
#else
- __ lwz(scratch2,
- MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
- __ lwz(upper_bits,
- MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
- __ stw(scratch2, FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
- __ stw(upper_bits, FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
+ __ LoadlW(scratch2,
+ MemOperand(src_elements, Register::kMantissaOffset - kDoubleSize));
+ __ LoadlW(upper_bits,
+ MemOperand(src_elements, Register::kExponentOffset - kDoubleSize));
+ __ StoreW(scratch2,
+ FieldMemOperand(heap_number, HeapNumber::kMantissaOffset));
+ __ StoreW(upper_bits,
+ FieldMemOperand(heap_number, HeapNumber::kExponentOffset));
#endif
- __ mr(scratch2, dst_elements);
+ __ LoadRR(scratch2, dst_elements);
__ StoreP(heap_number, MemOperand(dst_elements));
- __ addi(dst_elements, dst_elements, Operand(kPointerSize));
+ __ AddP(dst_elements, Operand(kPointerSize));
__ RecordWrite(array, scratch2, heap_number, kLRHasNotBeenSaved,
kDontSaveFPRegs, EMIT_REMEMBERED_SET, OMIT_SMI_CHECK);
- __ cmpl(dst_elements, dst_end);
+ __ CmpLogicalP(dst_elements, dst_end);
__ blt(&loop);
__ bind(&loop_done);
__ Pop(target_map, receiver, key, value);
// Replace receiver's backing store with newly created and filled FixedArray.
- __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset), r0);
+ __ StoreP(array, FieldMemOperand(receiver, JSObject::kElementsOffset));
__ RecordWriteField(receiver, JSObject::kElementsOffset, array, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, EMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
__ bind(&only_change_map);
// Update receiver's map.
- __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset), r0);
+ __ StoreP(target_map, FieldMemOperand(receiver, HeapObject::kMapOffset));
__ RecordWriteField(receiver, HeapObject::kMapOffset, target_map, scratch,
kLRHasNotBeenSaved, kDontSaveFPRegs, OMIT_REMEMBERED_SET,
OMIT_SMI_CHECK);
}
-
// assume ip can be used as a scratch register below
void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Register index, Register result,
Label* call_runtime) {
// Fetch the instance type of the receiver into result register.
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// We need special handling for indirect strings.
Label check_sequential;
- __ andi(r0, result, Operand(kIsIndirectStringMask));
- __ beq(&check_sequential, cr0);
+ __ mov(r0, Operand(kIsIndirectStringMask));
+ __ AndP(r0, result);
+ __ beq(&check_sequential, Label::kNear /*, cr0*/);
// Dispatch on the indirect string shape: slice or cons.
Label cons_string;
__ mov(ip, Operand(kSlicedNotConsMask));
- __ and_(r0, result, ip, SetRC);
- __ beq(&cons_string, cr0);
+ __ LoadRR(r0, result);
+ __ AndP(r0, ip /*, SetRC*/); // Should be okay to remove RC
+ __ beq(&cons_string, Label::kNear /*, cr0*/);
// Handle slices.
Label indirect_string_loaded;
__ LoadP(result, FieldMemOperand(string, SlicedString::kOffsetOffset));
__ LoadP(string, FieldMemOperand(string, SlicedString::kParentOffset));
__ SmiUntag(ip, result);
- __ add(index, index, ip);
- __ b(&indirect_string_loaded);
+ __ AddP(index, ip);
+ __ b(&indirect_string_loaded, Label::kNear);
// Handle cons strings.
// Check whether the right hand side is the empty string (i.e. if
@@ -465,7 +455,7 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
__ bind(&indirect_string_loaded);
__ LoadP(result, FieldMemOperand(string, HeapObject::kMapOffset));
- __ lbz(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
+ __ LoadlB(result, FieldMemOperand(result, Map::kInstanceTypeOffset));
// Distinguish sequential and external strings. Only these two string
// representations can reach here (slices and flat cons strings have been
@@ -473,51 +463,52 @@ void StringCharLoadGenerator::Generate(MacroAssembler* masm, Register string,
Label external_string, check_encoding;
__ bind(&check_sequential);
STATIC_ASSERT(kSeqStringTag == 0);
- __ andi(r0, result, Operand(kStringRepresentationMask));
- __ bne(&external_string, cr0);
+ __ mov(r0, Operand(kStringRepresentationMask));
+ __ AndP(r0, result);
+ __ bne(&external_string, Label::kNear);
// Prepare sequential strings
STATIC_ASSERT(SeqTwoByteString::kHeaderSize == SeqOneByteString::kHeaderSize);
- __ addi(string, string,
- Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
- __ b(&check_encoding);
+ __ AddP(string, Operand(SeqTwoByteString::kHeaderSize - kHeapObjectTag));
+ __ b(&check_encoding, Label::kNear);
// Handle external strings.
__ bind(&external_string);
if (FLAG_debug_code) {
// Assert that we do not have a cons or slice (indirect strings) here.
// Sequential strings have already been ruled out.
- __ andi(r0, result, Operand(kIsIndirectStringMask));
+ __ mov(r0, Operand(kIsIndirectStringMask));
+ __ AndP(r0, result);
__ Assert(eq, kExternalStringExpectedButNotFound, cr0);
}
// Rule out short external strings.
STATIC_ASSERT(kShortExternalStringTag != 0);
- __ andi(r0, result, Operand(kShortExternalStringMask));
- __ bne(call_runtime, cr0);
+ __ mov(r0, Operand(kShortExternalStringMask));
+ __ AndP(r0, result);
+ __ bne(call_runtime /*, cr0*/);
__ LoadP(string,
FieldMemOperand(string, ExternalString::kResourceDataOffset));
Label one_byte, done;
__ bind(&check_encoding);
STATIC_ASSERT(kTwoByteStringTag == 0);
- __ andi(r0, result, Operand(kStringEncodingMask));
- __ bne(&one_byte, cr0);
+ __ mov(r0, Operand(kStringEncodingMask));
+ __ AndP(r0, result);
+ __ bne(&one_byte, Label::kNear);
// Two-byte string.
- __ ShiftLeftImm(result, index, Operand(1));
- __ lhzx(result, MemOperand(string, result));
- __ b(&done);
+ __ ShiftLeftP(result, index, Operand(1));
+ __ LoadLogicalHalfWordP(result, MemOperand(string, result));
+ __ b(&done, Label::kNear);
__ bind(&one_byte);
// One-byte string.
- __ lbzx(result, MemOperand(string, index));
+ __ LoadlB(result, MemOperand(string, index));
__ bind(&done);
}
-
static MemOperand ExpConstant(int index, Register base) {
return MemOperand(base, index * kDoubleSize);
}
-
void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
DoubleRegister result,
DoubleRegister double_scratch1,
@@ -540,64 +531,65 @@ void MathExpGenerator::EmitMathExp(MacroAssembler* masm, DoubleRegister input,
__ mov(temp3, Operand(ExternalReference::math_exp_constants(0)));
- __ lfd(double_scratch1, ExpConstant(0, temp3));
- __ fcmpu(double_scratch1, input);
- __ fmr(result, input);
- __ bunordered(&done);
- __ bge(&zero);
-
- __ lfd(double_scratch2, ExpConstant(1, temp3));
- __ fcmpu(input, double_scratch2);
- __ bge(&infinity);
-
- __ lfd(double_scratch1, ExpConstant(3, temp3));
- __ lfd(result, ExpConstant(4, temp3));
- __ fmul(double_scratch1, double_scratch1, input);
- __ fadd(double_scratch1, double_scratch1, result);
- __ MovDoubleLowToInt(temp2, double_scratch1);
- __ fsub(double_scratch1, double_scratch1, result);
- __ lfd(result, ExpConstant(6, temp3));
- __ lfd(double_scratch2, ExpConstant(5, temp3));
- __ fmul(double_scratch1, double_scratch1, double_scratch2);
- __ fsub(double_scratch1, double_scratch1, input);
- __ fsub(result, result, double_scratch1);
- __ fmul(double_scratch2, double_scratch1, double_scratch1);
- __ fmul(result, result, double_scratch2);
- __ lfd(double_scratch2, ExpConstant(7, temp3));
- __ fmul(result, result, double_scratch2);
- __ fsub(result, result, double_scratch1);
- __ lfd(double_scratch2, ExpConstant(8, temp3));
- __ fadd(result, result, double_scratch2);
- __ srwi(temp1, temp2, Operand(11));
- __ andi(temp2, temp2, Operand(0x7ff));
- __ addi(temp1, temp1, Operand(0x3ff));
+ __ LoadDouble(double_scratch1, ExpConstant(0, temp3));
+ __ cdbr(double_scratch1, input);
+ __ ldr(result, input);
+ __ bunordered(&done, Label::kNear);
+ __ bge(&zero, Label::kNear);
+
+ __ LoadDouble(double_scratch2, ExpConstant(1, temp3));
+ __ cdbr(input, double_scratch2);
+ __ bge(&infinity, Label::kNear);
+
+ __ LoadDouble(double_scratch1, ExpConstant(3, temp3));
+ __ LoadDouble(result, ExpConstant(4, temp3));
+
+ // Do not generate madbr, as intermediate results are not
+ // rounded properly
+ __ mdbr(double_scratch1, input);
+ __ adbr(double_scratch1, result);
+
+ // Move low word of double_scratch1 to temp2
+ __ lgdr(temp2, double_scratch1);
+ __ nihf(temp2, Operand::Zero());
+
+ __ sdbr(double_scratch1, result);
+ __ LoadDouble(result, ExpConstant(6, temp3));
+ __ LoadDouble(double_scratch2, ExpConstant(5, temp3));
+ __ mdbr(double_scratch1, double_scratch2);
+ __ sdbr(double_scratch1, input);
+ __ sdbr(result, double_scratch1);
+ __ ldr(double_scratch2, double_scratch1);
+ __ mdbr(double_scratch2, double_scratch2);
+ __ mdbr(result, double_scratch2);
+ __ LoadDouble(double_scratch2, ExpConstant(7, temp3));
+ __ mdbr(result, double_scratch2);
+ __ sdbr(result, double_scratch1);
+ __ LoadDouble(double_scratch2, ExpConstant(8, temp3));
+ __ adbr(result, double_scratch2);
+ __ ShiftRight(temp1, temp2, Operand(11));
+ __ AndP(temp2, Operand(0x7ff));
+ __ AddP(temp1, Operand(0x3ff));
// Must not call ExpConstant() after overwriting temp3!
__ mov(temp3, Operand(ExternalReference::math_exp_log_table()));
- __ slwi(temp2, temp2, Operand(3));
-#if V8_TARGET_ARCH_PPC64
- __ ldx(temp2, MemOperand(temp3, temp2));
- __ sldi(temp1, temp1, Operand(52));
- __ orx(temp2, temp1, temp2);
- __ MovInt64ToDouble(double_scratch1, temp2);
-#else
- __ add(ip, temp3, temp2);
- __ lwz(temp3, MemOperand(ip, Register::kExponentOffset));
- __ lwz(temp2, MemOperand(ip, Register::kMantissaOffset));
- __ slwi(temp1, temp1, Operand(20));
- __ orx(temp3, temp1, temp3);
- __ MovInt64ToDouble(double_scratch1, temp3, temp2);
-#endif
+ __ ShiftLeft(temp2, temp2, Operand(3));
+
+ __ lg(temp2, MemOperand(temp2, temp3));
+ __ sllg(temp1, temp1, Operand(52));
+ __ ogr(temp2, temp1);
+ __ ldgr(double_scratch1, temp2);
- __ fmul(result, result, double_scratch1);
- __ b(&done);
+ __ mdbr(result, double_scratch1);
+ __ b(&done, Label::kNear);
__ bind(&zero);
- __ fmr(result, kDoubleRegZero);
- __ b(&done);
+ __ lzdr(kDoubleRegZero);
+ __ ldr(result, kDoubleRegZero);
+ __ b(&done, Label::kNear);
__ bind(&infinity);
- __ lfd(result, ExpConstant(2, temp3));
+ __ LoadDouble(result, ExpConstant(2, temp3));
__ bind(&done);
}
@@ -613,32 +605,25 @@ CodeAgingHelper::CodeAgingHelper(Isolate* isolate) {
// the process, before ARM simulator ICache is setup.
base::SmartPointer<CodePatcher> patcher(
new CodePatcher(isolate, young_sequence_.start(),
- young_sequence_.length() / Assembler::kInstrSize,
- CodePatcher::DONT_FLUSH));
+ young_sequence_.length(), CodePatcher::DONT_FLUSH));
PredictableCodeSizeScope scope(patcher->masm(), young_sequence_.length());
- patcher->masm()->PushFixedFrame(r4);
- patcher->masm()->addi(fp, sp,
- Operand(StandardFrameConstants::kFixedFrameSizeFromFp));
- for (int i = 0; i < kNoCodeAgeSequenceNops; i++) {
- patcher->masm()->nop();
- }
+ patcher->masm()->PushFixedFrame(r3);
+ patcher->masm()->la(
+ fp, MemOperand(sp, StandardFrameConstants::kFixedFrameSizeFromFp));
}
-
#ifdef DEBUG
bool CodeAgingHelper::IsOld(byte* candidate) const {
return Assembler::IsNop(Assembler::instr_at(candidate));
}
#endif
-
bool Code::IsYoungSequence(Isolate* isolate, byte* sequence) {
bool result = isolate->code_aging_helper()->IsYoung(sequence);
DCHECK(result || isolate->code_aging_helper()->IsOld(sequence));
return result;
}
-
void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
MarkingParity* parity) {
if (IsYoungSequence(isolate, sequence)) {
@@ -653,7 +638,6 @@ void Code::GetCodeAgeAndParity(Isolate* isolate, byte* sequence, Age* age,
}
}
-
void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
MarkingParity parity) {
uint32_t young_length = isolate->code_aging_helper()->young_sequence_length();
@@ -663,21 +647,29 @@ void Code::PatchPlatformCodeAge(Isolate* isolate, byte* sequence, Code::Age age,
} else {
// FIXED_SEQUENCE
Code* stub = GetCodeAgeStub(isolate, age, parity);
- CodePatcher patcher(isolate, sequence,
- young_length / Assembler::kInstrSize);
- Assembler::BlockTrampolinePoolScope block_trampoline_pool(patcher.masm());
+ CodePatcher patcher(isolate, sequence, young_length);
intptr_t target = reinterpret_cast<intptr_t>(stub->instruction_start());
- // Don't use Call -- we need to preserve ip and lr.
- // GenerateMakeCodeYoungAgainCommon for the stub code.
+ // We need to push lr on the stack so that GenerateMakeCodeYoungAgainCommon
+ // knows where to pick up the return address
+ //
+ // Since we can no longer guarantee ip will hold the branch address
+ // because of BRASL, use Call so that GenerateMakeCodeYoungAgainCommon
+ // can calculate the branch address offset
patcher.masm()->nop(); // marker to detect sequence (see IsOld)
- patcher.masm()->mov(r3, Operand(target));
- patcher.masm()->Jump(r3);
- for (int i = 0; i < kCodeAgingSequenceNops; i++) {
- patcher.masm()->nop();
+ patcher.masm()->CleanseP(r14);
+ patcher.masm()->Push(r14);
+ patcher.masm()->mov(r2, Operand(target));
+ patcher.masm()->Call(r2);
+ for (int i = 0; i < kNoCodeAgeSequenceLength - kCodeAgingSequenceLength;
+ i += 2) {
+ // TODO(joransiu): Create nop function to pad
+ // (kNoCodeAgeSequenceLength - kCodeAgingSequenceLength) bytes.
+ patcher.masm()->nop(); // 2-byte nops().
}
}
}
+
} // namespace internal
} // namespace v8
-#endif // V8_TARGET_ARCH_PPC
+#endif // V8_TARGET_ARCH_S390
