Chromium Code Reviews

Side by Side Diff: src/ppc/macro-assembler-ppc.cc

Issue 901083004: Contribution of PowerPC port (continuation of 422063005) - PPC dir update (Closed) Base URL: https://chromium.googlesource.com/v8/v8.git@master
Patch Set: Contribution of PowerPC port (continuation of 422063005) - PPC dir update - comments and rebase. Created 5 years, 10 months ago
1 // Copyright 2014 the V8 project authors. All rights reserved. 1 // Copyright 2014 the V8 project authors. All rights reserved.
2 // Use of this source code is governed by a BSD-style license that can be 2 // Use of this source code is governed by a BSD-style license that can be
3 // found in the LICENSE file. 3 // found in the LICENSE file.
4 4
5 #include <assert.h> // For assert 5 #include <assert.h> // For assert
6 #include <limits.h> // For LONG_MIN, LONG_MAX. 6 #include <limits.h> // For LONG_MIN, LONG_MAX.
7 7
8 #include "src/v8.h" 8 #include "src/v8.h"
9 9
10 #if V8_TARGET_ARCH_PPC 10 #if V8_TARGET_ARCH_PPC
(...skipping 595 matching lines...)
606 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) { 606 MemOperand MacroAssembler::SafepointRegistersAndDoublesSlot(Register reg) {
607 // General purpose registers are pushed last on the stack. 607 // General purpose registers are pushed last on the stack.
608 int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize; 608 int doubles_size = DoubleRegister::NumAllocatableRegisters() * kDoubleSize;
609 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize; 609 int register_offset = SafepointRegisterStackIndex(reg.code()) * kPointerSize;
610 return MemOperand(sp, doubles_size + register_offset); 610 return MemOperand(sp, doubles_size + register_offset);
611 } 611 }
612 612
613 613
614 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst, 614 void MacroAssembler::CanonicalizeNaN(const DoubleRegister dst,
615 const DoubleRegister src) { 615 const DoubleRegister src) {
616 Label done; 616 // Turn potential sNaN into qNaN.
617 617 fadd(dst, src, kDoubleRegZero);
618 // Test for NaN
619 fcmpu(src, src);
620
621 if (dst.is(src)) {
622 bordered(&done);
623 } else {
624 Label is_nan;
625 bunordered(&is_nan);
626 fmr(dst, src);
627 b(&done);
628 bind(&is_nan);
629 }
630
631 // Replace with canonical NaN.
632 double nan_value = FixedDoubleArray::canonical_not_the_hole_nan_as_double();
633 LoadDoubleLiteral(dst, nan_value, r0);
634
635 bind(&done);
636 } 618 }
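
Note on the rewritten CanonicalizeNaN: it relies on the IEEE 754 rule that an arithmetic operation on a signaling NaN raises invalid and delivers a quiet NaN, so the single fadd against kDoubleRegZero quiets any sNaN while leaving ordinary values numerically unchanged. A host-side C++ sketch of the effect (illustrative only, not V8 code; assumes the binary64 quiet bit is the mantissa MSB, bit 51):

  #include <cassert>
  #include <cstdint>
  #include <cstring>
  #include <limits>

  int main() {
    volatile double zero = 0.0;  // volatile keeps the add out of constant folding
    double snan = std::numeric_limits<double>::signaling_NaN();
    double quieted = snan + zero;  // sNaN + 0.0 -> qNaN per IEEE 754
    uint64_t bits;
    std::memcpy(&bits, &quieted, sizeof(bits));
    assert((bits >> 51) & 1);      // quiet bit is now set
    double x = 1.5;
    assert(x + zero == 1.5);       // non-NaN inputs pass through unchanged
    return 0;
  }
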
637 619
638 620
639 void MacroAssembler::ConvertIntToDouble(Register src, 621 void MacroAssembler::ConvertIntToDouble(Register src,
640 DoubleRegister double_dst) { 622 DoubleRegister double_dst) {
641 MovIntToDouble(double_dst, src, r0); 623 MovIntToDouble(double_dst, src, r0);
642 fcfid(double_dst, double_dst); 624 fcfid(double_dst, double_dst);
643 } 625 }
644 626
645 627
(...skipping 262 matching lines...)
908 // If we are using the simulator then we should always align to the expected 890 // If we are using the simulator then we should always align to the expected
909 // alignment. As the simulator is used to generate snapshots we do not know 891 // alignment. As the simulator is used to generate snapshots we do not know
910 // if the target platform will need alignment, so this is controlled from a 892 // if the target platform will need alignment, so this is controlled from a
911 // flag. 893 // flag.
912 return FLAG_sim_stack_alignment; 894 return FLAG_sim_stack_alignment;
913 #endif 895 #endif
914 } 896 }
915 897
916 898
917 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count, 899 void MacroAssembler::LeaveExitFrame(bool save_doubles, Register argument_count,
918 bool restore_context) { 900 bool restore_context,
901 bool argument_count_is_length) {
919 #if V8_OOL_CONSTANT_POOL 902 #if V8_OOL_CONSTANT_POOL
920 ConstantPoolUnavailableScope constant_pool_unavailable(this); 903 ConstantPoolUnavailableScope constant_pool_unavailable(this);
921 #endif 904 #endif
922 // Optionally restore all double registers. 905 // Optionally restore all double registers.
923 if (save_doubles) { 906 if (save_doubles) {
924 // Calculate the stack location of the saved doubles and restore them. 907 // Calculate the stack location of the saved doubles and restore them.
925 const int kNumRegs = DoubleRegister::kNumVolatileRegisters; 908 const int kNumRegs = DoubleRegister::kNumVolatileRegisters;
926 const int offset = 909 const int offset =
927 (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize); 910 (ExitFrameConstants::kFrameSize + kNumRegs * kDoubleSize);
928 addi(r6, fp, Operand(-offset)); 911 addi(r6, fp, Operand(-offset));
(...skipping 12 matching lines...)
941 } 924 }
942 #ifdef DEBUG 925 #ifdef DEBUG
943 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate()))); 926 mov(ip, Operand(ExternalReference(Isolate::kContextAddress, isolate())));
944 StoreP(r6, MemOperand(ip)); 927 StoreP(r6, MemOperand(ip));
945 #endif 928 #endif
946 929
947 // Tear down the exit frame, pop the arguments, and return. 930 // Tear down the exit frame, pop the arguments, and return.
948 LeaveFrame(StackFrame::EXIT); 931 LeaveFrame(StackFrame::EXIT);
949 932
950 if (argument_count.is_valid()) { 933 if (argument_count.is_valid()) {
951 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2)); 934 if (!argument_count_is_length) {
935 ShiftLeftImm(argument_count, argument_count, Operand(kPointerSizeLog2));
936 }
952 add(sp, sp, argument_count); 937 add(sp, sp, argument_count);
953 } 938 }
954 } 939 }
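
The new argument_count_is_length flag lets callers pass a byte length directly: when it is false the register holds an element count and is first scaled to bytes with ShiftLeftImm(kPointerSizeLog2). A minimal sketch of that conversion (illustrative; assumes 64-bit pointers, so kPointerSizeLog2 == 3):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int kPointerSizeLog2 = 3;        // assumption: 8-byte pointers
    uintptr_t sp = 0x10000;
    uintptr_t argument_count = 4;          // four stack arguments
    bool argument_count_is_length = false;
    if (!argument_count_is_length) {
      argument_count <<= kPointerSizeLog2; // count -> bytes
    }
    sp += argument_count;                  // pop the arguments
    assert(sp == 0x10000 + 4 * 8);
    return 0;
  }
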
955 940
956 941
957 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) { 942 void MacroAssembler::MovFromFloatResult(const DoubleRegister dst) {
958 Move(dst, d1); 943 Move(dst, d1);
959 } 944 }
960 945
961 946
(...skipping 773 matching lines...)
1735 } 1720 }
1736 } 1721 }
1737 1722
1738 1723
1739 void MacroAssembler::UndoAllocationInNewSpace(Register object, 1724 void MacroAssembler::UndoAllocationInNewSpace(Register object,
1740 Register scratch) { 1725 Register scratch) {
1741 ExternalReference new_space_allocation_top = 1726 ExternalReference new_space_allocation_top =
1742 ExternalReference::new_space_allocation_top_address(isolate()); 1727 ExternalReference::new_space_allocation_top_address(isolate());
1743 1728
1744 // Make sure the object has no tag before resetting top. 1729 // Make sure the object has no tag before resetting top.
1745 mov(r0, Operand(~kHeapObjectTagMask)); 1730 ClearRightImm(object, object, Operand(kHeapObjectTagSize));
1746 and_(object, object, r0);
1747 // was.. and_(object, object, Operand(~kHeapObjectTagMask));
1748 #ifdef DEBUG 1731 #ifdef DEBUG
1749 // Check that the object un-allocated is below the current top. 1732 // Check that the object un-allocated is below the current top.
1750 mov(scratch, Operand(new_space_allocation_top)); 1733 mov(scratch, Operand(new_space_allocation_top));
1751 LoadP(scratch, MemOperand(scratch)); 1734 LoadP(scratch, MemOperand(scratch));
1752 cmp(object, scratch); 1735 cmp(object, scratch);
1753 Check(lt, kUndoAllocationOfNonAllocatedMemory); 1736 Check(lt, kUndoAllocationOfNonAllocatedMemory);
1754 #endif 1737 #endif
1755 // Write the address of the object to un-allocate as the current top. 1738 // Write the address of the object to un-allocate as the current top.
1756 mov(scratch, Operand(new_space_allocation_top)); 1739 mov(scratch, Operand(new_space_allocation_top));
1757 StoreP(object, MemOperand(scratch)); 1740 StoreP(object, MemOperand(scratch));
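
ClearRightImm(object, object, Operand(kHeapObjectTagSize)) clears the low kHeapObjectTagSize bits in a single rotate-and-mask instruction, equivalent to the old mov/and_ pair computing object & ~kHeapObjectTagMask. A quick check of the equivalence (values illustrative; V8 uses a 2-bit heap-object tag):

  #include <cassert>
  #include <cstdint>

  int main() {
    const int kHeapObjectTagSize = 2;  // V8: 2-bit tag, so kHeapObjectTagMask == 3
    const uintptr_t kHeapObjectTagMask = (uintptr_t{1} << kHeapObjectTagSize) - 1;
    uintptr_t object = 0x10001;        // tagged heap pointer (tag bit set)
    uintptr_t cleared = object & ~kHeapObjectTagMask;  // what ClearRightImm computes
    assert(cleared == 0x10000);        // matches the old mov + and_ sequence
    return 0;
  }
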
(...skipping 177 matching lines...)
1935 Label smi_value, store; 1918 Label smi_value, store;
1936 1919
1937 // Handle smi values specially. 1920 // Handle smi values specially.
1938 JumpIfSmi(value_reg, &smi_value); 1921 JumpIfSmi(value_reg, &smi_value);
1939 1922
1940 // Ensure that the object is a heap number 1923 // Ensure that the object is a heap number
1941 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail, 1924 CheckMap(value_reg, scratch1, isolate()->factory()->heap_number_map(), fail,
1942 DONT_DO_SMI_CHECK); 1925 DONT_DO_SMI_CHECK);
1943 1926
1944 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset)); 1927 lfd(double_scratch, FieldMemOperand(value_reg, HeapNumber::kValueOffset));
1945 // Force a canonical NaN. 1928 // Double value, turn potential sNaN into qNaN.
1946 CanonicalizeNaN(double_scratch); 1929 CanonicalizeNaN(double_scratch);
1947 b(&store); 1930 b(&store);
1948 1931
1949 bind(&smi_value); 1932 bind(&smi_value);
1950 SmiToDouble(double_scratch, value_reg); 1933 SmiToDouble(double_scratch, value_reg);
1951 1934
1952 bind(&store); 1935 bind(&store);
1953 SmiToDoubleArrayOffset(scratch1, key_reg); 1936 SmiToDoubleArrayOffset(scratch1, key_reg);
1954 add(scratch1, elements_reg, scratch1); 1937 add(scratch1, elements_reg, scratch1);
1955 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize - 1938 stfd(double_scratch, FieldMemOperand(scratch1, FixedDoubleArray::kHeaderSize -
1956 elements_offset)); 1939 elements_offset));
1957 } 1940 }
1958 1941
1959 1942
1960 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, 1943 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1961 Register right, 1944 Register right,
1962 Register overflow_dst, 1945 Register overflow_dst,
1963 Register scratch) { 1946 Register scratch) {
1964 DCHECK(!dst.is(overflow_dst)); 1947 DCHECK(!dst.is(overflow_dst));
1965 DCHECK(!dst.is(scratch)); 1948 DCHECK(!dst.is(scratch));
1966 DCHECK(!overflow_dst.is(scratch)); 1949 DCHECK(!overflow_dst.is(scratch));
1967 DCHECK(!overflow_dst.is(left)); 1950 DCHECK(!overflow_dst.is(left));
1968 DCHECK(!overflow_dst.is(right)); 1951 DCHECK(!overflow_dst.is(right));
1969 1952
1953 bool left_is_right = left.is(right);
1954 RCBit xorRC = left_is_right ? SetRC : LeaveRC;
1955
1970 // C = A+B; C overflows if A/B have same sign and C has diff sign than A 1956 // C = A+B; C overflows if A/B have same sign and C has diff sign than A
1971 if (dst.is(left)) { 1957 if (dst.is(left)) {
1972 mr(scratch, left); // Preserve left. 1958 mr(scratch, left); // Preserve left.
1973 add(dst, left, right); // Left is overwritten. 1959 add(dst, left, right); // Left is overwritten.
1974 xor_(scratch, dst, scratch); // Original left. 1960 xor_(overflow_dst, dst, scratch, xorRC); // Original left.
1975 xor_(overflow_dst, dst, right); 1961 if (!left_is_right) xor_(scratch, dst, right);
1976 } else if (dst.is(right)) { 1962 } else if (dst.is(right)) {
1977 mr(scratch, right); // Preserve right. 1963 mr(scratch, right); // Preserve right.
1978 add(dst, left, right); // Right is overwritten. 1964 add(dst, left, right); // Right is overwritten.
1979 xor_(scratch, dst, scratch); // Original right. 1965 xor_(overflow_dst, dst, left, xorRC);
1980 xor_(overflow_dst, dst, left); 1966 if (!left_is_right) xor_(scratch, dst, scratch); // Original right.
1981 } else { 1967 } else {
1982 add(dst, left, right); 1968 add(dst, left, right);
1983 xor_(overflow_dst, dst, left); 1969 xor_(overflow_dst, dst, left, xorRC);
1984 xor_(scratch, dst, right); 1970 if (!left_is_right) xor_(scratch, dst, right);
1985 } 1971 }
1986 and_(overflow_dst, scratch, overflow_dst, SetRC); 1972 if (!left_is_right) and_(overflow_dst, scratch, overflow_dst, SetRC);
1987 } 1973 }
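
The overflow test here is the classic XOR trick: a signed add overflows exactly when ((dst ^ left) & (dst ^ right)) has its sign bit set, and when left and right are the same register the two XORs coincide, so the new left_is_right path computes just one of them with SetRC. A host-side sketch of the predicate (illustrative, 32-bit for brevity):

  #include <cassert>
  #include <cstdint>

  // Signed overflow occurred iff (sum ^ a) & (sum ^ b) is negative.
  bool AddOverflows(int32_t a, int32_t b) {
    uint32_t sum = static_cast<uint32_t>(a) + static_cast<uint32_t>(b);
    uint32_t ov = (sum ^ static_cast<uint32_t>(a)) &
                  (sum ^ static_cast<uint32_t>(b));
    return static_cast<int32_t>(ov) < 0;  // sign bit set => overflow
  }

  int main() {
    assert(AddOverflows(INT32_MAX, 1));
    assert(!AddOverflows(100, 200));
    assert(AddOverflows(INT32_MIN, -1));
    // When a == b, (sum ^ a) == (sum ^ b), so one XOR suffices:
    // the special case the diff adds for left.is(right).
    assert(AddOverflows(0x40000000, 0x40000000));
    return 0;
  }
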
1988 1974
1989 1975
1990 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left, 1976 void MacroAssembler::AddAndCheckForOverflow(Register dst, Register left,
1991 intptr_t right, 1977 intptr_t right,
1992 Register overflow_dst, 1978 Register overflow_dst,
1993 Register scratch) { 1979 Register scratch) {
1994 Register original_left = left; 1980 Register original_left = left;
1995 DCHECK(!dst.is(overflow_dst)); 1981 DCHECK(!dst.is(overflow_dst));
1996 DCHECK(!dst.is(scratch)); 1982 DCHECK(!dst.is(scratch));
(...skipping 81 matching lines...)
2078 if (smi_check_type == DO_SMI_CHECK) { 2064 if (smi_check_type == DO_SMI_CHECK) {
2079 JumpIfSmi(obj, fail); 2065 JumpIfSmi(obj, fail);
2080 } 2066 }
2081 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 2067 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset));
2082 LoadRoot(r0, index); 2068 LoadRoot(r0, index);
2083 cmp(scratch, r0); 2069 cmp(scratch, r0);
2084 bne(fail); 2070 bne(fail);
2085 } 2071 }
2086 2072
2087 2073
2088 void MacroAssembler::DispatchMap(Register obj, Register scratch, 2074 void MacroAssembler::DispatchWeakMap(Register obj, Register scratch1,
2089 Handle<Map> map, Handle<Code> success, 2075 Register scratch2, Handle<WeakCell> cell,
2090 SmiCheckType smi_check_type) { 2076 Handle<Code> success,
2077 SmiCheckType smi_check_type) {
2091 Label fail; 2078 Label fail;
2092 if (smi_check_type == DO_SMI_CHECK) { 2079 if (smi_check_type == DO_SMI_CHECK) {
2093 JumpIfSmi(obj, &fail); 2080 JumpIfSmi(obj, &fail);
2094 } 2081 }
2095 LoadP(scratch, FieldMemOperand(obj, HeapObject::kMapOffset)); 2082 LoadP(scratch1, FieldMemOperand(obj, HeapObject::kMapOffset));
2096 mov(r0, Operand(map)); 2083 CmpWeakValue(scratch1, cell, scratch2);
2097 cmp(scratch, r0); 2084 Jump(success, RelocInfo::CODE_TARGET, eq);
2098 bne(&fail);
2099 Jump(success, RelocInfo::CODE_TARGET, al);
2100 bind(&fail); 2085 bind(&fail);
2101 } 2086 }
2102 2087
2103 2088
2089 void MacroAssembler::CmpWeakValue(Register value, Handle<WeakCell> cell,
2090 Register scratch, CRegister cr) {
2091 mov(scratch, Operand(cell));
2092 LoadP(scratch, FieldMemOperand(scratch, WeakCell::kValueOffset));
2093 cmp(value, scratch, cr);
2094 }
2095
2096
2097 void MacroAssembler::GetWeakValue(Register value, Handle<WeakCell> cell) {
2098 mov(value, Operand(cell));
2099 LoadP(value, FieldMemOperand(value, WeakCell::kValueOffset));
2100 }
2101
2102
2103 void MacroAssembler::LoadWeakValue(Register value, Handle<WeakCell> cell,
2104 Label* miss) {
2105 GetWeakValue(value, cell);
2106 JumpIfSmi(value, miss);
2107 }
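
These WeakCell helpers depend on the invariant that a cleared cell's value slot holds the Smi zero while a live one holds a tagged heap pointer, so JumpIfSmi doubles as the cleared-cell check in LoadWeakValue. A conceptual sketch (not V8's actual types; assumes the usual kSmiTag == 0 low-bit tagging):

  #include <cassert>
  #include <cstdint>

  bool IsSmi(uintptr_t tagged) { return (tagged & 1) == 0; }  // kSmiTag == 0

  // Mirrors LoadWeakValue: return the value, or signal a miss (here: 0)
  // when GC cleared the cell and the slot holds the Smi zero.
  uintptr_t LoadWeakValueOrMiss(uintptr_t cell_value) {
    return IsSmi(cell_value) ? 0 : cell_value;
  }

  int main() {
    assert(LoadWeakValueOrMiss(0) == 0);            // cleared cell -> miss
    assert(LoadWeakValueOrMiss(0x1001) == 0x1001);  // live tagged pointer
    return 0;
  }
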
2108
2109
2104 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result, 2110 void MacroAssembler::TryGetFunctionPrototype(Register function, Register result,
2105 Register scratch, Label* miss, 2111 Register scratch, Label* miss,
2106 bool miss_on_bound_function) { 2112 bool miss_on_bound_function) {
2107 Label non_instance; 2113 Label non_instance;
2108 if (miss_on_bound_function) { 2114 if (miss_on_bound_function) {
2109 // Check that the receiver isn't a smi. 2115 // Check that the receiver isn't a smi.
2110 JumpIfSmi(function, miss); 2116 JumpIfSmi(function, miss);
2111 2117
2112 // Check that the function really is a function. Load map into result reg. 2118 // Check that the function really is a function. Load map into result reg.
2113 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE); 2119 CompareObjectType(function, result, scratch, JS_FUNCTION_TYPE);
(...skipping 56 matching lines...)
2170 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs. 2176 DCHECK(AllowThisStubCall(stub)); // Stub calls are not allowed in some stubs.
2171 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond); 2177 Call(stub->GetCode(), RelocInfo::CODE_TARGET, ast_id, cond);
2172 } 2178 }
2173 2179
2174 2180
2175 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) { 2181 void MacroAssembler::TailCallStub(CodeStub* stub, Condition cond) {
2176 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond); 2182 Jump(stub->GetCode(), RelocInfo::CODE_TARGET, cond);
2177 } 2183 }
2178 2184
2179 2185
2180 static int AddressOffset(ExternalReference ref0, ExternalReference ref1) {
2181 return ref0.address() - ref1.address();
2182 }
2183
2184
2185 void MacroAssembler::CallApiFunctionAndReturn(
2186 Register function_address, ExternalReference thunk_ref, int stack_space,
2187 MemOperand return_value_operand, MemOperand* context_restore_operand) {
2188 ExternalReference next_address =
2189 ExternalReference::handle_scope_next_address(isolate());
2190 const int kNextOffset = 0;
2191 const int kLimitOffset = AddressOffset(
2192 ExternalReference::handle_scope_limit_address(isolate()), next_address);
2193 const int kLevelOffset = AddressOffset(
2194 ExternalReference::handle_scope_level_address(isolate()), next_address);
2195
2196 DCHECK(function_address.is(r4) || function_address.is(r5));
2197 Register scratch = r6;
2198
2199 Label profiler_disabled;
2200 Label end_profiler_check;
2201 mov(scratch, Operand(ExternalReference::is_profiling_address(isolate())));
2202 lbz(scratch, MemOperand(scratch, 0));
2203 cmpi(scratch, Operand::Zero());
2204 beq(&profiler_disabled);
2205
2206 // Additional parameter is the address of the actual callback.
2207 mov(scratch, Operand(thunk_ref));
2208 jmp(&end_profiler_check);
2209
2210 bind(&profiler_disabled);
2211 mr(scratch, function_address);
2212 bind(&end_profiler_check);
2213
2214 // Allocate HandleScope in callee-save registers.
2215 // r17 - next_address
2216 // r14 - next_address->kNextOffset
2217 // r15 - next_address->kLimitOffset
2218 // r16 - next_address->kLevelOffset
2219 mov(r17, Operand(next_address));
2220 LoadP(r14, MemOperand(r17, kNextOffset));
2221 LoadP(r15, MemOperand(r17, kLimitOffset));
2222 lwz(r16, MemOperand(r17, kLevelOffset));
2223 addi(r16, r16, Operand(1));
2224 stw(r16, MemOperand(r17, kLevelOffset));
2225
2226 if (FLAG_log_timer_events) {
2227 FrameScope frame(this, StackFrame::MANUAL);
2228 PushSafepointRegisters();
2229 PrepareCallCFunction(1, r3);
2230 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2231 CallCFunction(ExternalReference::log_enter_external_function(isolate()), 1);
2232 PopSafepointRegisters();
2233 }
2234
2235 // Native call returns to the DirectCEntry stub which redirects to the
2236 // return address pushed on stack (could have moved after GC).
2237 // DirectCEntry stub itself is generated early and never moves.
2238 DirectCEntryStub stub(isolate());
2239 stub.GenerateCall(this, scratch);
2240
2241 if (FLAG_log_timer_events) {
2242 FrameScope frame(this, StackFrame::MANUAL);
2243 PushSafepointRegisters();
2244 PrepareCallCFunction(1, r3);
2245 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2246 CallCFunction(ExternalReference::log_leave_external_function(isolate()), 1);
2247 PopSafepointRegisters();
2248 }
2249
2250 Label promote_scheduled_exception;
2251 Label exception_handled;
2252 Label delete_allocated_handles;
2253 Label leave_exit_frame;
2254 Label return_value_loaded;
2255
2256 // load value from ReturnValue
2257 LoadP(r3, return_value_operand);
2258 bind(&return_value_loaded);
2259 // No more valid handles (the result handle was the last one). Restore
2260 // previous handle scope.
2261 StoreP(r14, MemOperand(r17, kNextOffset));
2262 if (emit_debug_code()) {
2263 lwz(r4, MemOperand(r17, kLevelOffset));
2264 cmp(r4, r16);
2265 Check(eq, kUnexpectedLevelAfterReturnFromApiCall);
2266 }
2267 subi(r16, r16, Operand(1));
2268 stw(r16, MemOperand(r17, kLevelOffset));
2269 LoadP(r0, MemOperand(r17, kLimitOffset));
2270 cmp(r15, r0);
2271 bne(&delete_allocated_handles);
2272
2273 // Check if the function scheduled an exception.
2274 bind(&leave_exit_frame);
2275 LoadRoot(r14, Heap::kTheHoleValueRootIndex);
2276 mov(r15, Operand(ExternalReference::scheduled_exception_address(isolate())));
2277 LoadP(r15, MemOperand(r15));
2278 cmp(r14, r15);
2279 bne(&promote_scheduled_exception);
2280 bind(&exception_handled);
2281
2282 bool restore_context = context_restore_operand != NULL;
2283 if (restore_context) {
2284 LoadP(cp, *context_restore_operand);
2285 }
2286 // LeaveExitFrame expects unwind space to be in a register.
2287 mov(r14, Operand(stack_space));
2288 LeaveExitFrame(false, r14, !restore_context);
2289 blr();
2290
2291 bind(&promote_scheduled_exception);
2292 {
2293 FrameScope frame(this, StackFrame::INTERNAL);
2294 CallExternalReference(
2295 ExternalReference(Runtime::kPromoteScheduledException, isolate()), 0);
2296 }
2297 jmp(&exception_handled);
2298
2299 // HandleScope limit has changed. Delete allocated extensions.
2300 bind(&delete_allocated_handles);
2301 StoreP(r15, MemOperand(r17, kLimitOffset));
2302 mr(r14, r3);
2303 PrepareCallCFunction(1, r15);
2304 mov(r3, Operand(ExternalReference::isolate_address(isolate())));
2305 CallCFunction(ExternalReference::delete_handle_scope_extensions(isolate()),
2306 1);
2307 mr(r3, r14);
2308 b(&leave_exit_frame);
2309 }
2310
2311
2312 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) { 2186 bool MacroAssembler::AllowThisStubCall(CodeStub* stub) {
2313 return has_frame_ || !stub->SometimesSetsUpAFrame(); 2187 return has_frame_ || !stub->SometimesSetsUpAFrame();
2314 } 2188 }
2315 2189
2316 2190
2317 void MacroAssembler::IndexFromHash(Register hash, Register index) { 2191 void MacroAssembler::IndexFromHash(Register hash, Register index) {
2318 // If the hash field contains an array index pick it out. The assert checks 2192 // If the hash field contains an array index pick it out. The assert checks
2319 // that the constants for the maximum number of digits for an array index 2193 // that the constants for the maximum number of digits for an array index
2320 // cached in the hash field and the number of bits reserved for it does not 2194 // cached in the hash field and the number of bits reserved for it does not
2321 // conflict. 2195 // conflict.
(...skipping 23 matching lines...)
2345 Label done; 2219 Label done;
2346 DCHECK(!double_input.is(double_scratch)); 2220 DCHECK(!double_input.is(double_scratch));
2347 2221
2348 ConvertDoubleToInt64(double_input, 2222 ConvertDoubleToInt64(double_input,
2349 #if !V8_TARGET_ARCH_PPC64 2223 #if !V8_TARGET_ARCH_PPC64
2350 scratch, 2224 scratch,
2351 #endif 2225 #endif
2352 result, double_scratch); 2226 result, double_scratch);
2353 2227
2354 #if V8_TARGET_ARCH_PPC64 2228 #if V8_TARGET_ARCH_PPC64
2355 TestIfInt32(result, scratch, r0); 2229 TestIfInt32(result, r0);
2356 #else 2230 #else
2357 TestIfInt32(scratch, result, r0); 2231 TestIfInt32(scratch, result, r0);
2358 #endif 2232 #endif
2359 bne(&done); 2233 bne(&done);
2360 2234
2361 // convert back and compare 2235 // convert back and compare
2362 fcfid(double_scratch, double_scratch); 2236 fcfid(double_scratch, double_scratch);
2363 fcmpu(double_scratch, double_input); 2237 fcmpu(double_scratch, double_input);
2364 bind(&done); 2238 bind(&done);
2365 } 2239 }
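
On PPC64 the updated TestIfInt32 needs one fewer scratch register: the check amounts to asking whether the 64-bit conversion result sign-extends from its low 32 bits (a sign-extend-word plus a compare). Equivalent host-side predicate:

  #include <cassert>
  #include <cstdint>

  // Does a 64-bit value fit in an int32? Same question TestIfInt32 answers.
  bool FitsInInt32(int64_t value) {
    return value == static_cast<int32_t>(value);
  }

  int main() {
    assert(FitsInInt32(42));
    assert(FitsInInt32(-1));
    assert(!FitsInInt32(int64_t{1} << 32));
    assert(!FitsInInt32(int64_t{INT32_MAX} + 1));
    return 0;
  }
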
(...skipping 16 matching lines...)
2382 2256
2383 // Convert (rounding to -Inf) 2257 // Convert (rounding to -Inf)
2384 ConvertDoubleToInt64(double_input, 2258 ConvertDoubleToInt64(double_input,
2385 #if !V8_TARGET_ARCH_PPC64 2259 #if !V8_TARGET_ARCH_PPC64
2386 scratch, 2260 scratch,
2387 #endif 2261 #endif
2388 result, double_scratch, kRoundToMinusInf); 2262 result, double_scratch, kRoundToMinusInf);
2389 2263
2390 // Test for overflow 2264 // Test for overflow
2391 #if V8_TARGET_ARCH_PPC64 2265 #if V8_TARGET_ARCH_PPC64
2392 TestIfInt32(result, scratch, r0); 2266 TestIfInt32(result, r0);
2393 #else 2267 #else
2394 TestIfInt32(scratch, result, r0); 2268 TestIfInt32(scratch, result, r0);
2395 #endif 2269 #endif
2396 bne(&exception); 2270 bne(&exception);
2397 2271
2398 // Test for exactness 2272 // Test for exactness
2399 fcfid(double_scratch, double_scratch); 2273 fcfid(double_scratch, double_scratch);
2400 fcmpu(double_scratch, double_input); 2274 fcmpu(double_scratch, double_input);
2401 beq(exact); 2275 beq(exact);
2402 b(done); 2276 b(done);
2403 2277
2404 bind(&exception); 2278 bind(&exception);
2405 } 2279 }
2406 2280
2407 2281
2408 void MacroAssembler::TryInlineTruncateDoubleToI(Register result, 2282 void MacroAssembler::TryInlineTruncateDoubleToI(Register result,
2409 DoubleRegister double_input, 2283 DoubleRegister double_input,
2410 Label* done) { 2284 Label* done) {
2411 DoubleRegister double_scratch = kScratchDoubleReg; 2285 DoubleRegister double_scratch = kScratchDoubleReg;
2286 #if !V8_TARGET_ARCH_PPC64
2412 Register scratch = ip; 2287 Register scratch = ip;
2288 #endif
2413 2289
2414 ConvertDoubleToInt64(double_input, 2290 ConvertDoubleToInt64(double_input,
2415 #if !V8_TARGET_ARCH_PPC64 2291 #if !V8_TARGET_ARCH_PPC64
2416 scratch, 2292 scratch,
2417 #endif 2293 #endif
2418 result, double_scratch); 2294 result, double_scratch);
2419 2295
2420 // Test for overflow 2296 // Test for overflow
2421 #if V8_TARGET_ARCH_PPC64 2297 #if V8_TARGET_ARCH_PPC64
2422 TestIfInt32(result, scratch, r0); 2298 TestIfInt32(result, r0);
2423 #else 2299 #else
2424 TestIfInt32(scratch, result, r0); 2300 TestIfInt32(scratch, result, r0);
2425 #endif 2301 #endif
2426 beq(done); 2302 beq(done);
2427 } 2303 }
2428 2304
2429 2305
2430 void MacroAssembler::TruncateDoubleToI(Register result, 2306 void MacroAssembler::TruncateDoubleToI(Register result,
2431 DoubleRegister double_input) { 2307 DoubleRegister double_input) {
2432 Label done; 2308 Label done;
(...skipping 293 matching lines...)
2726 Register map_in_out, Register scratch, Label* no_map_match) { 2602 Register map_in_out, Register scratch, Label* no_map_match) {
2727 // Load the global or builtins object from the current context. 2603 // Load the global or builtins object from the current context.
2728 LoadP(scratch, 2604 LoadP(scratch,
2729 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX))); 2605 MemOperand(cp, Context::SlotOffset(Context::GLOBAL_OBJECT_INDEX)));
2730 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset)); 2606 LoadP(scratch, FieldMemOperand(scratch, GlobalObject::kNativeContextOffset));
2731 2607
2732 // Check that the function's map is the same as the expected cached map. 2608 // Check that the function's map is the same as the expected cached map.
2733 LoadP(scratch, 2609 LoadP(scratch,
2734 MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX))); 2610 MemOperand(scratch, Context::SlotOffset(Context::JS_ARRAY_MAPS_INDEX)));
2735 size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize; 2611 size_t offset = expected_kind * kPointerSize + FixedArrayBase::kHeaderSize;
2736 LoadP(scratch, FieldMemOperand(scratch, offset)); 2612 LoadP(ip, FieldMemOperand(scratch, offset));
2737 cmp(map_in_out, scratch); 2613 cmp(map_in_out, ip);
2738 bne(no_map_match); 2614 bne(no_map_match);
2739 2615
2740 // Use the transitioned cached map. 2616 // Use the transitioned cached map.
2741 offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize; 2617 offset = transitioned_kind * kPointerSize + FixedArrayBase::kHeaderSize;
2742 LoadP(map_in_out, FieldMemOperand(scratch, offset)); 2618 LoadP(map_in_out, FieldMemOperand(scratch, offset));
2743 } 2619 }
2744 2620
2745 2621
2746 void MacroAssembler::LoadGlobalFunction(int index, Register function) { 2622 void MacroAssembler::LoadGlobalFunction(int index, Register function) {
2747 // Load the global or builtins object from the current context. 2623 // Load the global or builtins object from the current context.
(...skipping 65 matching lines...)
2813 DCHECK(!src.is(overflow)); 2689 DCHECK(!src.is(overflow));
2814 SmiTag(dst, src); 2690 SmiTag(dst, src);
2815 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0. 2691 xor_(overflow, dst, src, SetRC); // Overflow if (value ^ 2 * value) < 0.
2816 } 2692 }
2817 } 2693 }
2818 #endif 2694 #endif
2819 2695
2820 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2, 2696 void MacroAssembler::JumpIfNotBothSmi(Register reg1, Register reg2,
2821 Label* on_not_both_smi) { 2697 Label* on_not_both_smi) {
2822 STATIC_ASSERT(kSmiTag == 0); 2698 STATIC_ASSERT(kSmiTag == 0);
2823 DCHECK_EQ(1, static_cast<int>(kSmiTagMask));
2824 orx(r0, reg1, reg2, LeaveRC); 2699 orx(r0, reg1, reg2, LeaveRC);
2825 JumpIfNotSmi(r0, on_not_both_smi); 2700 JumpIfNotSmi(r0, on_not_both_smi);
2826 } 2701 }
2827 2702
2828 2703
2829 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src, 2704 void MacroAssembler::UntagAndJumpIfSmi(Register dst, Register src,
2830 Label* smi_case) { 2705 Label* smi_case) {
2831 STATIC_ASSERT(kSmiTag == 0); 2706 STATIC_ASSERT(kSmiTag == 0);
2832 STATIC_ASSERT(kSmiTagSize == 1); 2707 TestBitRange(src, kSmiTagSize - 1, 0, r0);
2833 TestBit(src, 0, r0);
2834 SmiUntag(dst, src); 2708 SmiUntag(dst, src);
2835 beq(smi_case, cr0); 2709 beq(smi_case, cr0);
2836 } 2710 }
2837 2711
2838 2712
2839 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src, 2713 void MacroAssembler::UntagAndJumpIfNotSmi(Register dst, Register src,
2840 Label* non_smi_case) { 2714 Label* non_smi_case) {
2841 STATIC_ASSERT(kSmiTag == 0); 2715 STATIC_ASSERT(kSmiTag == 0);
2842 STATIC_ASSERT(kSmiTagSize == 1); 2716 TestBitRange(src, kSmiTagSize - 1, 0, r0);
2843 TestBit(src, 0, r0);
2844 SmiUntag(dst, src); 2717 SmiUntag(dst, src);
2845 bne(non_smi_case, cr0); 2718 bne(non_smi_case, cr0);
2846 } 2719 }
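
Switching from TestBit(src, 0) to TestBitRange(src, kSmiTagSize - 1, 0) tests all tag bits rather than hard-coding a single one, which keeps the code correct if kSmiTagSize ever differs from 1. A sketch of the test-and-untag pair (assumes kSmiTag == 0):

  #include <cassert>
  #include <cstdint>

  const int kSmiTagSize = 1;

  bool IsSmi(intptr_t v) { return (v & ((1 << kSmiTagSize) - 1)) == 0; }
  intptr_t SmiUntag(intptr_t v) { return v >> kSmiTagSize; }

  int main() {
    intptr_t smi42 = 42 << kSmiTagSize;  // tag by shifting left
    assert(IsSmi(smi42) && SmiUntag(smi42) == 42);
    assert(!IsSmi(smi42 | 1));           // low bit set: heap object, not a smi
    return 0;
  }
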
2847 2720
2848 2721
2849 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2, 2722 void MacroAssembler::JumpIfEitherSmi(Register reg1, Register reg2,
2850 Label* on_either_smi) { 2723 Label* on_either_smi) {
2851 STATIC_ASSERT(kSmiTag == 0); 2724 STATIC_ASSERT(kSmiTag == 0);
2852 JumpIfSmi(reg1, on_either_smi); 2725 JumpIfSmi(reg1, on_either_smi);
2853 JumpIfSmi(reg2, on_either_smi); 2726 JumpIfSmi(reg2, on_either_smi);
(...skipping 832 matching lines...)
3686 3559
3687 if (cc == ne) { 3560 if (cc == ne) {
3688 bne(condition_met, cr0); 3561 bne(condition_met, cr0);
3689 } 3562 }
3690 if (cc == eq) { 3563 if (cc == eq) {
3691 beq(condition_met, cr0); 3564 beq(condition_met, cr0);
3692 } 3565 }
3693 } 3566 }
3694 3567
3695 3568
3696 void MacroAssembler::CheckMapDeprecated(Handle<Map> map, Register scratch,
3697 Label* if_deprecated) {
3698 if (map->CanBeDeprecated()) {
3699 mov(scratch, Operand(map));
3700 lwz(scratch, FieldMemOperand(scratch, Map::kBitField3Offset));
3701 ExtractBitMask(scratch, scratch, Map::Deprecated::kMask, SetRC);
3702 bne(if_deprecated, cr0);
3703 }
3704 }
3705
3706
3707 void MacroAssembler::JumpIfBlack(Register object, Register scratch0, 3569 void MacroAssembler::JumpIfBlack(Register object, Register scratch0,
3708 Register scratch1, Label* on_black) { 3570 Register scratch1, Label* on_black) {
3709 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern. 3571 HasColor(object, scratch0, scratch1, on_black, 1, 0); // kBlackBitPattern.
3710 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0); 3572 DCHECK(strcmp(Marking::kBlackBitPattern, "10") == 0);
3711 } 3573 }
3712 3574
3713 3575
3714 void MacroAssembler::HasColor(Register object, Register bitmap_scratch, 3576 void MacroAssembler::HasColor(Register object, Register bitmap_scratch,
3715 Register mask_scratch, Label* has_color, 3577 Register mask_scratch, Label* has_color,
3716 int first_bit, int second_bit) { 3578 int first_bit, int second_bit) {
(...skipping 172 matching lines...)
3889 3751
3890 bind(&done); 3752 bind(&done);
3891 } 3753 }
3892 3754
3893 3755
3894 // Saturate a value into 8-bit unsigned integer 3756 // Saturate a value into 8-bit unsigned integer
3895 // if input_value < 0, output_value is 0 3757 // if input_value < 0, output_value is 0
3896 // if input_value > 255, output_value is 255 3758 // if input_value > 255, output_value is 255
3897 // otherwise output_value is the input_value 3759 // otherwise output_value is the input_value
3898 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) { 3760 void MacroAssembler::ClampUint8(Register output_reg, Register input_reg) {
3899 Label done, negative_label, overflow_label;
3900 int satval = (1 << 8) - 1; 3761 int satval = (1 << 8) - 1;
3901 3762
3902 cmpi(input_reg, Operand::Zero()); 3763 if (CpuFeatures::IsSupported(ISELECT)) {
3903 blt(&negative_label); 3764 // set to 0 if negative
3765 cmpi(input_reg, Operand::Zero());
3766 isel(lt, output_reg, r0, input_reg);
3904 3767
3905 cmpi(input_reg, Operand(satval)); 3768 // set to satval if > satval
3906 bgt(&overflow_label); 3769 li(r0, Operand(satval));
3907 if (!output_reg.is(input_reg)) { 3770 cmpi(output_reg, Operand(satval));
3908 mr(output_reg, input_reg); 3771 isel(lt, output_reg, output_reg, r0);
3772 } else {
3773 Label done, negative_label, overflow_label;
3774 cmpi(input_reg, Operand::Zero());
3775 blt(&negative_label);
3776
3777 cmpi(input_reg, Operand(satval));
3778 bgt(&overflow_label);
3779 if (!output_reg.is(input_reg)) {
3780 mr(output_reg, input_reg);
3781 }
3782 b(&done);
3783
3784 bind(&negative_label);
3785 li(output_reg, Operand::Zero()); // set to 0 if negative
3786 b(&done);
3787
3788 bind(&overflow_label); // set to satval if > satval
3789 li(output_reg, Operand(satval));
3790
3791 bind(&done);
3909 } 3792 }
3910 b(&done);
3911
3912 bind(&negative_label);
3913 li(output_reg, Operand::Zero()); // set to 0 if negative
3914 b(&done);
3915
3916
3917 bind(&overflow_label); // set to satval if > satval
3918 li(output_reg, Operand(satval));
3919
3920 bind(&done);
3921 } 3793 }
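
The new ISELECT path makes the clamp branchless: one isel selects 0 for negative inputs (r0 in the RA slot reads as literal zero under the Power ISA isel rules), and a second selects satval for inputs above it. Equivalent host-side logic:

  #include <cassert>

  // Branchless saturation to [0, 255], mirroring the two isel instructions.
  int ClampUint8(int input) {
    int out = (input < 0) ? 0 : input;  // isel(lt, output, r0, input)
    out = (out < 255) ? out : 255;      // cmpi vs satval; isel(lt, out, out, r0)
    return out;
  }

  int main() {
    assert(ClampUint8(-5) == 0);
    assert(ClampUint8(128) == 128);
    assert(ClampUint8(300) == 255);
    return 0;
  }
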
3922 3794
3923 3795
3924 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); } 3796 void MacroAssembler::SetRoundingMode(FPRoundingMode RN) { mtfsfi(7, RN); }
3925 3797
3926 3798
3927 void MacroAssembler::ResetRoundingMode() { 3799 void MacroAssembler::ResetRoundingMode() {
3928 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest) 3800 mtfsfi(7, kRoundToNearest); // reset (default is kRoundToNearest)
3929 } 3801 }
3930 3802
(...skipping 44 matching lines...)
3975 3847
3976 3848
3977 void MacroAssembler::EnumLength(Register dst, Register map) { 3849 void MacroAssembler::EnumLength(Register dst, Register map) {
3978 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0); 3850 STATIC_ASSERT(Map::EnumLengthBits::kShift == 0);
3979 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset)); 3851 lwz(dst, FieldMemOperand(map, Map::kBitField3Offset));
3980 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask); 3852 ExtractBitMask(dst, dst, Map::EnumLengthBits::kMask);
3981 SmiTag(dst); 3853 SmiTag(dst);
3982 } 3854 }
3983 3855
3984 3856
3857 void MacroAssembler::LoadAccessor(Register dst, Register holder,
3858 int accessor_index,
3859 AccessorComponent accessor) {
3860 LoadP(dst, FieldMemOperand(holder, HeapObject::kMapOffset));
3861 LoadInstanceDescriptors(dst, dst);
3862 LoadP(dst,
3863 FieldMemOperand(dst, DescriptorArray::GetValueOffset(accessor_index)));
3864 const int getterOffset = AccessorPair::kGetterOffset;
3865 const int setterOffset = AccessorPair::kSetterOffset;
3866 int offset = ((accessor == ACCESSOR_GETTER) ? getterOffset : setterOffset);
3867 LoadP(dst, FieldMemOperand(dst, offset));
3868 }
3869
3870
3985 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) { 3871 void MacroAssembler::CheckEnumCache(Register null_value, Label* call_runtime) {
3986 Register empty_fixed_array_value = r9; 3872 Register empty_fixed_array_value = r9;
3987 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex); 3873 LoadRoot(empty_fixed_array_value, Heap::kEmptyFixedArrayRootIndex);
3988 Label next, start; 3874 Label next, start;
3989 mr(r5, r3); 3875 mr(r5, r3);
3990 3876
3991 // Check if the enum length field is properly initialized, indicating that 3877 // Check if the enum length field is properly initialized, indicating that
3992 // there is an enum cache. 3878 // there is an enum cache.
3993 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset)); 3879 LoadP(r4, FieldMemOperand(r5, HeapObject::kMapOffset));
3994 3880
(...skipping 420 matching lines...)
4415 And(dst, src, Operand(smi), rc); 4301 And(dst, src, Operand(smi), rc);
4416 #endif 4302 #endif
4417 } 4303 }
4418 4304
4419 4305
4420 // Load a "pointer" sized value from the memory location 4306 // Load a "pointer" sized value from the memory location
4421 void MacroAssembler::LoadP(Register dst, const MemOperand& mem, 4307 void MacroAssembler::LoadP(Register dst, const MemOperand& mem,
4422 Register scratch) { 4308 Register scratch) {
4423 int offset = mem.offset(); 4309 int offset = mem.offset();
4424 4310
4425 if (!scratch.is(no_reg) && !is_int16(offset)) { 4311 if (!is_int16(offset)) {
4426 /* cannot use d-form */ 4312 /* cannot use d-form */
4427 LoadIntLiteral(scratch, offset); 4313 DCHECK(!scratch.is(no_reg));
4314 mov(scratch, Operand(offset));
4428 #if V8_TARGET_ARCH_PPC64 4315 #if V8_TARGET_ARCH_PPC64
4429 ldx(dst, MemOperand(mem.ra(), scratch)); 4316 ldx(dst, MemOperand(mem.ra(), scratch));
4430 #else 4317 #else
4431 lwzx(dst, MemOperand(mem.ra(), scratch)); 4318 lwzx(dst, MemOperand(mem.ra(), scratch));
4432 #endif 4319 #endif
4433 } else { 4320 } else {
4434 #if V8_TARGET_ARCH_PPC64 4321 #if V8_TARGET_ARCH_PPC64
4435 int misaligned = (offset & 3); 4322 int misaligned = (offset & 3);
4436 if (misaligned) { 4323 if (misaligned) {
4437 // adjust base to conform to offset alignment requirements 4324 // adjust base to conform to offset alignment requirements
4438 // Todo: enhance to use scratch if dst is unsuitable 4325 // Todo: enhance to use scratch if dst is unsuitable
4439 DCHECK(!dst.is(r0)); 4326 DCHECK(!dst.is(r0));
4440 addi(dst, mem.ra(), Operand((offset & 3) - 4)); 4327 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4441 ld(dst, MemOperand(dst, (offset & ~3) + 4)); 4328 ld(dst, MemOperand(dst, (offset & ~3) + 4));
4442 } else { 4329 } else {
4443 ld(dst, mem); 4330 ld(dst, mem);
4444 } 4331 }
4445 #else 4332 #else
4446 lwz(dst, mem); 4333 lwz(dst, mem);
4447 #endif 4334 #endif
4448 } 4335 }
4449 } 4336 }
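
The misaligned case exists because ld on PPC64 is a DS-form instruction whose 16-bit displacement must be a multiple of 4 (the diff also now requires a valid scratch register for large offsets instead of silently skipping the x-form path). The rebasing identity used above, base += (offset & 3) - 4 with displacement (offset & ~3) + 4, preserves the effective address while aligning the displacement; a quick host-side check:

  #include <cassert>

  int main() {
    for (int offset = -1000; offset <= 1000; ++offset) {
      if ((offset & 3) == 0) continue;       // already DS-form legal
      int base_adjust = (offset & 3) - 4;    // addi(dst, mem.ra(), ...)
      int displacement = (offset & ~3) + 4;  // ld(dst, MemOperand(dst, ...))
      assert(base_adjust + displacement == offset);  // same effective address
      assert((displacement & 3) == 0);       // aligned, as DS-form requires
    }
    return 0;
  }
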
4450 4337
4451 4338
4452 // Store a "pointer" sized value to the memory location 4339 // Store a "pointer" sized value to the memory location
4453 void MacroAssembler::StoreP(Register src, const MemOperand& mem, 4340 void MacroAssembler::StoreP(Register src, const MemOperand& mem,
4454 Register scratch) { 4341 Register scratch) {
4455 int offset = mem.offset(); 4342 int offset = mem.offset();
4456 4343
4457 if (!scratch.is(no_reg) && !is_int16(offset)) { 4344 if (!is_int16(offset)) {
4458 /* cannot use d-form */ 4345 /* cannot use d-form */
4459 LoadIntLiteral(scratch, offset); 4346 DCHECK(!scratch.is(no_reg));
4347 mov(scratch, Operand(offset));
4460 #if V8_TARGET_ARCH_PPC64 4348 #if V8_TARGET_ARCH_PPC64
4461 stdx(src, MemOperand(mem.ra(), scratch)); 4349 stdx(src, MemOperand(mem.ra(), scratch));
4462 #else 4350 #else
4463 stwx(src, MemOperand(mem.ra(), scratch)); 4351 stwx(src, MemOperand(mem.ra(), scratch));
4464 #endif 4352 #endif
4465 } else { 4353 } else {
4466 #if V8_TARGET_ARCH_PPC64 4354 #if V8_TARGET_ARCH_PPC64
4467 int misaligned = (offset & 3); 4355 int misaligned = (offset & 3);
4468 if (misaligned) { 4356 if (misaligned) {
4469 // adjust base to conform to offset alignment requirements 4357 // adjust base to conform to offset alignment requirements
(...skipping 12 matching lines...)
4482 #else 4370 #else
4483 stw(src, mem); 4371 stw(src, mem);
4484 #endif 4372 #endif
4485 } 4373 }
4486 } 4374 }
4487 4375
4488 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem, 4376 void MacroAssembler::LoadWordArith(Register dst, const MemOperand& mem,
4489 Register scratch) { 4377 Register scratch) {
4490 int offset = mem.offset(); 4378 int offset = mem.offset();
4491 4379
4492 if (!scratch.is(no_reg) && !is_int16(offset)) { 4380 if (!is_int16(offset)) {
4493 /* cannot use d-form */ 4381 DCHECK(!scratch.is(no_reg));
4494 LoadIntLiteral(scratch, offset); 4382 mov(scratch, Operand(offset));
4495 #if V8_TARGET_ARCH_PPC64 4383 lwax(dst, MemOperand(mem.ra(), scratch));
4496 // lwax(dst, MemOperand(mem.ra(), scratch));
4497 DCHECK(0); // lwax not yet implemented
4498 #else
4499 lwzx(dst, MemOperand(mem.ra(), scratch));
4500 #endif
4501 } else { 4384 } else {
4502 #if V8_TARGET_ARCH_PPC64 4385 #if V8_TARGET_ARCH_PPC64
4503 int misaligned = (offset & 3); 4386 int misaligned = (offset & 3);
4504 if (misaligned) { 4387 if (misaligned) {
4505 // adjust base to conform to offset alignment requirements 4388 // adjust base to conform to offset alignment requirements
4506 // Todo: enhance to use scratch if dst is unsuitable 4389 // Todo: enhance to use scratch if dst is unsuitable
4507 DCHECK(!dst.is(r0)); 4390 DCHECK(!dst.is(r0));
4508 addi(dst, mem.ra(), Operand((offset & 3) - 4)); 4391 addi(dst, mem.ra(), Operand((offset & 3) - 4));
4509 lwa(dst, MemOperand(dst, (offset & ~3) + 4)); 4392 lwa(dst, MemOperand(dst, (offset & ~3) + 4));
4510 } else { 4393 } else {
(...skipping 31 matching lines...)
4542 4425
4543 if (!is_int16(offset)) { 4426 if (!is_int16(offset)) {
4544 LoadIntLiteral(scratch, offset); 4427 LoadIntLiteral(scratch, offset);
4545 stwx(src, MemOperand(base, scratch)); 4428 stwx(src, MemOperand(base, scratch));
4546 } else { 4429 } else {
4547 stw(src, mem); 4430 stw(src, mem);
4548 } 4431 }
4549 } 4432 }
4550 4433
4551 4434
4435 void MacroAssembler::LoadHalfWordArith(Register dst, const MemOperand& mem,
4436 Register scratch) {
4437 int offset = mem.offset();
4438
4439 if (!is_int16(offset)) {
4440 DCHECK(!scratch.is(no_reg));
4441 mov(scratch, Operand(offset));
4442 lhax(dst, MemOperand(mem.ra(), scratch));
4443 } else {
4444 lha(dst, mem);
4445 }
4446 }
4447
4448
4552 // Variable length depending on whether offset fits into immediate field 4449 // Variable length depending on whether offset fits into immediate field
4553 // MemOperand currently only supports d-form 4450 // MemOperand currently only supports d-form
4554 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem, 4451 void MacroAssembler::LoadHalfWord(Register dst, const MemOperand& mem,
4555 Register scratch) { 4452 Register scratch) {
4556 Register base = mem.ra(); 4453 Register base = mem.ra();
4557 int offset = mem.offset(); 4454 int offset = mem.offset();
4558 4455
4559 if (!is_int16(offset)) { 4456 if (!is_int16(offset)) {
4560 LoadIntLiteral(scratch, offset); 4457 LoadIntLiteral(scratch, offset);
4561 lhzx(dst, MemOperand(base, scratch)); 4458 lhzx(dst, MemOperand(base, scratch));
(...skipping 53 matching lines...)
4615 4512
4616 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem, 4513 void MacroAssembler::LoadRepresentation(Register dst, const MemOperand& mem,
4617 Representation r, Register scratch) { 4514 Representation r, Register scratch) {
4618 DCHECK(!r.IsDouble()); 4515 DCHECK(!r.IsDouble());
4619 if (r.IsInteger8()) { 4516 if (r.IsInteger8()) {
4620 LoadByte(dst, mem, scratch); 4517 LoadByte(dst, mem, scratch);
4621 extsb(dst, dst); 4518 extsb(dst, dst);
4622 } else if (r.IsUInteger8()) { 4519 } else if (r.IsUInteger8()) {
4623 LoadByte(dst, mem, scratch); 4520 LoadByte(dst, mem, scratch);
4624 } else if (r.IsInteger16()) { 4521 } else if (r.IsInteger16()) {
4625 LoadHalfWord(dst, mem, scratch); 4522 LoadHalfWordArith(dst, mem, scratch);
4626 extsh(dst, dst);
4627 } else if (r.IsUInteger16()) { 4523 } else if (r.IsUInteger16()) {
4628 LoadHalfWord(dst, mem, scratch); 4524 LoadHalfWord(dst, mem, scratch);
4629 #if V8_TARGET_ARCH_PPC64 4525 #if V8_TARGET_ARCH_PPC64
4630 } else if (r.IsInteger32()) { 4526 } else if (r.IsInteger32()) {
4631 LoadWord(dst, mem, scratch); 4527 LoadWordArith(dst, mem, scratch);
4632 #endif 4528 #endif
4633 } else { 4529 } else {
4634 LoadP(dst, mem, scratch); 4530 LoadP(dst, mem, scratch);
4635 } 4531 }
4636 } 4532 }
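
Using LoadHalfWordArith (lha) for Integer16 folds the old lhz + extsh pair into one sign-extending load: lhz zero-extends the halfword, lha sign-extends it. The difference in one sketch:

  #include <cassert>
  #include <cstdint>

  int main() {
    int16_t stored = -2;
    uint16_t raw = static_cast<uint16_t>(stored);        // 0xFFFE in memory
    int32_t zero_extended = raw;                         // lhz: 0x0000FFFE
    int32_t sign_extended = static_cast<int16_t>(raw);   // lha: -2
    assert(zero_extended == 0xFFFE);
    assert(sign_extended == -2);
    return 0;
  }
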
4637 4533
4638 4534
4639 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem, 4535 void MacroAssembler::StoreRepresentation(Register src, const MemOperand& mem,
4640 Representation r, Register scratch) { 4536 Representation r, Register scratch) {
4641 DCHECK(!r.IsDouble()); 4537 DCHECK(!r.IsDouble());
4642 if (r.IsInteger8() || r.IsUInteger8()) { 4538 if (r.IsInteger8() || r.IsUInteger8()) {
4643 StoreByte(src, mem, scratch); 4539 StoreByte(src, mem, scratch);
4644 } else if (r.IsInteger16() || r.IsUInteger16()) { 4540 } else if (r.IsInteger16() || r.IsUInteger16()) {
4645 StoreHalfWord(src, mem, scratch); 4541 StoreHalfWord(src, mem, scratch);
4646 #if V8_TARGET_ARCH_PPC64 4542 #if V8_TARGET_ARCH_PPC64
4647 } else if (r.IsInteger32()) { 4543 } else if (r.IsInteger32()) {
4648 StoreWord(src, mem, scratch); 4544 StoreWord(src, mem, scratch);
4649 #endif 4545 #endif
4650 } else { 4546 } else {
4651 if (r.IsHeapObject()) { 4547 if (r.IsHeapObject()) {
4652 AssertNotSmi(src); 4548 AssertNotSmi(src);
4653 } else if (r.IsSmi()) { 4549 } else if (r.IsSmi()) {
4654 AssertSmi(src); 4550 AssertSmi(src);
4655 } 4551 }
4656 StoreP(src, mem, scratch); 4552 StoreP(src, mem, scratch);
4657 } 4553 }
4658 } 4554 }
4659 4555
4660 4556
4557 void MacroAssembler::LoadDouble(DoubleRegister dst, const MemOperand& mem,
4558 Register scratch) {
4559 Register base = mem.ra();
4560 int offset = mem.offset();
4561
4562 if (!is_int16(offset)) {
4563 mov(scratch, Operand(offset));
4564 lfdx(dst, MemOperand(base, scratch));
4565 } else {
4566 lfd(dst, mem);
4567 }
4568 }
4569
4570
4571 void MacroAssembler::StoreDouble(DoubleRegister src, const MemOperand& mem,
4572 Register scratch) {
4573 Register base = mem.ra();
4574 int offset = mem.offset();
4575
4576 if (!is_int16(offset)) {
4577 mov(scratch, Operand(offset));
4578 stfdx(src, MemOperand(base, scratch));
4579 } else {
4580 stfd(src, mem);
4581 }
4582 }
4583
4584
4661 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg, 4585 void MacroAssembler::TestJSArrayForAllocationMemento(Register receiver_reg,
4662 Register scratch_reg, 4586 Register scratch_reg,
4663 Label* no_memento_found) { 4587 Label* no_memento_found) {
4664 ExternalReference new_space_start = 4588 ExternalReference new_space_start =
4665 ExternalReference::new_space_start(isolate()); 4589 ExternalReference::new_space_start(isolate());
4666 ExternalReference new_space_allocation_top = 4590 ExternalReference new_space_allocation_top =
4667 ExternalReference::new_space_allocation_top_address(isolate()); 4591 ExternalReference::new_space_allocation_top_address(isolate());
4668 addi(scratch_reg, receiver_reg, 4592 addi(scratch_reg, receiver_reg,
4669 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag)); 4593 Operand(JSArray::kSize + AllocationMemento::kSize - kHeapObjectTag));
4670 Cmpi(scratch_reg, Operand(new_space_start), r0); 4594 Cmpi(scratch_reg, Operand(new_space_start), r0);
(...skipping 139 matching lines...)
4810 } 4734 }
4811 if (mag.shift > 0) srawi(result, result, mag.shift); 4735 if (mag.shift > 0) srawi(result, result, mag.shift);
4812 ExtractBit(r0, dividend, 31); 4736 ExtractBit(r0, dividend, 31);
4813 add(result, result, r0); 4737 add(result, result, r0);
4814 } 4738 }
4815 4739
4816 } // namespace internal 4740 } // namespace internal
4817 } // namespace v8 4741 } // namespace v8
4818 4742
4819 #endif // V8_TARGET_ARCH_PPC 4743 #endif // V8_TARGET_ARCH_PPC
