OLD | NEW |
1 // Copyright 2014 the V8 project authors. All rights reserved. | 1 // Copyright 2014 the V8 project authors. All rights reserved. |
2 // Use of this source code is governed by a BSD-style license that can be | 2 // Use of this source code is governed by a BSD-style license that can be |
3 // found in the LICENSE file. | 3 // found in the LICENSE file. |
4 | 4 |
5 #if V8_TARGET_ARCH_PPC | 5 #if V8_TARGET_ARCH_PPC |
6 | 6 |
7 #include "src/code-stubs.h" | 7 #include "src/code-stubs.h" |
8 #include "src/api-arguments.h" | 8 #include "src/api-arguments.h" |
9 #include "src/base/bits.h" | 9 #include "src/base/bits.h" |
10 #include "src/bootstrapper.h" | 10 #include "src/bootstrapper.h" |
(...skipping 5890 matching lines...) |
5901 { | 5901 { |
5902 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); | 5902 FrameAndConstantPoolScope scope(masm, StackFrame::INTERNAL); |
5903 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); | 5903 __ CallRuntimeSaveDoubles(Runtime::kAllocateHeapNumber); |
5904 __ stfd(value, FieldMemOperand(r3, HeapNumber::kValueOffset)); | 5904 __ stfd(value, FieldMemOperand(r3, HeapNumber::kValueOffset)); |
5905 } | 5905 } |
5906 __ blr(); | 5906 __ blr(); |
5907 } | 5907 } |
5908 | 5908 |
5909 } // anonymous namespace | 5909 } // anonymous namespace |
5910 | 5910 |
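Note: the fragment above is the tail of a helper that boxes a double result. It calls Runtime::kAllocateHeapNumber with double registers preserved, stores the FP value into the new HeapNumber's value field, and returns. A rough C++ analogue, with hypothetical names and a plain struct standing in for V8's heap object (sketch only, not part of this patch):

    // Sketch only: a plain-C++ stand-in for the boxing helper above.
    struct HeapNumber { double value; };  // stands in for V8's HeapNumber

    HeapNumber* AllocateHeapNumber(double value) {  // hypothetical name
      HeapNumber* result = new HeapNumber;  // ~ Runtime::kAllocateHeapNumber
      result->value = value;                // ~ stfd into kValueOffset
      return result;                        // ~ blr
    }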
| 5911 #define ASSEMBLE_ATOMIC_LOAD(instr, dst, base, index) \ |
| 5912 do { \ |
| 5913 Label not_taken; \ |
| 5914 __ sync(); \ |
| 5915 __ instr(dst, MemOperand(base, index)); \ |
| 5916 __ bind(&not_taken); \ |
| 5917 __ cmp(dst, dst); \ |
| 5918 __ bne(&not_taken); \ |
| 5919 __ isync(); \ |
| 5920 } while (0) |
| 5921 |
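Note: the ASSEMBLE_ATOMIC_LOAD macro introduced above replaces the per-site load-then-lwsync sequences with the documented seq_cst load mapping for POWER: a full sync before the load, then an always-equal compare and a never-taken branch on the loaded value, followed by isync. The compare/branch creates a control dependency on the load, and isync discards speculation past it, so the pair acts as an acquire barrier. A minimal standalone C++ sketch of the same idiom (illustration only, not part of this patch):

    #include <atomic>
    #include <cstdint>

    std::atomic<int32_t> cell{0};

    int32_t SeqCstLoad() {
      // On POWER this load is lowered to roughly the macro's sequence:
      //   sync              // full barrier before the load
      //   lwz   r3, cell    // the load itself
      //   cmpw  r3, r3      // always-equal compare on the loaded value
      //   bne-  skip        // never taken; forms a control dependency
      // skip:
      //   isync             // completes the acquire side
      return cell.load(std::memory_order_seq_cst);
    }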
5911 void AtomicsLoadStub::Generate(MacroAssembler* masm) { | 5922 void AtomicsLoadStub::Generate(MacroAssembler* masm) { |
5912 Register object = r4; | 5923 Register object = r4; |
5913 Register index = r3; // Index is an untagged word32. | 5924 Register index = r3; // Index is an untagged word32. |
5914 Register backing_store = r5; | 5925 Register backing_store = r5; |
5915 Label table, i8, u8, i16, u16, i32, u32; | 5926 Label table, i8, u8, i16, u16, i32, u32; |
5916 | 5927 |
5917 GetTypedArrayBackingStore(masm, backing_store, object, r6, d0); | 5928 GetTypedArrayBackingStore(masm, backing_store, object, r6, d0); |
5918 TypedArrayJumpTablePrologue(masm, object, r6, r7, &table); | 5929 TypedArrayJumpTablePrologue(masm, object, r6, r7, &table); |
5919 | 5930 |
5920 __ bind(&i8); | 5931 __ bind(&i8); |
5921 __ lbzx(r3, MemOperand(backing_store, index)); | 5932 ASSEMBLE_ATOMIC_LOAD(lbzx, r3, backing_store, index); |
5922 __ lwsync(); | |
5923 __ extsb(r3, r3); | 5933 __ extsb(r3, r3); |
5924 __ SmiTag(r3); | 5934 __ SmiTag(r3); |
5925 __ blr(); | 5935 __ blr(); |
5926 | 5936 |
5927 __ bind(&u8); | 5937 __ bind(&u8); |
5928 __ lbzx(r3, MemOperand(backing_store, index)); | 5938 ASSEMBLE_ATOMIC_LOAD(lbzx, r3, backing_store, index); |
5929 __ lwsync(); | |
5930 __ SmiTag(r3); | 5939 __ SmiTag(r3); |
5931 __ blr(); | 5940 __ blr(); |
5932 | 5941 |
5933 __ bind(&i16); | 5942 __ bind(&i16); |
5934 __ ShiftLeftImm(index, index, Operand(1)); | 5943 __ ShiftLeftImm(index, index, Operand(1)); |
5935 __ lhax(r3, MemOperand(backing_store, index)); | 5944 ASSEMBLE_ATOMIC_LOAD(lhax, r3, backing_store, index); |
5936 __ lwsync(); | |
5937 __ SmiTag(r3); | 5945 __ SmiTag(r3); |
5938 __ blr(); | 5946 __ blr(); |
5939 | 5947 |
5940 __ bind(&u16); | 5948 __ bind(&u16); |
5941 __ ShiftLeftImm(index, index, Operand(1)); | 5949 __ ShiftLeftImm(index, index, Operand(1)); |
5942 __ lhzx(r3, MemOperand(backing_store, index)); | 5950 ASSEMBLE_ATOMIC_LOAD(lhzx, r3, backing_store, index); |
5943 __ lwsync(); | |
5944 __ SmiTag(r3); | 5951 __ SmiTag(r3); |
5945 __ blr(); | 5952 __ blr(); |
5946 | 5953 |
5947 Label use_heap_number; | 5954 Label use_heap_number; |
5948 | 5955 |
5949 __ bind(&i32); | 5956 __ bind(&i32); |
5950 __ ShiftLeftImm(index, index, Operand(2)); | 5957 __ ShiftLeftImm(index, index, Operand(2)); |
5951 __ lwax(r3, MemOperand(backing_store, index)); | 5958 ASSEMBLE_ATOMIC_LOAD(lwax, r3, backing_store, index); |
5952 __ lwsync(); | |
5953 #if V8_TARGET_ARCH_PPC64 | 5959 #if V8_TARGET_ARCH_PPC64 |
5954 __ SmiTag(r3); | 5960 __ SmiTag(r3); |
5955 __ blr(); | 5961 __ blr(); |
5956 #else | 5962 #else |
5957 ReturnInteger32(masm, d0, r3, &use_heap_number); | 5963 ReturnInteger32(masm, d0, r3, &use_heap_number); |
5958 #endif | 5964 #endif |
5959 | 5965 |
5960 __ bind(&u32); | 5966 __ bind(&u32); |
5961 __ ShiftLeftImm(index, index, Operand(2)); | 5967 __ ShiftLeftImm(index, index, Operand(2)); |
5962 __ lwzx(r3, MemOperand(backing_store, index)); | 5968 ASSEMBLE_ATOMIC_LOAD(lwzx, r3, backing_store, index); |
5963 __ lwsync(); | |
5964 ReturnUnsignedInteger32(masm, d0, r3, &use_heap_number); | 5969 ReturnUnsignedInteger32(masm, d0, r3, &use_heap_number); |
5965 | 5970 |
5966 __ bind(&use_heap_number); | 5971 __ bind(&use_heap_number); |
5967 ReturnAllocatedHeapNumber(masm, d0, r4, r5, r6); | 5972 ReturnAllocatedHeapNumber(masm, d0, r4, r5, r6); |
5968 | 5973 |
5969 TypedArrayJumpTableEpilogue(masm, &table, &i8, &u8, &i16, &u16, &i32, &u32, | 5974 TypedArrayJumpTableEpilogue(masm, &table, &i8, &u8, &i16, &u16, &i32, &u32, |
5970 &u8); | 5975 &u8); |
5971 } | 5976 } |
5972 | 5977 |
5973 #undef __ | 5978 #undef __ |
5974 } // namespace internal | 5979 } // namespace internal |
5975 } // namespace v8 | 5980 } // namespace v8 |
5976 | 5981 |
5977 #endif // V8_TARGET_ARCH_PPC | 5982 #endif // V8_TARGET_ARCH_PPC |
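For context, AtomicsLoadStub dispatches on the typed-array element kind, performs the barriered load through the macro, widens the value (extsb for i8; lhax already sign-extends for i16), and Smi-tags the result, boxing it in a HeapNumber when it does not fit. A rough standalone C++ analogue of two of the element paths, assuming plain host memory (function names are hypothetical, and the strict-aliasing caveat of casting raw bytes to std::atomic is glossed over here):

    #include <atomic>
    #include <cstddef>
    #include <cstdint>

    // Int16Array path: the index is scaled by the element size (<< 1), the
    // load is atomic, and the halfword is sign-extended on load -- the
    // analogue of the stub's ShiftLeftImm + lhax sequence.
    int32_t AtomicsLoadI16(uint8_t* backing_store, uint32_t index) {
      auto* slot = reinterpret_cast<std::atomic<int16_t>*>(
          backing_store + (static_cast<size_t>(index) << 1));
      return slot->load(std::memory_order_seq_cst);
    }

    // Uint32Array path: the index is scaled by << 2. Loaded values too large
    // for a Smi are why ReturnUnsignedInteger32 can fall through to the
    // use_heap_number label and box the result in a HeapNumber instead.
    uint32_t AtomicsLoadU32(uint8_t* backing_store, uint32_t index) {
      auto* slot = reinterpret_cast<std::atomic<uint32_t>*>(
          backing_store + (static_cast<size_t>(index) << 2));
      return slot->load(std::memory_order_seq_cst);
    }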