OLD | NEW |
1 // Copyright 2011 the V8 project authors. All rights reserved. | 1 // Copyright 2011 the V8 project authors. All rights reserved. |
2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
4 // met: | 4 // met: |
5 // | 5 // |
6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
(...skipping 12 matching lines...) |
23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
27 | 27 |
28 #include "v8.h" | 28 #include "v8.h" |
29 | 29 |
30 #if defined(V8_TARGET_ARCH_ARM) | 30 #if defined(V8_TARGET_ARCH_ARM) |
31 | 31 |
32 #include "codegen.h" | 32 #include "codegen.h" |
| 33 #include "macro-assembler.h" |
33 | 34 |
34 namespace v8 { | 35 namespace v8 { |
35 namespace internal { | 36 namespace internal { |
36 | 37 |
| 38 #define __ ACCESS_MASM(masm) |
| 39 |
37 // ------------------------------------------------------------------------- | 40 // ------------------------------------------------------------------------- |
38 // Platform-specific RuntimeCallHelper functions. | 41 // Platform-specific RuntimeCallHelper functions. |
39 | 42 |
40 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { | 43 void StubRuntimeCallHelper::BeforeCall(MacroAssembler* masm) const { |
41 masm->EnterFrame(StackFrame::INTERNAL); | 44 masm->EnterFrame(StackFrame::INTERNAL); |
42 ASSERT(!masm->has_frame()); | 45 ASSERT(!masm->has_frame()); |
43 masm->set_has_frame(true); | 46 masm->set_has_frame(true); |
44 } | 47 } |
45 | 48 |
46 | 49 |
47 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { | 50 void StubRuntimeCallHelper::AfterCall(MacroAssembler* masm) const { |
48 masm->LeaveFrame(StackFrame::INTERNAL); | 51 masm->LeaveFrame(StackFrame::INTERNAL); |
49 ASSERT(masm->has_frame()); | 52 ASSERT(masm->has_frame()); |
50 masm->set_has_frame(false); | 53 masm->set_has_frame(false); |
51 } | 54 } |
52 | 55 |
53 | 56 |
| 57 // ------------------------------------------------------------------------- |
| 58 // Code generators |
| 59 |
| 60 void ElementsTransitionGenerator::GenerateSmiOnlyToObject( |
| 61 MacroAssembler* masm) { |
| 62 // ----------- S t a t e ------------- |
| 63 // -- r0 : value |
| 64 // -- r1 : key |
| 65 // -- r2 : receiver |
| 66 // -- lr : return address |
| 67 // -- r3 : target map, scratch for subsequent call |
| 68 // -- r4 : scratch (elements) |
| 69 // ----------------------------------- |
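 | // A smi-only backing store is already a FixedArray of tagged smis, |
 | // which is a valid object-elements backing store, so only the map |
 | // needs to be updated; the elements themselves stay in place. |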
| 70 // Set transitioned map. |
| 71 __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); |
| 72 __ RecordWriteField(r2, |
| 73 HeapObject::kMapOffset, |
| 74 r3, |
| 75 r9, |
| 76 kLRHasNotBeenSaved, |
| 77 kDontSaveFPRegs, |
| 78 EMIT_REMEMBERED_SET, |
| 79 OMIT_SMI_CHECK); |
| 80 } |
| 81 |
| 82 |
| 83 void ElementsTransitionGenerator::GenerateSmiOnlyToDouble( |
| 84 MacroAssembler* masm, Label* fail) { |
| 85 // ----------- S t a t e ------------- |
| 86 // -- r0 : value |
| 87 // -- r1 : key |
| 88 // -- r2 : receiver |
| 89 // -- lr : return address |
| 90 // -- r3 : target map, scratch for subsequent call |
| 91 // -- r4 : scratch (elements) |
| 92 // ----------------------------------- |
| 93 Label loop, entry, convert_hole; |
| 94 bool vfp3_supported = CpuFeatures::IsSupported(VFP3); |
| 95 |
| 96 __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); |
| 97 __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset)); |
| 98 // r4: source FixedArray |
| 99 // r5: number of elements (smi-tagged) |
| 100 |
| 101 // Allocate new FixedDoubleArray. |
| 102 __ mov(ip, Operand(FixedDoubleArray::kHeaderSize)); |
| 103 __ add(ip, ip, Operand(r5, LSL, 2)); |
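 | // r5 is the smi-tagged length (length << 1), so LSL #2 scales it to |
 | // length * 8 bytes, one double per element. |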
| 104 __ AllocateInNewSpace(ip, r6, r7, r9, fail, NO_ALLOCATION_FLAGS); |
| 105 // r6: destination FixedDoubleArray, not tagged as heap object |
| 106 __ LoadRoot(r9, Heap::kFixedDoubleArrayMapRootIndex); |
| 107 __ str(r9, MemOperand(r6, HeapObject::kMapOffset)); |
| 108 // Set destination FixedDoubleArray's length. |
| 109 __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset)); |
| 110 // Update receiver's map. |
| 111 __ push(lr); |
| 112 __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); |
| 113 __ RecordWriteField(r2, |
| 114 HeapObject::kMapOffset, |
| 115 r3, |
| 116 r9, |
| 117 kLRHasBeenSaved, |
| 118 kDontSaveFPRegs, |
| 119 EMIT_REMEMBERED_SET, |
| 120 OMIT_SMI_CHECK); |
| 121 // Replace receiver's backing store with newly created FixedDoubleArray. |
| 122 __ add(r3, r6, Operand(kHeapObjectTag)); |
| 123 __ str(r3, FieldMemOperand(r2, JSObject::kElementsOffset)); |
| 124 __ RecordWriteField(r2, |
| 125 JSObject::kElementsOffset, |
| 126 r3, |
| 127 r9, |
| 128 kLRHasBeenSaved, |
| 129 kDontSaveFPRegs, |
| 130 EMIT_REMEMBERED_SET, |
| 131 OMIT_SMI_CHECK); |
| 132 __ pop(lr); |
| 133 |
| 134 // Prepare for conversion loop. |
| 135 __ add(r3, r4, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 136 __ add(r7, r6, Operand(FixedDoubleArray::kHeaderSize)); |
| 137 __ add(r6, r7, Operand(r5, LSL, 2)); |
| 138 __ mov(r4, Operand(kHoleNanLower32)); |
| 139 __ mov(r5, Operand(kHoleNanUpper32)); |
| 140 // r3: begin of source FixedArray element fields, not tagged |
| 141 // r4: kHoleNanLower32 |
| 142 // r5: kHoleNanUpper32 |
| 143 // r6: end of destination FixedDoubleArray, not tagged |
| 144 // r7: begin of FixedDoubleArray element fields, not tagged |
| 145 if (vfp3_supported) __ Push(r1, r0); |
| 146 |
| 147 __ b(&entry); |
| 148 // Convert and copy elements. |
| 149 __ bind(&loop); |
| 150 __ ldr(r9, MemOperand(r3, 4, PostIndex)); |
| 151 // r9: current element |
| 152 __ JumpIfNotSmi(r9, &convert_hole); |
| 153 |
| 154 // Normal smi, convert to double and store. |
| 155 __ SmiUntag(r9); |
| 156 if (vfp3_supported) { |
| 157 CpuFeatures::Scope scope(VFP3); |
| 158 __ vmov(s0, r9); |
| 159 __ vcvt_f64_s32(d0, s0); |
| 160 __ vstr(d0, r7, 0); |
| 161 __ add(r7, r7, Operand(8)); |
| 162 } else { |
| 163 FloatingPointHelper::ConvertIntToDouble(masm, |
| 164 r9, |
| 165 FloatingPointHelper::kCoreRegisters, |
| 166 d0, |
| 167 r0, |
| 168 r1, |
| 169 ip, |
| 170 s0); |
| 171 __ Strd(r0, r1, MemOperand(r7, 8, PostIndex)); |
| 172 } |
| 173 __ b(&entry); |
| 174 |
| 175 // Hole found, store the-hole NaN. |
| 176 __ bind(&convert_hole); |
| 177 __ Strd(r4, r5, MemOperand(r7, 8, PostIndex)); |
| 178 |
| 179 __ bind(&entry); |
| 180 __ cmp(r7, r6); |
| 181 __ b(lt, &loop); |
| 182 |
| 183 if (vfp3_supported) __ Pop(r1, r0); |
| 184 } |
| 185 |
| 186 |
| 187 void ElementsTransitionGenerator::GenerateDoubleToObject( |
| 188 MacroAssembler* masm, Label* fail) { |
| 189 // ----------- S t a t e ------------- |
| 190 // -- r0 : value |
| 191 // -- r1 : key |
| 192 // -- r2 : receiver |
| 193 // -- lr : return address |
| 194 // -- r3 : target map, scratch for subsequent call |
| 195 // -- r4 : scratch (elements) |
| 196 // ----------------------------------- |
| 197 Label entry, loop, convert_hole, gc_required; |
| 198 |
| 199 __ push(lr); |
| 200 __ Push(r3, r2, r1, r0); |
| 201 |
| 202 __ ldr(r4, FieldMemOperand(r2, JSObject::kElementsOffset)); |
| 203 __ ldr(r5, FieldMemOperand(r4, FixedArray::kLengthOffset)); |
| 204 // r4: source FixedDoubleArray |
| 205 // r5: number of elements (smi-tagged) |
| 206 |
| 207 // Allocate new FixedArray. |
| 208 __ mov(r0, Operand(FixedDoubleArray::kHeaderSize)); |
| 209 __ add(r0, r0, Operand(r5, LSL, 1)); |
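 | // r5 is the smi-tagged length (length << 1), so LSL #1 scales it to |
 | // length * 4 bytes, one tagged pointer per element. |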
| 210 __ AllocateInNewSpace(r0, r6, r7, r9, &gc_required, NO_ALLOCATION_FLAGS); |
| 211 // r6: destination FixedArray, not tagged as heap object |
| 212 __ LoadRoot(r9, Heap::kFixedArrayMapRootIndex); |
| 213 __ str(r9, MemOperand(r6, HeapObject::kMapOffset)); |
 | 214 // Set destination FixedArray's length. |
| 215 __ str(r5, MemOperand(r6, FixedDoubleArray::kLengthOffset)); |
| 216 |
| 217 // Prepare for conversion loop. |
| 218 __ add(r4, r4, Operand(FixedDoubleArray::kHeaderSize - kHeapObjectTag + 4)); |
| 219 __ add(r3, r6, Operand(FixedArray::kHeaderSize)); |
| 220 __ add(r6, r6, Operand(kHeapObjectTag)); |
| 221 __ add(r5, r3, Operand(r5, LSL, 1)); |
| 222 __ LoadRoot(r7, Heap::kTheHoleValueRootIndex); |
| 223 __ LoadRoot(r9, Heap::kHeapNumberMapRootIndex); |
 | 224 // Use offset addresses in r4 to take full advantage of post-indexing. |
| 225 // r3: begin of destination FixedArray element fields, not tagged |
| 226 // r4: begin of source FixedDoubleArray element fields, not tagged, +4 |
| 227 // r5: end of destination FixedArray, not tagged |
| 228 // r6: destination FixedArray |
| 229 // r7: the-hole pointer |
| 230 // r9: heap number map |
| 231 __ b(&entry); |
| 232 |
| 233 // Call into runtime if GC is required. |
| 234 __ bind(&gc_required); |
| 235 __ Pop(r3, r2, r1, r0); |
| 236 __ pop(lr); |
| 237 __ b(fail); |
| 238 |
| 239 __ bind(&loop); |
| 240 __ ldr(r1, MemOperand(r4, 8, PostIndex)); |
 | 241 // r1: current element's upper 32 bits |
 | 242 // r4: address of next element's upper 32 bits |
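 | // The hole is stored as a NaN whose upper word is kHoleNanUpper32, so |
 | // comparing the upper 32 bits just loaded into r1 detects it. |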
| 243 __ cmp(r1, Operand(kHoleNanUpper32)); |
| 244 __ b(eq, &convert_hole); |
| 245 |
| 246 // Non-hole double, copy value into a heap number. |
| 247 __ AllocateHeapNumber(r2, r0, lr, r9, &gc_required); |
| 248 // r2: new heap number |
| 249 __ ldr(r0, MemOperand(r4, 12, NegOffset)); |
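 | // r0: current element's lower 32 bits (r4 already points 8 bytes past |
 | // the current upper word, hence the -12 offset). |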
| 250 __ Strd(r0, r1, FieldMemOperand(r2, HeapNumber::kValueOffset)); |
| 251 __ mov(r0, r3); |
| 252 __ str(r2, MemOperand(r3, 4, PostIndex)); |
| 253 __ RecordWrite(r6, |
| 254 r0, |
| 255 r2, |
| 256 kLRHasBeenSaved, |
| 257 kDontSaveFPRegs, |
| 258 EMIT_REMEMBERED_SET, |
| 259 OMIT_SMI_CHECK); |
| 260 __ b(&entry); |
| 261 |
| 262 // Replace the-hole NaN with the-hole pointer. |
| 263 __ bind(&convert_hole); |
| 264 __ str(r7, MemOperand(r3, 4, PostIndex)); |
| 265 |
| 266 __ bind(&entry); |
| 267 __ cmp(r3, r5); |
| 268 __ b(lt, &loop); |
| 269 |
| 270 __ Pop(r3, r2, r1, r0); |
| 271 // Update receiver's map. |
| 272 __ str(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); |
| 273 __ RecordWriteField(r2, |
| 274 HeapObject::kMapOffset, |
| 275 r3, |
| 276 r9, |
| 277 kLRHasBeenSaved, |
| 278 kDontSaveFPRegs, |
| 279 EMIT_REMEMBERED_SET, |
| 280 OMIT_SMI_CHECK); |
| 281 // Replace receiver's backing store with newly created and filled FixedArray. |
| 282 __ str(r6, FieldMemOperand(r2, JSObject::kElementsOffset)); |
| 283 __ RecordWriteField(r2, |
| 284 JSObject::kElementsOffset, |
| 285 r6, |
| 286 r9, |
| 287 kLRHasBeenSaved, |
| 288 kDontSaveFPRegs, |
| 289 EMIT_REMEMBERED_SET, |
| 290 OMIT_SMI_CHECK); |
| 291 __ pop(lr); |
| 292 } |
| 293 |
| 294 #undef __ |
| 295 |
54 } } // namespace v8::internal | 296 } } // namespace v8::internal |
55 | 297 |
56 #endif // V8_TARGET_ARCH_ARM | 298 #endif // V8_TARGET_ARCH_ARM |