| OLD | NEW |
| 1 // Copyright 2012 the V8 project authors. All rights reserved. | 1 // Copyright 2011-2012 the V8 project authors. All rights reserved. |
| 2 // Redistribution and use in source and binary forms, with or without | 2 // Redistribution and use in source and binary forms, with or without |
| 3 // modification, are permitted provided that the following conditions are | 3 // modification, are permitted provided that the following conditions are |
| 4 // met: | 4 // met: |
| 5 // | 5 // |
| 6 // * Redistributions of source code must retain the above copyright | 6 // * Redistributions of source code must retain the above copyright |
| 7 // notice, this list of conditions and the following disclaimer. | 7 // notice, this list of conditions and the following disclaimer. |
| 8 // * Redistributions in binary form must reproduce the above | 8 // * Redistributions in binary form must reproduce the above |
| 9 // copyright notice, this list of conditions and the following | 9 // copyright notice, this list of conditions and the following |
| 10 // disclaimer in the documentation and/or other materials provided | 10 // disclaimer in the documentation and/or other materials provided |
| 11 // with the distribution. | 11 // with the distribution. |
| 12 // * Neither the name of Google Inc. nor the names of its | 12 // * Neither the name of Google Inc. nor the names of its |
| 13 // contributors may be used to endorse or promote products derived | 13 // contributors may be used to endorse or promote products derived |
| 14 // from this software without specific prior written permission. | 14 // from this software without specific prior written permission. |
| 15 // | 15 // |
| 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS | 16 // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS |
| 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT | 17 // "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT |
| 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR | 18 // LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR |
| 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT | 19 // A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT |
| 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, | 20 // OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, |
| 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT | 21 // SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT |
| 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, | 22 // LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, |
| 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY | 23 // DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY |
| 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT | 24 // THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT |
| 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE | 25 // (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE |
| 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. | 26 // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. |
| 27 | 27 |
| 28 #include "v8.h" | 28 #include "v8.h" |
| 29 | 29 |
| 30 #if defined(V8_TARGET_ARCH_ARM) | 30 #if defined(V8_TARGET_ARCH_SH4) |
| 31 | 31 |
| 32 #include "ic-inl.h" | 32 #include "ic-inl.h" |
| 33 #include "codegen.h" | 33 #include "codegen.h" |
| 34 #include "stub-cache.h" | 34 #include "stub-cache.h" |
| 35 | 35 |
| 36 namespace v8 { | 36 namespace v8 { |
| 37 namespace internal { | 37 namespace internal { |
| 38 | 38 |
| 39 #define __ ACCESS_MASM(masm) | 39 #define __ ACCESS_MASM(masm) |
| 40 | 40 |
| | 41 #include "map-sh4.h" // Define register map |
| 41 | 42 |
| 42 static void ProbeTable(Isolate* isolate, | 43 static void ProbeTable(Isolate* isolate, |
| 43 MacroAssembler* masm, | 44 MacroAssembler* masm, |
| 44 Code::Flags flags, | 45 Code::Flags flags, |
| 45 StubCache::Table table, | 46 StubCache::Table table, |
| 46 Register receiver, | 47 Register receiver, |
| 47 Register name, | 48 Register name, |
| 48 // Number of the cache entry, not scaled. | 49 // Number of the cache entry, not scaled. |
| 49 Register offset, | 50 Register offset, |
| 50 Register scratch, | 51 Register scratch, |
| 51 Register scratch2, | 52 Register scratch2, |
| 52 Register offset_scratch) { | 53 Register offset_scratch) { |
| 53 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); | 54 ExternalReference key_offset(isolate->stub_cache()->key_reference(table)); |
| 54 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); | 55 ExternalReference value_offset(isolate->stub_cache()->value_reference(table)); |
| 55 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); | 56 ExternalReference map_offset(isolate->stub_cache()->map_reference(table)); |
| 56 | 57 |
| 57 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); | 58 uint32_t key_off_addr = reinterpret_cast<uint32_t>(key_offset.address()); |
| 58 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); | 59 uint32_t value_off_addr = reinterpret_cast<uint32_t>(value_offset.address()); |
| 59 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); | 60 uint32_t map_off_addr = reinterpret_cast<uint32_t>(map_offset.address()); |
| 60 | 61 |
| 61 // Check the relative positions of the address fields. | 62 // Check the relative positions of the address fields. |
| 62 ASSERT(value_off_addr > key_off_addr); | 63 ASSERT(value_off_addr > key_off_addr); |
| 63 ASSERT((value_off_addr - key_off_addr) % 4 == 0); | 64 ASSERT((value_off_addr - key_off_addr) % 4 == 0); |
| 64 ASSERT((value_off_addr - key_off_addr) < (256 * 4)); | 65 ASSERT((value_off_addr - key_off_addr) < (256 * 4)); |
| 65 ASSERT(map_off_addr > key_off_addr); | 66 ASSERT(map_off_addr > key_off_addr); |
| 66 ASSERT((map_off_addr - key_off_addr) % 4 == 0); | 67 ASSERT((map_off_addr - key_off_addr) % 4 == 0); |
| 67 ASSERT((map_off_addr - key_off_addr) < (256 * 4)); | 68 ASSERT((map_off_addr - key_off_addr) < (256 * 4)); |
| 68 | 69 |
| | 70 // Check that ip is not used. |
| | 71 ASSERT(!name.is(ip) && !offset.is(ip) && !scratch.is(ip) && !scratch2.is(ip)); |
| | 72 |
| | 73 |
| 69 Label miss; | 74 Label miss; |
| 70 Register base_addr = scratch; | 75 Register base_addr = scratch; |
| 71 scratch = no_reg; | 76 scratch = no_reg; |
| 72 | 77 |
| 73 // Multiply by 3 because there are 3 fields per entry (name, code, map). | 78 // Multiply by 3 because there are 3 fields per entry (name, code, map). |
| 74 __ add(offset_scratch, offset, Operand(offset, LSL, 1)); | 79 __ lsl(offset_scratch, offset, Operand(1)); |
| | 80 __ add(offset_scratch, offset, offset_scratch); |
| 75 | 81 |
| 76 // Calculate the base address of the entry. | 82 // Calculate the base address of the entry. |
| 77 __ mov(base_addr, Operand(key_offset)); | 83 __ mov(base_addr, Operand(key_offset)); |
| 78 __ add(base_addr, base_addr, Operand(offset_scratch, LSL, kPointerSizeLog2)); | 84 __ lsl(offset_scratch, offset_scratch, Operand(kPointerSizeLog2)); |
| | 85 __ add(base_addr, base_addr, offset_scratch); |
| 79 | 86 |
| 80 // Check that the key in the entry matches the name. | 87 // Check that the key in the entry matches the name. |
| 81 __ ldr(ip, MemOperand(base_addr, 0)); | 88 __ ldr(ip, MemOperand(base_addr, 0)); |
| 82 __ cmp(name, ip); | 89 __ cmp(name, ip); |
| 83 __ b(ne, &miss); | 90 __ b(ne, &miss); |
| 84 | 91 |
| 85 // Check the map matches. | 92 // Check the map matches. |
| 86 __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); | 93 __ ldr(ip, MemOperand(base_addr, map_off_addr - key_off_addr)); |
| 87 __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 94 __ ldr(scratch2, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 88 __ cmp(ip, scratch2); | 95 __ cmp(ip, scratch2); |
| 89 __ b(ne, &miss); | 96 __ b(ne, &miss); |
| 90 | 97 |
| 91 // Get the code entry from the cache. | 98 // Get the code entry from the cache. |
| 92 Register code = scratch2; | 99 Register code = scratch2; |
| 93 scratch2 = no_reg; | 100 scratch2 = no_reg; |
| 94 __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); | 101 __ ldr(code, MemOperand(base_addr, value_off_addr - key_off_addr)); |
| 95 | 102 |
| 96 // Check that the flags match what we're looking for. | 103 // Check that the flags match what we're looking for. |
| 97 Register flags_reg = base_addr; | 104 Register flags_reg = base_addr; |
| 98 base_addr = no_reg; | 105 base_addr = no_reg; |
| 99 __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); | 106 __ ldr(flags_reg, FieldMemOperand(code, Code::kFlagsOffset)); |
| 100 // It's a nice optimization if this constant is encodable in the bic insn. | 107 // It's a nice optimization if this constant is encodable in the bic insn. |
| 101 | 108 // TODO(STM): check soon whether dropping the flags mask is correct |
| 102 uint32_t mask = Code::kFlagsNotUsedInLookup; | 109 __ cmp(flags_reg, Operand(flags)); |
| 103 ASSERT(__ ImmediateFitsAddrMode1Instruction(mask)); | |
| 104 __ bic(flags_reg, flags_reg, Operand(mask)); | |
| 105 // Using cmn and the negative instead of cmp means we can use movw. | |
| 106 if (flags < 0) { | |
| 107 __ cmn(flags_reg, Operand(-flags)); | |
| 108 } else { | |
| 109 __ cmp(flags_reg, Operand(flags)); | |
| 110 } | |
| 111 __ b(ne, &miss); | 110 __ b(ne, &miss); |
| 112 | 111 |
| 113 #ifdef DEBUG | |
| 114 if (FLAG_test_secondary_stub_cache && table == StubCache::kPrimary) { | |
| 115 __ jmp(&miss); | |
| 116 } else if (FLAG_test_primary_stub_cache && table == StubCache::kSecondary) { | |
| 117 __ jmp(&miss); | |
| 118 } | |
| 119 #endif | |
| 120 | |
| 121 // Jump to the first instruction in the code stub. | 112 // Jump to the first instruction in the code stub. |
| 122 __ add(pc, code, Operand(Code::kHeaderSize - kHeapObjectTag)); | 113 __ add(offset, offset, Operand(Code::kHeaderSize - kHeapObjectTag)); |
| | 114 __ jmp(offset); |
| 123 | 115 |
| 124 // Miss: fall through. | 116 // Miss: fall through. |
| 125 __ bind(&miss); | 117 __ bind(&miss); |
| 126 } | 118 } |
| 127 | 119 |
| 128 | 120 |
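
Side note for reviewers: SH4 has no ARM-style shifted-operand `add`, so the single `add(offset_scratch, offset, Operand(offset, LSL, 1))` becomes an explicit `lsl`/`add` pair. A minimal host-side sketch of the resulting entry-address arithmetic, in plain C++ rather than MacroAssembler code, assuming a 32-bit target (`kPointerSizeLog2 == 2`):

```cpp
#include <cstdint>
#include <cassert>

static const int kPointerSizeLog2 = 2;  // 32-bit target assumed

uint32_t EntryAddress(uint32_t key_base, uint32_t offset) {
  // lsl offset_scratch, offset, #1 ; add offset_scratch, offset, offset_scratch
  uint32_t offset_scratch = offset << 1;   // offset * 2
  offset_scratch += offset;                // offset * 3 (3 fields per entry)
  // lsl offset_scratch, #kPointerSizeLog2 ; add base_addr, key_base, offset_scratch
  return key_base + (offset_scratch << kPointerSizeLog2);
}

int main() {
  // Entry 5 of a table based at 0x1000: 5 entries * 3 fields * 4 bytes = 60.
  assert(EntryAddress(0x1000, 5) == 0x1000 + 60);
  return 0;
}
```
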
| 129 // Helper function used to check that the dictionary doesn't contain | 121 // Helper function used to check that the dictionary doesn't contain |
| 130 // the property. This function may return false negatives, so miss_label | 122 // the property. This function may return false negatives, so miss_label |
| 131 // must always call a backup property check that is complete. | 123 // must always call a backup property check that is complete. |
| 132 // This function is safe to call if the receiver has fast properties. | 124 // This function is safe to call if the receiver has fast properties. |
| (...skipping 16 matching lines...) |
| 149 | 141 |
| 150 // Bail out if the receiver has a named interceptor or requires access checks. | 142 // Bail out if the receiver has a named interceptor or requires access checks. |
| 151 Register map = scratch1; | 143 Register map = scratch1; |
| 152 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 144 __ ldr(map, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 153 __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); | 145 __ ldrb(scratch0, FieldMemOperand(map, Map::kBitFieldOffset)); |
| 154 __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); | 146 __ tst(scratch0, Operand(kInterceptorOrAccessCheckNeededMask)); |
| 155 __ b(ne, miss_label); | 147 __ b(ne, miss_label); |
| 156 | 148 |
| 157 // Check that receiver is a JSObject. | 149 // Check that receiver is a JSObject. |
| 158 __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); | 150 __ ldrb(scratch0, FieldMemOperand(map, Map::kInstanceTypeOffset)); |
| 159 __ cmp(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); | 151 __ cmpge(scratch0, Operand(FIRST_SPEC_OBJECT_TYPE)); |
| 160 __ b(lt, miss_label); | 152 __ bf(miss_label); |
| 161 | 153 |
| 162 // Load properties array. | 154 // Load properties array. |
| 163 Register properties = scratch0; | 155 Register properties = scratch0; |
| 164 __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); | 156 __ ldr(properties, FieldMemOperand(receiver, JSObject::kPropertiesOffset)); |
| 165 // Check that the properties array is a dictionary. | 157 // Check that the properties array is a dictionary. |
| 166 __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); | 158 __ ldr(map, FieldMemOperand(properties, HeapObject::kMapOffset)); |
| 167 Register tmp = properties; | 159 Register tmp = properties; |
| 168 __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); | 160 __ LoadRoot(tmp, Heap::kHashTableMapRootIndex); |
| 169 __ cmp(map, tmp); | 161 __ cmp(map, tmp); |
| 170 __ b(ne, miss_label); | 162 __ b(ne, miss_label); |
| (...skipping 42 matching lines...) |
| 213 ASSERT(!extra2.is(name)); | 205 ASSERT(!extra2.is(name)); |
| 214 ASSERT(!extra2.is(scratch)); | 206 ASSERT(!extra2.is(scratch)); |
| 215 ASSERT(!extra2.is(extra)); | 207 ASSERT(!extra2.is(extra)); |
| 216 | 208 |
| 217 // Check scratch, extra and extra2 registers are valid. | 209 // Check scratch, extra and extra2 registers are valid. |
| 218 ASSERT(!scratch.is(no_reg)); | 210 ASSERT(!scratch.is(no_reg)); |
| 219 ASSERT(!extra.is(no_reg)); | 211 ASSERT(!extra.is(no_reg)); |
| 220 ASSERT(!extra2.is(no_reg)); | 212 ASSERT(!extra2.is(no_reg)); |
| 221 ASSERT(!extra3.is(no_reg)); | 213 ASSERT(!extra3.is(no_reg)); |
| 222 | 214 |
| | 215 // Check that ip is not used. |
| | 216 ASSERT(!receiver.is(ip) && !name.is(ip) && !scratch.is(ip) && |
| | 217 !extra.is(ip) && !extra2.is(ip)); |
| | 218 |
| 223 Counters* counters = masm->isolate()->counters(); | 219 Counters* counters = masm->isolate()->counters(); |
| 224 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, | 220 __ IncrementCounter(counters->megamorphic_stub_cache_probes(), 1, |
| 225 extra2, extra3); | 221 extra2, extra3); |
| 226 | 222 |
| 227 // Check that the receiver isn't a smi. | 223 // Check that the receiver isn't a smi. |
| 228 __ JumpIfSmi(receiver, &miss); | 224 __ JumpIfSmi(receiver, &miss); |
| 229 | 225 |
| 230 // Get the map of the receiver and compute the hash. | 226 // Get the map of the receiver and compute the hash. |
| 231 __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset)); | 227 __ ldr(scratch, FieldMemOperand(name, String::kHashFieldOffset)); |
| 232 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 228 __ ldr(ip, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 233 __ add(scratch, scratch, Operand(ip)); | 229 __ add(scratch, scratch, ip); |
| 234 uint32_t mask = kPrimaryTableSize - 1; | 230 uint32_t mask = kPrimaryTableSize - 1; |
| 235 // We shift out the last two bits because they are not part of the hash and | 231 // We shift out the last two bits because they are not part of the hash and |
| 236 // they are always 01 for maps. | 232 // they are always 01 for maps. |
| 237 __ mov(scratch, Operand(scratch, LSR, kHeapObjectTagSize)); | 233 __ lsr(scratch, scratch, Operand(kHeapObjectTagSize)); |
| 238 // Mask down the eor argument to the minimum to keep the immediate | 234 // Mask down the eor argument to the minimum to keep the immediate small |
| 239 // ARM-encodable. | |
| 240 __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); | 235 __ eor(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask)); |
| 241 // Prefer and_ to ubfx here because ubfx takes 2 cycles. | 236 __ land(scratch, scratch, Operand(mask)); |
| 242 __ and_(scratch, scratch, Operand(mask)); | |
| 243 | 237 |
| 244 // Probe the primary table. | 238 // Probe the primary table. |
| 245 ProbeTable(isolate, | 239 ProbeTable(isolate, |
| 246 masm, | 240 masm, |
| 247 flags, | 241 flags, |
| 248 kPrimary, | 242 kPrimary, |
| 249 receiver, | 243 receiver, |
| 250 name, | 244 name, |
| 251 scratch, | 245 scratch, |
| 252 extra, | 246 extra, |
| 253 extra2, | 247 extra2, |
| 254 extra3); | 248 extra3); |
| 255 | 249 |
| 256 // Primary miss: Compute hash for secondary probe. | 250 // Primary miss: Compute hash for secondary probe. |
| 257 __ sub(scratch, scratch, Operand(name, LSR, kHeapObjectTagSize)); | 251 __ lsr(extra3, name, Operand(kHeapObjectTagSize)); |
| | 252 __ sub(scratch, scratch, extra3); |
| 258 uint32_t mask2 = kSecondaryTableSize - 1; | 253 uint32_t mask2 = kSecondaryTableSize - 1; |
| 259 __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); | 254 __ add(scratch, scratch, Operand((flags >> kHeapObjectTagSize) & mask2)); |
| 260 __ and_(scratch, scratch, Operand(mask2)); | 255 __ land(scratch, scratch, Operand(mask2)); |
| 261 | 256 |
| 262 // Probe the secondary table. | 257 // Probe the secondary table. |
| 263 ProbeTable(isolate, | 258 ProbeTable(isolate, |
| 264 masm, | 259 masm, |
| 265 flags, | 260 flags, |
| 266 kSecondary, | 261 kSecondary, |
| 267 receiver, | 262 receiver, |
| 268 name, | 263 name, |
| 269 scratch, | 264 scratch, |
| 270 extra, | 265 extra, |
| (...skipping 73 matching lines...) |
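
Side note for reviewers: the probe code above computes the same primary and secondary hashes as the ARM version, only with explicit shift instructions. A host-side sketch of both computations, assuming `kHeapObjectTagSize == 2`; the table sizes are illustrative stand-ins for `kPrimaryTableSize`/`kSecondaryTableSize`:

```cpp
#include <cstdint>
#include <cassert>

static const int kHeapObjectTagSize = 2;          // V8 heap-object tag width
static const uint32_t kPrimaryTableSize = 2048;   // illustrative size
static const uint32_t kSecondaryTableSize = 512;  // illustrative size

uint32_t PrimaryOffset(uint32_t name_hash, uint32_t map, uint32_t flags) {
  uint32_t mask = kPrimaryTableSize - 1;
  uint32_t scratch = (name_hash + map) >> kHeapObjectTagSize;  // add ; lsr
  scratch ^= (flags >> kHeapObjectTagSize) & mask;             // eor
  return scratch & mask;                                       // land
}

uint32_t SecondaryOffset(uint32_t primary, uint32_t name, uint32_t flags) {
  uint32_t mask2 = kSecondaryTableSize - 1;
  uint32_t scratch = primary - (name >> kHeapObjectTagSize);   // lsr ; sub
  scratch += (flags >> kHeapObjectTagSize) & mask2;            // add
  return scratch & mask2;                                      // land
}

int main() {
  // Both offsets always land inside their tables.
  assert(PrimaryOffset(0xDEADBEEF, 0x1234, 0x42) < kPrimaryTableSize);
  assert(SecondaryOffset(7, 0xCAFE, 0x42) < kSecondaryTableSize);
  return 0;
}
```
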
| 344 | 339 |
| 345 | 340 |
| 346 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, | 341 void StubCompiler::GenerateLoadArrayLength(MacroAssembler* masm, |
| 347 Register receiver, | 342 Register receiver, |
| 348 Register scratch, | 343 Register scratch, |
| 349 Label* miss_label) { | 344 Label* miss_label) { |
| 350 // Check that the receiver isn't a smi. | 345 // Check that the receiver isn't a smi. |
| 351 __ JumpIfSmi(receiver, miss_label); | 346 __ JumpIfSmi(receiver, miss_label); |
| 352 | 347 |
| 353 // Check that the object is a JS array. | 348 // Check that the object is a JS array. |
| 354 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE); | 349 __ CompareObjectType(receiver, scratch, scratch, JS_ARRAY_TYPE, eq); |
| 355 __ b(ne, miss_label); | 350 __ b(ne, miss_label); |
| 356 | 351 |
| 357 // Load length directly from the JS array. | 352 // Load length directly from the JS array. |
| 358 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 353 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 359 __ Ret(); | 354 __ Ret(); |
| 360 } | 355 } |
| 361 | 356 |
| 362 | 357 |
| 363 // Generate code to check if an object is a string. If the object is a | 358 // Generate code to check if an object is a string. If the object is a |
| 364 // heap object, its map's instance type is left in the scratch1 register. | 359 // heap object, its map's instance type is left in the scratch1 register. |
| 365 // If this is not needed, scratch1 and scratch2 may be the same register. | 360 // If this is not needed, scratch1 and scratch2 may be the same register. |
| 366 static void GenerateStringCheck(MacroAssembler* masm, | 361 static void GenerateStringCheck(MacroAssembler* masm, |
| 367 Register receiver, | 362 Register receiver, |
| 368 Register scratch1, | 363 Register scratch1, |
| 369 Register scratch2, | 364 Register scratch2, |
| 370 Label* smi, | 365 Label* smi, |
| 371 Label* non_string_object) { | 366 Label* non_string_object) { |
| | 367 ASSERT(!receiver.is(ip) && !scratch1.is(ip) && !scratch2.is(ip)); |
| | 368 |
| 372 // Check that the receiver isn't a smi. | 369 // Check that the receiver isn't a smi. |
| 373 __ JumpIfSmi(receiver, smi); | 370 __ JumpIfSmi(receiver, smi); |
| 374 | 371 |
| 375 // Check that the object is a string. | 372 // Check that the object is a string. |
| 376 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); | 373 __ ldr(scratch1, FieldMemOperand(receiver, HeapObject::kMapOffset)); |
| 377 __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); | 374 __ ldrb(scratch1, FieldMemOperand(scratch1, Map::kInstanceTypeOffset)); |
| 378 __ and_(scratch2, scratch1, Operand(kIsNotStringMask)); | 375 __ land(scratch2, scratch1, Operand(kIsNotStringMask)); |
| 379 // The cast is to resolve the overload for the argument of 0x0. | 376 // The cast is to resolve the overload for the argument of 0x0. |
| 380 __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag))); | 377 __ cmp(scratch2, Operand(static_cast<int32_t>(kStringTag))); |
| 381 __ b(ne, non_string_object); | 378 __ b(ne, non_string_object); |
| 382 } | 379 } |
| 383 | 380 |
| 384 | 381 |
| 385 // Generate code to load the length from a string object and return the length. | 382 // Generate code to load the length from a string object and return the length. |
| 386 // If the receiver object is not a string or a wrapped string object the | 383 // If the receiver object is not a string or a wrapped string object the |
| 387 // execution continues at the miss label. The register containing the | 384 // execution continues at the miss label. The register containing the |
| 388 // receiver is potentially clobbered. | 385 // receiver is potentially clobbered. |
| 389 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, | 386 void StubCompiler::GenerateLoadStringLength(MacroAssembler* masm, |
| 390 Register receiver, | 387 Register receiver, |
| 391 Register scratch1, | 388 Register scratch1, |
| 392 Register scratch2, | 389 Register scratch2, |
| 393 Label* miss, | 390 Label* miss, |
| 394 bool support_wrappers) { | 391 bool support_wrappers) { |
| | 392 ASSERT(!receiver.is(ip) && !scratch1.is(ip) && !scratch2.is(ip)); |
| 395 Label check_wrapper; | 393 Label check_wrapper; |
| 396 | 394 |
| 397 // Check if the object is a string leaving the instance type in the | 395 // Check if the object is a string leaving the instance type in the |
| 398 // scratch1 register. | 396 // scratch1 register. |
| 399 GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, | 397 GenerateStringCheck(masm, receiver, scratch1, scratch2, miss, |
| 400 support_wrappers ? &check_wrapper : miss); | 398 support_wrappers ? &check_wrapper : miss); |
| 401 | 399 |
| 402 // Load length directly from the string. | 400 // Load length directly from the string. |
| 403 __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); | 401 __ ldr(r0, FieldMemOperand(receiver, String::kLengthOffset)); |
| 404 __ Ret(); | 402 __ Ret(); |
| (...skipping 187 matching lines...) |
| 592 const ParameterCount& arguments, | 590 const ParameterCount& arguments, |
| 593 Label* miss, | 591 Label* miss, |
| 594 Code::ExtraICState extra_ic_state) { | 592 Code::ExtraICState extra_ic_state) { |
| 595 // ----------- S t a t e ------------- | 593 // ----------- S t a t e ------------- |
| 596 // -- r0: receiver | 594 // -- r0: receiver |
| 597 // -- r1: function to call | 595 // -- r1: function to call |
| 598 // ----------------------------------- | 596 // ----------------------------------- |
| 599 | 597 |
| 600 // Check that the function really is a function. | 598 // Check that the function really is a function. |
| 601 __ JumpIfSmi(r1, miss); | 599 __ JumpIfSmi(r1, miss); |
| 602 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); | 600 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE, eq); |
| 603 __ b(ne, miss); | 601 __ b(ne, miss); |
| 604 | 602 |
| 605 // Patch the receiver on the stack with the global proxy if | 603 // Patch the receiver on the stack with the global proxy if |
| 606 // necessary. | 604 // necessary. |
| 607 if (object->IsGlobalObject()) { | 605 if (object->IsGlobalObject()) { |
| 608 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); | 606 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); |
| 609 __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize)); | 607 __ str(r3, MemOperand(sp, arguments.immediate() * kPointerSize)); |
| 610 } | 608 } |
| 611 | 609 |
| 612 // Invoke the function. | 610 // Invoke the function. |
| (...skipping 85 matching lines...) |
| 698 // Pass the additional arguments. | 696 // Pass the additional arguments. |
| 699 Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); | 697 Handle<CallHandlerInfo> api_call_info = optimization.api_call_info(); |
| 700 Handle<Object> call_data(api_call_info->data()); | 698 Handle<Object> call_data(api_call_info->data()); |
| 701 if (masm->isolate()->heap()->InNewSpace(*call_data)) { | 699 if (masm->isolate()->heap()->InNewSpace(*call_data)) { |
| 702 __ Move(r0, api_call_info); | 700 __ Move(r0, api_call_info); |
| 703 __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset)); | 701 __ ldr(r6, FieldMemOperand(r0, CallHandlerInfo::kDataOffset)); |
| 704 } else { | 702 } else { |
| 705 __ Move(r6, call_data); | 703 __ Move(r6, call_data); |
| 706 } | 704 } |
| 707 __ mov(r7, Operand(ExternalReference::isolate_address())); | 705 __ mov(r7, Operand(ExternalReference::isolate_address())); |
| 708 // Store JS function, call data and isolate. | 706 // Store JS function, call data and isolate. |
| 709 __ stm(ib, sp, r5.bit() | r6.bit() | r7.bit()); | 707 __ str(r5, MemOperand(sp, 4)); |
| | 708 __ str(r6, MemOperand(sp, 8)); |
| | 709 __ str(r7, MemOperand(sp, 12)); |
| 710 | 710 |
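
Side note for reviewers: ARM's `stm(ib, sp, r5.bit() | r6.bit() | r7.bit())` writes the register list with pre-increment, i.e. to `sp+4`, `sp+8` and `sp+12`, which is exactly what the three `str` instructions now spell out. A trivial sketch of the layout (hypothetical helper, plain C++):

```cpp
#include <cstdint>
#include <cassert>

// Mimics stm(ib, sp, {r5,r6,r7}): "increment before" stores the ascending
// register list to sp+4, sp+8, sp+12.
void StoreCallData(uint32_t* sp, uint32_t r5, uint32_t r6, uint32_t r7) {
  sp[1] = r5;  // str r5, [sp, #4]   (JS function)
  sp[2] = r6;  // str r6, [sp, #8]   (call data)
  sp[3] = r7;  // str r7, [sp, #12]  (isolate)
}

int main() {
  uint32_t stack[4] = {0, 0, 0, 0};
  StoreCallData(stack, 1, 2, 3);
  assert(stack[1] == 1 && stack[2] == 2 && stack[3] == 3);
  return 0;
}
```
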
| 711 // Prepare arguments. | 711 // Prepare arguments. |
| 712 __ add(r2, sp, Operand(3 * kPointerSize)); | 712 __ add(r2, sp, Operand(3 * kPointerSize)); |
| 713 | 713 |
| 714 // Allocate the v8::Arguments structure in the arguments' space since | 714 // Allocate the v8::Arguments structure in the arguments' space since |
| 715 // it's not controlled by GC. | 715 // it's not controlled by GC. |
| 716 const int kApiStackSpace = 4; | 716 const int kApiStackSpace = 4; |
| 717 | 717 |
| 718 FrameScope frame_scope(masm, StackFrame::MANUAL); | 718 FrameScope frame_scope(masm, StackFrame::MANUAL); |
| 719 __ EnterExitFrame(false, kApiStackSpace); | 719 __ EnterExitFrame(false, kApiStackSpace); |
| (...skipping 253 matching lines...) |
| 973 scratch, | 973 scratch, |
| 974 miss); | 974 miss); |
| 975 } | 975 } |
| 976 current = Handle<JSObject>(JSObject::cast(current->GetPrototype())); | 976 current = Handle<JSObject>(JSObject::cast(current->GetPrototype())); |
| 977 } | 977 } |
| 978 } | 978 } |
| 979 | 979 |
| 980 | 980 |
| 981 // Convert and store int passed in register ival to IEEE 754 single precision | 981 // Convert and store int passed in register ival to IEEE 754 single precision |
| 982 // floating point value at memory location (dst + 4 * wordoffset) | 982 // floating point value at memory location (dst + 4 * wordoffset) |
| 983 // If VFP3 is available use it for conversion. | 983 // If FPU is available use it for conversion. |
| 984 static void StoreIntAsFloat(MacroAssembler* masm, | 984 static void StoreIntAsFloat(MacroAssembler* masm, |
| 985 Register dst, | 985 Register dst, |
| 986 Register wordoffset, | 986 Register wordoffset, |
| 987 Register ival, | 987 Register ival, |
| 988 Register fval, | 988 Register fval, |
| 989 Register scratch1, | 989 Register scratch1, |
| 990 Register scratch2) { | 990 Register scratch2) { |
| 991 if (CpuFeatures::IsSupported(VFP2)) { | 991 if (CpuFeatures::IsSupported(FPU)) { |
| 992 CpuFeatures::Scope scope(VFP2); | 992 __ dfloat(dr0, ival); |
| 993 __ vmov(s0, ival); | 993 __ fcnvds(fr0, dr0); |
| 994 __ add(scratch1, dst, Operand(wordoffset, LSL, 2)); | 994 __ lsl(scratch1, wordoffset, Operand(2)); |
| 995 __ vcvt_f32_s32(s0, s0); | 995 __ add(scratch1, dst, scratch1); |
| 996 __ vstr(s0, scratch1, 0); | 996 __ fstr(fr0, MemOperand(scratch1, 0)); |
| 997 } else { | 997 } else { |
| 998 Label not_special, done; | 998 Label not_special, done; |
| 999 // Move sign bit from source to destination. This works because the sign | 999 // Move sign bit from source to destination. This works because the sign |
| 1000 // bit in the exponent word of the double has the same position and polarity | 1000 // bit in the exponent word of the double has the same position and polarity |
| 1001 // as the 2's complement sign bit in a Smi. | 1001 // as the 2's complement sign bit in a Smi. |
| 1002 ASSERT(kBinary32SignMask == 0x80000000u); | 1002 ASSERT(kBinary32SignMask == 0x80000000u); |
| 1003 | 1003 |
| 1004 __ and_(fval, ival, Operand(kBinary32SignMask), SetCC); | 1004 __ land(fval, ival, Operand(kBinary32SignMask)); |
| | 1005 __ cmp(fval, Operand(0)); |
| 1005 // Negate value if it is negative. | 1006 // Negate value if it is negative. |
| 1006 __ rsb(ival, ival, Operand(0, RelocInfo::NONE), LeaveCC, ne); | 1007 __ rsb(ip, ival, Operand(0, RelocInfo::NONE)); |
| | 1008 __ mov(ival, ip, ne); |
| 1007 | 1009 |
| 1008 // We have -1, 0 or 1, which we treat specially. Register ival contains | 1010 // We have -1, 0 or 1, which we treat specially. Register ival contains |
| 1009 // absolute value: it is either equal to 1 (special case of -1 and 1), | 1011 // absolute value: it is either equal to 1 (special case of -1 and 1), |
| 1010 // greater than 1 (not a special case) or less than 1 (special case of 0). | 1012 // greater than 1 (not a special case) or less than 1 (special case of 0). |
| 1011 __ cmp(ival, Operand(1)); | 1013 __ cmpgt(ival, Operand(1)); |
| 1012 __ b(gt, ¬_special); | 1014 __ b(t, ¬_special); |
| 1013 | 1015 |
| 1014 // For 1 or -1 we need to or in the 0 exponent (biased). | 1016 // For 1 or -1 we need to or in the 0 exponent (biased). |
| 1015 static const uint32_t exponent_word_for_1 = | 1017 static const uint32_t exponent_word_for_1 = |
| 1016 kBinary32ExponentBias << kBinary32ExponentShift; | 1018 kBinary32ExponentBias << kBinary32ExponentShift; |
| 1017 | 1019 |
| 1018 __ orr(fval, fval, Operand(exponent_word_for_1), LeaveCC, eq); | 1020 __ cmp(ival, Operand(1)); |
| | 1021 __ orr(ip, fval, Operand(exponent_word_for_1)); |
| | 1022 __ mov(fval, ip, eq); |
| 1019 __ b(&done); | 1023 __ b(&done); |
| 1020 | 1024 |
| 1021 __ bind(¬_special); | 1025 __ bind(¬_special); |
| 1022 // Count leading zeros. | 1026 // Count leading zeros. |
| 1023 // Gets the wrong answer for 0, but we already checked for that case above. | 1027 // Gets the wrong answer for 0, but we already checked for that case above. |
| 1024 Register zeros = scratch2; | 1028 Register zeros = scratch2; |
| 1025 __ CountLeadingZeros(zeros, ival, scratch1); | 1029 __ CountLeadingZeros(zeros, ival, scratch1); |
| 1026 | 1030 |
| 1027 // Compute exponent and or it into the exponent register. | 1031 // Compute exponent and or it into the exponent register. |
| 1028 __ rsb(scratch1, | 1032 __ rsb(scratch1, |
| 1029 zeros, | 1033 zeros, |
| 1030 Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); | 1034 Operand((kBitsPerInt - 1) + kBinary32ExponentBias)); |
| 1031 | 1035 |
| | 1036 __ lsl(ip, scratch1, Operand(kBinary32ExponentShift)); |
| 1032 __ orr(fval, | 1037 __ orr(fval, |
| 1033 fval, | 1038 fval, |
| 1034 Operand(scratch1, LSL, kBinary32ExponentShift)); | 1039 ip); |
| 1035 | 1040 |
| 1036 // Shift up the source chopping the top bit off. | 1041 // Shift up the source chopping the top bit off. |
| 1037 __ add(zeros, zeros, Operand(1)); | 1042 __ add(zeros, zeros, Operand(1)); |
| 1038 // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. | 1043 // This wouldn't work for 1 and -1 as the shift would be 32 which means 0. |
| 1039 __ mov(ival, Operand(ival, LSL, zeros)); | 1044 __ lsl(ival, ival, zeros); |
| 1040 // And the top (top 20 bits). | 1045 // And the top (top 20 bits). |
| | 1046 __ lsr(ip, ival, Operand(kBitsPerInt - kBinary32MantissaBits)); |
| 1041 __ orr(fval, | 1047 __ orr(fval, |
| 1042 fval, | 1048 fval, |
| 1043 Operand(ival, LSR, kBitsPerInt - kBinary32MantissaBits)); | 1049 ip); |
| 1044 | 1050 |
| 1045 __ bind(&done); | 1051 __ bind(&done); |
| 1046 __ str(fval, MemOperand(dst, wordoffset, LSL, 2)); | 1052 __ lsl(ip, wordoffset, Operand(2)); |
| | 1053 __ str(fval, MemOperand(dst, ip)); |
| 1047 } | 1054 } |
| 1048 } | 1055 } |
| 1049 | 1056 |
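
Side note for reviewers: the no-FPU fallback assembles the IEEE 754 single-precision bit pattern by hand: sign bit, biased exponent derived from the leading-zero count, and a truncated mantissa. A host-side sketch of the same encoding (plain C++; `__builtin_clz` assumes GCC/Clang and stands in for `CountLeadingZeros`):

```cpp
#include <cstdint>
#include <cassert>

static const uint32_t kBinary32SignMask = 0x80000000u;
static const int kBinary32ExponentBias = 127;
static const int kBinary32ExponentShift = 23;
static const int kBinary32MantissaBits = 23;
static const int kBitsPerInt = 32;

uint32_t IntToBinary32(int32_t ival) {
  uint32_t fval = static_cast<uint32_t>(ival) & kBinary32SignMask;  // sign
  uint32_t mag = ival < 0 ? -static_cast<uint32_t>(ival) : ival;
  if (mag == 0) return fval;                     // +/-0
  if (mag == 1)                                  // 1 and -1: biased exponent 0
    return fval | (kBinary32ExponentBias << kBinary32ExponentShift);
  int zeros = __builtin_clz(mag);                // CountLeadingZeros
  fval |= static_cast<uint32_t>((kBitsPerInt - 1) + kBinary32ExponentBias -
                                zeros) << kBinary32ExponentShift;
  mag <<= zeros + 1;                             // shift out the leading 1 bit
  fval |= mag >> (kBitsPerInt - kBinary32MantissaBits);  // top 23 bits, truncated
  return fval;
}

int main() {
  assert(IntToBinary32(0) == 0x00000000u);
  assert(IntToBinary32(1) == 0x3F800000u);   // 1.0f
  assert(IntToBinary32(-1) == 0xBF800000u);  // -1.0f
  assert(IntToBinary32(2) == 0x40000000u);   // 2.0f
  return 0;
}
```
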
| 1050 | 1057 |
| 1051 // Convert unsigned integer with specified number of leading zeroes in binary | 1058 // Convert unsigned integer with specified number of leading zeroes in binary |
| 1052 // representation to IEEE 754 double. | 1059 // representation to IEEE 754 double. |
| 1053 // Integer to convert is passed in register hiword. | 1060 // Integer to convert is passed in register hiword. |
| 1054 // Resulting double is returned in registers hiword:loword. | 1061 // Resulting double is returned in registers hiword:loword. |
| 1055 // This function does not work correctly for 0. | 1062 // This function does not work correctly for 0. |
| 1056 static void GenerateUInt2Double(MacroAssembler* masm, | 1063 static void GenerateUInt2Double(MacroAssembler* masm, |
| 1057 Register hiword, | 1064 Register hiword, |
| 1058 Register loword, | 1065 Register loword, |
| 1059 Register scratch, | 1066 Register scratch, |
| 1060 int leading_zeroes) { | 1067 int leading_zeroes) { |
| | 1068 ASSERT(!scratch.is(hiword)); |
| 1061 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; | 1069 const int meaningful_bits = kBitsPerInt - leading_zeroes - 1; |
| 1062 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; | 1070 const int biased_exponent = HeapNumber::kExponentBias + meaningful_bits; |
| 1063 | 1071 |
| 1064 const int mantissa_shift_for_hi_word = | 1072 const int mantissa_shift_for_hi_word = |
| 1065 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; | 1073 meaningful_bits - HeapNumber::kMantissaBitsInTopWord; |
| 1066 | 1074 |
| 1067 const int mantissa_shift_for_lo_word = | 1075 const int mantissa_shift_for_lo_word = |
| 1068 kBitsPerInt - mantissa_shift_for_hi_word; | 1076 kBitsPerInt - mantissa_shift_for_hi_word; |
| 1069 | 1077 |
| 1070 __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); | 1078 __ mov(scratch, Operand(biased_exponent << HeapNumber::kExponentShift)); |
| 1071 if (mantissa_shift_for_hi_word > 0) { | 1079 if (mantissa_shift_for_hi_word > 0) { |
| 1072 __ mov(loword, Operand(hiword, LSL, mantissa_shift_for_lo_word)); | 1080 __ lsl(loword, hiword, Operand(mantissa_shift_for_lo_word)); |
| 1073 __ orr(hiword, scratch, Operand(hiword, LSR, mantissa_shift_for_hi_word)); | 1081 __ lsr(hiword, hiword, Operand(mantissa_shift_for_hi_word)); |
| | 1082 __ orr(hiword, scratch, hiword); |
| 1074 } else { | 1083 } else { |
| 1075 __ mov(loword, Operand(0, RelocInfo::NONE)); | 1084 __ mov(loword, Operand(0, RelocInfo::NONE)); |
| 1076 __ orr(hiword, scratch, Operand(hiword, LSL, mantissa_shift_for_hi_word)); | 1085 __ lsl(hiword, hiword, Operand(mantissa_shift_for_hi_word)); |
| | 1086 __ orr(hiword, scratch, hiword); |
| 1077 } | 1087 } |
| 1078 | 1088 |
| 1079 // If least significant bit of biased exponent was not 1 it was corrupted | 1089 // If least significant bit of biased exponent was not 1 it was corrupted |
| 1080 // by most significant bit of mantissa so we should fix that. | 1090 // by most significant bit of mantissa so we should fix that. |
| 1081 if (!(biased_exponent & 1)) { | 1091 if (!(biased_exponent & 1)) { |
| 1082 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); | 1092 __ bic(hiword, hiword, Operand(1 << HeapNumber::kExponentShift)); |
| 1083 } | 1093 } |
| 1084 } | 1094 } |
| 1085 | 1095 |
| 1086 | 1096 |
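
Side note for reviewers: `GenerateUInt2Double` deliberately lets the value's leading 1 bit overlap the exponent's least significant bit and repairs it with the final `bic`. A host-side sketch of the hiword:loword construction, checked against the hardware double encoding (HeapNumber constants assumed; as noted above, not correct for 0):

```cpp
#include <cstdint>
#include <cstring>
#include <cassert>

static const int kBitsPerInt = 32;
static const int kExponentBias = 1023;           // HeapNumber::kExponentBias
static const int kExponentShift = 20;            // HeapNumber::kExponentShift
static const int kMantissaBitsInTopWord = 20;

void UInt2Double(uint32_t value, int leading_zeroes,
                 uint32_t* hiword, uint32_t* loword) {
  const int meaningful_bits = kBitsPerInt - leading_zeroes - 1;
  const int biased_exponent = kExponentBias + meaningful_bits;
  const int hi_shift = meaningful_bits - kMantissaBitsInTopWord;
  const uint32_t scratch =
      static_cast<uint32_t>(biased_exponent) << kExponentShift;
  if (hi_shift > 0) {
    *loword = value << (kBitsPerInt - hi_shift);
    *hiword = scratch | (value >> hi_shift);
  } else {
    *loword = 0;
    *hiword = scratch | (value << -hi_shift);
  }
  // The value's leading 1 bit lands on the exponent's LSB; clear it again
  // when that LSB should be 0 (the final bic in the generated code).
  if (!(biased_exponent & 1)) *hiword &= ~(1u << kExponentShift);
}

int main() {
  uint32_t hi, lo;
  UInt2Double(0x80000000u, 0, &hi, &lo);  // 2^31 has 0 leading zeroes
  uint64_t bits = (static_cast<uint64_t>(hi) << 32) | lo;
  double d;
  std::memcpy(&d, &bits, sizeof d);
  assert(d == 2147483648.0);
  return 0;
}
```
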
| (...skipping 305 matching lines...) |
| 1392 CompileCallLoadPropertyWithInterceptor(masm(), | 1402 CompileCallLoadPropertyWithInterceptor(masm(), |
| 1393 receiver, | 1403 receiver, |
| 1394 holder_reg, | 1404 holder_reg, |
| 1395 name_reg, | 1405 name_reg, |
| 1396 interceptor_holder); | 1406 interceptor_holder); |
| 1397 // Check if interceptor provided a value for property. If it's | 1407 // Check if interceptor provided a value for property. If it's |
| 1398 // the case, return immediately. | 1408 // the case, return immediately. |
| 1399 Label interceptor_failed; | 1409 Label interceptor_failed; |
| 1400 __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); | 1410 __ LoadRoot(scratch1, Heap::kNoInterceptorResultSentinelRootIndex); |
| 1401 __ cmp(r0, scratch1); | 1411 __ cmp(r0, scratch1); |
| 1402 __ b(eq, &interceptor_failed); | 1412 __ b(eq, &interceptor_failed, Label::kNear); |
| 1403 frame_scope.GenerateLeaveFrame(); | 1413 frame_scope.GenerateLeaveFrame(); |
| 1404 __ Ret(); | 1414 __ Ret(); |
| 1405 | 1415 |
| 1406 __ bind(&interceptor_failed); | 1416 __ bind(&interceptor_failed); |
| 1407 __ pop(name_reg); | 1417 __ pop(name_reg); |
| 1408 __ pop(holder_reg); | 1418 __ pop(holder_reg); |
| 1409 if (must_preserve_receiver_reg) { | 1419 if (must_preserve_receiver_reg) { |
| 1410 __ pop(receiver); | 1420 __ pop(receiver); |
| 1411 } | 1421 } |
| 1412 // Leave the internal frame. | 1422 // Leave the internal frame. |
| (...skipping 100 matching lines...) |
| 1513 __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); | 1523 __ ldr(r1, FieldMemOperand(r3, JSGlobalPropertyCell::kValueOffset)); |
| 1514 | 1524 |
| 1515 // Check that the cell contains the same function. | 1525 // Check that the cell contains the same function. |
| 1516 if (heap()->InNewSpace(*function)) { | 1526 if (heap()->InNewSpace(*function)) { |
| 1517 // We can't embed a pointer to a function in new space so we have | 1527 // We can't embed a pointer to a function in new space so we have |
| 1518 // to verify that the shared function info is unchanged. This has | 1528 // to verify that the shared function info is unchanged. This has |
| 1519 // the nice side effect that multiple closures based on the same | 1529 // the nice side effect that multiple closures based on the same |
| 1520 // function can all use this call IC. Before we load through the | 1530 // function can all use this call IC. Before we load through the |
| 1521 // function, we have to verify that it still is a function. | 1531 // function, we have to verify that it still is a function. |
| 1522 __ JumpIfSmi(r1, miss); | 1532 __ JumpIfSmi(r1, miss); |
| 1523 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE); | 1533 __ CompareObjectType(r1, r3, r3, JS_FUNCTION_TYPE, eq); |
| 1524 __ b(ne, miss); | 1534 __ b(ne, miss); |
| 1525 | 1535 |
| 1526 // Check the shared function info. Make sure it hasn't changed. | 1536 // Check the shared function info. Make sure it hasn't changed. |
| 1527 __ Move(r3, Handle<SharedFunctionInfo>(function->shared())); | 1537 __ Move(r3, Handle<SharedFunctionInfo>(function->shared())); |
| 1528 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); | 1538 __ ldr(r4, FieldMemOperand(r1, JSFunction::kSharedFunctionInfoOffset)); |
| 1529 __ cmp(r4, r3); | 1539 __ cmp(r4, r3); |
| 1530 } else { | 1540 } else { |
| 1531 __ cmp(r1, Operand(function)); | 1541 __ cmp(r1, Operand(function)); |
| 1532 } | 1542 } |
| 1533 __ b(ne, miss); | 1543 __ b(ne, miss); |
| (...skipping 102 matching lines...) |
| 1636 // Get the array's length into r0 and calculate new length. | 1646 // Get the array's length into r0 and calculate new length. |
| 1637 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1647 __ ldr(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1638 STATIC_ASSERT(kSmiTagSize == 1); | 1648 STATIC_ASSERT(kSmiTagSize == 1); |
| 1639 STATIC_ASSERT(kSmiTag == 0); | 1649 STATIC_ASSERT(kSmiTag == 0); |
| 1640 __ add(r0, r0, Operand(Smi::FromInt(argc))); | 1650 __ add(r0, r0, Operand(Smi::FromInt(argc))); |
| 1641 | 1651 |
| 1642 // Get the elements' length. | 1652 // Get the elements' length. |
| 1643 __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); | 1653 __ ldr(r4, FieldMemOperand(elements, FixedArray::kLengthOffset)); |
| 1644 | 1654 |
| 1645 // Check if we could survive without allocation. | 1655 // Check if we could survive without allocation. |
| 1646 __ cmp(r0, r4); | 1656 __ cmpgt(r0, r4); |
| 1647 __ b(gt, &attempt_to_grow_elements); | 1657 __ bt_near(&attempt_to_grow_elements); |
| 1648 | 1658 |
| 1649 // Check if value is a smi. | 1659 // Check if value is a smi. |
| 1650 Label with_write_barrier; | 1660 Label with_write_barrier; |
| 1651 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); | 1661 __ ldr(r4, MemOperand(sp, (argc - 1) * kPointerSize)); |
| 1652 __ JumpIfNotSmi(r4, &with_write_barrier); | 1662 __ JumpIfNotSmi(r4, &with_write_barrier); |
| 1653 | 1663 |
| 1654 // Save new length. | 1664 // Save new length. |
| 1655 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1665 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1656 | 1666 |
| 1657 // Store the value. | 1667 // Store the value. |
| 1658 // We may need a register containing the address end_elements below, | 1668 // We may need a register containing the address end_elements below, |
| 1659 // so write back the value in end_elements. | 1669 // so write back the value in end_elements. |
| 1660 __ add(end_elements, elements, | 1670 __ lsl(ip, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 1661 Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | 1671 __ add(end_elements, elements, ip); |
| 1662 const int kEndElementsOffset = | 1672 const int kEndElementsOffset = |
| 1663 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; | 1673 FixedArray::kHeaderSize - kHeapObjectTag - argc * kPointerSize; |
| 1664 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); | 1674 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); |
| 1665 | 1675 |
| 1666 // Check for a smi. | 1676 // Check for a smi. |
| 1667 __ Drop(argc + 1); | 1677 __ Drop(argc + 1); |
| 1668 __ Ret(); | 1678 __ Ret(); |
| 1669 | 1679 |
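
Side note for reviewers: the `lsl`/`add` pair keeps ARM's trick of scaling a smi-tagged length directly to a byte offset: a smi stores value*2, so shifting left by `kPointerSizeLog2 - kSmiTagSize == 1` yields value*4 without untagging first. A tiny host-side sketch:

```cpp
#include <cstdint>
#include <cassert>

static const int kPointerSizeLog2 = 2;  // 32-bit pointers assumed
static const int kSmiTagSize = 1;

uintptr_t EndElements(uintptr_t elements, int32_t smi_length) {
  // lsl ip, length, #(kPointerSizeLog2 - kSmiTagSize) ; add end, elements, ip
  return elements + (static_cast<uintptr_t>(smi_length)
                     << (kPointerSizeLog2 - kSmiTagSize));
}

int main() {
  int32_t smi = 10 << kSmiTagSize;  // smi encoding of length 10
  assert(EndElements(0x1000, smi) == 0x1000 + 10 * 4);
  return 0;
}
```
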
| 1670 __ bind(&with_write_barrier); | 1680 __ bind(&with_write_barrier); |
| 1671 | 1681 |
| (...skipping 32 matching lines...) |
| 1704 } else { | 1714 } else { |
| 1705 __ CheckFastObjectElements(r3, r3, &call_builtin); | 1715 __ CheckFastObjectElements(r3, r3, &call_builtin); |
| 1706 } | 1716 } |
| 1707 | 1717 |
| 1708 // Save new length. | 1718 // Save new length. |
| 1709 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1719 __ str(r0, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1710 | 1720 |
| 1711 // Store the value. | 1721 // Store the value. |
| 1712 // We may need a register containing the address end_elements below, | 1722 // We may need a register containing the address end_elements below, |
| 1713 // so write back the value in end_elements. | 1723 // so write back the value in end_elements. |
| 1714 __ add(end_elements, elements, | 1724 __ lsl(end_elements, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 1715 Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | 1725 __ add(end_elements, elements, end_elements); |
| 1716 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); | 1726 __ str(r4, MemOperand(end_elements, kEndElementsOffset, PreIndex)); |
| 1717 | 1727 |
| 1718 __ RecordWrite(elements, | 1728 __ RecordWrite(elements, |
| 1719 end_elements, | 1729 end_elements, |
| 1720 r4, | 1730 r4, |
| 1721 kLRHasNotBeenSaved, | 1731 kLRHasNotBeenSaved, |
| 1722 kDontSaveFPRegs, | 1732 kDontSaveFPRegs, |
| 1723 EMIT_REMEMBERED_SET, | 1733 EMIT_REMEMBERED_SET, |
| 1724 OMIT_SMI_CHECK); | 1734 OMIT_SMI_CHECK); |
| 1725 __ Drop(argc + 1); | 1735 __ Drop(argc + 1); |
| (...skipping 17 matching lines...) |
| 1743 __ bind(&no_fast_elements_check); | 1753 __ bind(&no_fast_elements_check); |
| 1744 | 1754 |
| 1745 Isolate* isolate = masm()->isolate(); | 1755 Isolate* isolate = masm()->isolate(); |
| 1746 ExternalReference new_space_allocation_top = | 1756 ExternalReference new_space_allocation_top = |
| 1747 ExternalReference::new_space_allocation_top_address(isolate); | 1757 ExternalReference::new_space_allocation_top_address(isolate); |
| 1748 ExternalReference new_space_allocation_limit = | 1758 ExternalReference new_space_allocation_limit = |
| 1749 ExternalReference::new_space_allocation_limit_address(isolate); | 1759 ExternalReference::new_space_allocation_limit_address(isolate); |
| 1750 | 1760 |
| 1751 const int kAllocationDelta = 4; | 1761 const int kAllocationDelta = 4; |
| 1752 // Load top and check if it is the end of elements. | 1762 // Load top and check if it is the end of elements. |
| 1753 __ add(end_elements, elements, | 1763 __ lsl(end_elements, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 1754 Operand(r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | 1764 __ add(end_elements, elements, end_elements); |
| 1755 __ add(end_elements, end_elements, Operand(kEndElementsOffset)); | 1765 __ add(end_elements, end_elements, Operand(kEndElementsOffset)); |
| 1756 __ mov(r7, Operand(new_space_allocation_top)); | 1766 __ mov(r7, Operand(new_space_allocation_top)); |
| 1757 __ ldr(r3, MemOperand(r7)); | 1767 __ ldr(r3, MemOperand(r7)); |
| 1758 __ cmp(end_elements, r3); | 1768 __ cmp(end_elements, r3); |
| 1759 __ b(ne, &call_builtin); | 1769 __ b(ne, &call_builtin); |
| 1760 | 1770 |
| 1761 __ mov(r9, Operand(new_space_allocation_limit)); | 1771 __ mov(r9, Operand(new_space_allocation_limit)); |
| 1762 __ ldr(r9, MemOperand(r9)); | 1772 __ ldr(r9, MemOperand(r9)); |
| 1763 __ add(r3, r3, Operand(kAllocationDelta * kPointerSize)); | 1773 __ add(r3, r3, Operand(kAllocationDelta * kPointerSize)); |
| 1764 __ cmp(r3, r9); | 1774 __ cmphi(r3, r9); |
| 1765 __ b(hi, &call_builtin); | 1775 __ b(eq, &call_builtin); |
| 1766 | 1776 |
| 1767 // We fit and could grow elements. | 1777 // We fit and could grow elements. |
| 1768 // Update new_space_allocation_top. | 1778 // Update new_space_allocation_top. |
| 1769 __ str(r3, MemOperand(r7)); | 1779 __ str(r3, MemOperand(r7)); |
| 1770 // Push the argument. | 1780 // Push the argument. |
| 1771 __ str(r2, MemOperand(end_elements)); | 1781 __ str(r2, MemOperand(end_elements)); |
| 1772 // Fill the rest with holes. | 1782 // Fill the rest with holes. |
| 1773 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); | 1783 __ LoadRoot(r3, Heap::kTheHoleValueRootIndex); |
| 1774 for (int i = 1; i < kAllocationDelta; i++) { | 1784 for (int i = 1; i < kAllocationDelta; i++) { |
| 1775 __ str(r3, MemOperand(end_elements, i * kPointerSize)); | 1785 __ str(r3, MemOperand(end_elements, i * kPointerSize)); |
| (...skipping 61 matching lines...) |
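
Side note for reviewers: the grow path only succeeds when the array's backing store ends exactly at the new-space allocation top and the bumped top stays within the limit; the `cmphi`/`b(eq)` pair above implements the limit check. A host-side sketch of that bump-allocation test (plain C++, constants as in the CL):

```cpp
#include <cstdint>
#include <cassert>

static const int kAllocationDelta = 4;
static const int kPointerSize = 4;

bool TryGrowInPlace(uintptr_t end_elements, uintptr_t* top, uintptr_t limit) {
  if (end_elements != *top) return false;  // backing store is not the last object
  uintptr_t new_top = *top + kAllocationDelta * kPointerSize;
  if (new_top > limit) return false;       // cmphi: would exceed new space
  *top = new_top;                          // update new_space_allocation_top
  return true;
}

int main() {
  uintptr_t top = 0x2000;
  assert(TryGrowInPlace(0x2000, &top, 0x3000) && top == 0x2010);
  assert(!TryGrowInPlace(0x1ff0, &top, 0x3000));  // not at the top: call builtin
  return 0;
}
```
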
| 1837 | 1847 |
| 1838 // Check that the elements are in fast mode and writable. | 1848 // Check that the elements are in fast mode and writable. |
| 1839 __ CheckMap(elements, | 1849 __ CheckMap(elements, |
| 1840 r0, | 1850 r0, |
| 1841 Heap::kFixedArrayMapRootIndex, | 1851 Heap::kFixedArrayMapRootIndex, |
| 1842 &call_builtin, | 1852 &call_builtin, |
| 1843 DONT_DO_SMI_CHECK); | 1853 DONT_DO_SMI_CHECK); |
| 1844 | 1854 |
| 1845 // Get the array's length into r4 and calculate new length. | 1855 // Get the array's length into r4 and calculate new length. |
| 1846 __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1856 __ ldr(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1847 __ sub(r4, r4, Operand(Smi::FromInt(1)), SetCC); | 1857 __ cmpge(r4, Operand(Smi::FromInt(1))); // for the branch below |
| 1848 __ b(lt, &return_undefined); | 1858 __ sub(r4, r4, Operand(Smi::FromInt(1))); |
| | 1859 __ bf_near(&return_undefined); |
| 1849 | 1860 |
| 1850 // Get the last element. | 1861 // Get the last element. |
| 1851 __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); | 1862 __ LoadRoot(r6, Heap::kTheHoleValueRootIndex); |
| 1852 STATIC_ASSERT(kSmiTagSize == 1); | 1863 STATIC_ASSERT(kSmiTagSize == 1); |
| 1853 STATIC_ASSERT(kSmiTag == 0); | 1864 STATIC_ASSERT(kSmiTag == 0); |
| 1854 // We can't address the last element in one operation. Compute the more | 1865 // We can't address the last element in one operation. Compute the more |
| 1855 // expensive shift first, and use an offset later on. | 1866 // expensive shift first, and use an offset later on. |
| 1856 __ add(elements, elements, Operand(r4, LSL, kPointerSizeLog2 - kSmiTagSize)); | 1867 __ lsl(r0, r4, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 1857 __ ldr(r0, FieldMemOperand(elements, FixedArray::kHeaderSize)); | 1868 __ add(elements, elements, r0); |
| | 1869 __ ldr(r0, MemOperand(elements, FixedArray::kHeaderSize)); |
| 1858 __ cmp(r0, r6); | 1870 __ cmp(r0, r6); |
| 1859 __ b(eq, &call_builtin); | 1871 __ b(eq, &call_builtin); |
| 1860 | 1872 |
| 1861 // Set the array's length. | 1873 // Set the array's length. |
| 1862 __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); | 1874 __ str(r4, FieldMemOperand(receiver, JSArray::kLengthOffset)); |
| 1863 | 1875 |
| 1864 // Fill with the hole. | 1876 // Fill with the hole. |
| 1865 __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize)); | 1877 __ str(r6, FieldMemOperand(elements, FixedArray::kHeaderSize)); |
| 1866 __ Drop(argc + 1); | 1878 __ Drop(argc + 1); |
| 1867 __ Ret(); | 1879 __ Ret(); |
| (...skipping 222 matching lines...) |
| 2090 // Load the char code argument. | 2102 // Load the char code argument. |
| 2091 Register code = r1; | 2103 Register code = r1; |
| 2092 __ ldr(code, MemOperand(sp, 0 * kPointerSize)); | 2104 __ ldr(code, MemOperand(sp, 0 * kPointerSize)); |
| 2093 | 2105 |
| 2094 // Check the code is a smi. | 2106 // Check the code is a smi. |
| 2095 Label slow; | 2107 Label slow; |
| 2096 STATIC_ASSERT(kSmiTag == 0); | 2108 STATIC_ASSERT(kSmiTag == 0); |
| 2097 __ JumpIfNotSmi(code, &slow); | 2109 __ JumpIfNotSmi(code, &slow); |
| 2098 | 2110 |
| 2099 // Convert the smi code to uint16. | 2111 // Convert the smi code to uint16. |
| 2100 __ and_(code, code, Operand(Smi::FromInt(0xffff))); | 2112 __ land(code, code, Operand(Smi::FromInt(0xffff))); |
| 2101 | 2113 |
| 2102 StringCharFromCodeGenerator generator(code, r0); | 2114 StringCharFromCodeGenerator generator(code, r0); |
| 2103 generator.GenerateFast(masm()); | 2115 generator.GenerateFast(masm()); |
| 2104 __ Drop(argc + 1); | 2116 __ Drop(argc + 1); |
| 2105 __ Ret(); | 2117 __ Ret(); |
| 2106 | 2118 |
| 2107 StubRuntimeCallHelper call_helper; | 2119 StubRuntimeCallHelper call_helper; |
| 2108 generator.GenerateSlow(masm(), call_helper); | 2120 generator.GenerateSlow(masm(), call_helper); |
| 2109 | 2121 |
| 2110 // Tail call the full function. We do not have to patch the receiver | 2122 // Tail call the full function. We do not have to patch the receiver |
| (...skipping 18 matching lines...) |
| 2129 Handle<JSFunction> function, | 2141 Handle<JSFunction> function, |
| 2130 Handle<String> name) { | 2142 Handle<String> name) { |
| 2131 // ----------- S t a t e ------------- | 2143 // ----------- S t a t e ------------- |
| 2132 // -- r2 : function name | 2144 // -- r2 : function name |
| 2133 // -- lr : return address | 2145 // -- lr : return address |
| 2134 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) | 2146 // -- sp[(argc - n - 1) * 4] : arg[n] (zero-based) |
| 2135 // -- ... | 2147 // -- ... |
| 2136 // -- sp[argc * 4] : receiver | 2148 // -- sp[argc * 4] : receiver |
| 2137 // ----------------------------------- | 2149 // ----------------------------------- |
| 2138 | 2150 |
| 2139 if (!CpuFeatures::IsSupported(VFP2)) { | 2151 // TODO(STM): implement this using FPU |
| | 2152 // if (!CpuFeatures::IsSupported(FPU)) |
| | 2153 { |
| 2140 return Handle<Code>::null(); | 2154 return Handle<Code>::null(); |
| 2141 } | 2155 } |
| 2142 | |
| 2143 CpuFeatures::Scope scope_vfp2(VFP2); | |
| 2144 const int argc = arguments().immediate(); | |
| 2145 // If the object is not a JSObject or we got an unexpected number of | |
| 2146 // arguments, bail out to the regular call. | |
| 2147 if (!object->IsJSObject() || argc != 1) return Handle<Code>::null(); | |
| 2148 | |
| 2149 Label miss, slow; | |
| 2150 GenerateNameCheck(name, &miss); | |
| 2151 | |
| 2152 if (cell.is_null()) { | |
| 2153 __ ldr(r1, MemOperand(sp, 1 * kPointerSize)); | |
| 2154 STATIC_ASSERT(kSmiTag == 0); | |
| 2155 __ JumpIfSmi(r1, &miss); | |
| 2156 CheckPrototypes(Handle<JSObject>::cast(object), r1, holder, r0, r3, r4, | |
| 2157 name, &miss); | |
| 2158 } else { | |
| 2159 ASSERT(cell->value() == *function); | |
| 2160 GenerateGlobalReceiverCheck(Handle<JSObject>::cast(object), holder, name, | |
| 2161 &miss); | |
| 2162 GenerateLoadFunctionFromCell(cell, function, &miss); | |
| 2163 } | |
| 2164 | |
| 2165 // Load the (only) argument into r0. | |
| 2166 __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); | |
| 2167 | |
| 2168 // If the argument is a smi, just return. | |
| 2169 STATIC_ASSERT(kSmiTag == 0); | |
| 2170 __ tst(r0, Operand(kSmiTagMask)); | |
| 2171 __ Drop(argc + 1, eq); | |
| 2172 __ Ret(eq); | |
| 2173 | |
| 2174 __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); | |
| 2175 | |
| 2176 Label wont_fit_smi, no_vfp_exception, restore_fpscr_and_return; | |
| 2177 | |
| 2178 // If vfp3 is enabled, we use the fpu rounding with the RM (round towards | |
| 2179 // minus infinity) mode. | |
| 2180 | |
| 2181 // Load the HeapNumber value. | |
| 2182 // We will need access to the value in the core registers, so we load it | |
| 2183 // with ldrd and move it to the fpu. It also spares a sub instruction for | |
| 2184 // updating the HeapNumber value address, as vldr expects a multiple | |
| 2185 // of 4 offset. | |
| 2186 __ Ldrd(r4, r5, FieldMemOperand(r0, HeapNumber::kValueOffset)); | |
| 2187 __ vmov(d1, r4, r5); | |
| 2188 | |
| 2189 // Backup FPSCR. | |
| 2190 __ vmrs(r3); | |
| 2191 // Set custom FPCSR: | |
| 2192 // - Set rounding mode to "Round towards Minus Infinity" | |
| 2193 // (i.e. bits [23:22] = 0b10). | |
| 2194 // - Clear vfp cumulative exception flags (bits [3:0]). | |
| 2195 // - Make sure Flush-to-zero mode control bit is unset (bit 22). | |
| 2196 __ bic(r9, r3, | |
| 2197 Operand(kVFPExceptionMask | kVFPRoundingModeMask | kVFPFlushToZeroMask)); | |
| 2198 __ orr(r9, r9, Operand(kRoundToMinusInf)); | |
| 2199 __ vmsr(r9); | |
| 2200 | |
| 2201 // Convert the argument to an integer. | |
| 2202 __ vcvt_s32_f64(s0, d1, kFPSCRRounding); | |
| 2203 | |
| 2204 // Use vcvt latency to start checking for special cases. | |
| 2205 // Get the argument exponent and clear the sign bit. | |
| 2206 __ bic(r6, r5, Operand(HeapNumber::kSignMask)); | |
| 2207 __ mov(r6, Operand(r6, LSR, HeapNumber::kMantissaBitsInTopWord)); | |
| 2208 | |
| 2209 // Retrieve FPSCR and check for vfp exceptions. | |
| 2210 __ vmrs(r9); | |
| 2211 __ tst(r9, Operand(kVFPExceptionMask)); | |
| 2212 __ b(&no_vfp_exception, eq); | |
| 2213 | |
| 2214 // Check for NaN, Infinity, and -Infinity. | |
| 2215 // They are invariant through a Math.Floor call, so just | |
| 2216 // return the original argument. | |
| 2217 __ sub(r7, r6, Operand(HeapNumber::kExponentMask | |
| 2218 >> HeapNumber::kMantissaBitsInTopWord), SetCC); | |
| 2219 __ b(&restore_fpscr_and_return, eq); | |
| 2220 // We had an overflow or underflow in the conversion. Check if we | |
| 2221 // have a big exponent. | |
| 2222 __ cmp(r7, Operand(HeapNumber::kMantissaBits)); | |
| 2223 // If greater or equal, the argument is already round and in r0. | |
| 2224 __ b(&restore_fpscr_and_return, ge); | |
| 2225 __ b(&wont_fit_smi); | |
| 2226 | |
| 2227 __ bind(&no_vfp_exception); | |
| 2228 // Move the result back to general purpose register r0. | |
| 2229 __ vmov(r0, s0); | |
| 2230 // Check if the result fits into a smi. | |
| 2231 __ add(r1, r0, Operand(0x40000000), SetCC); | |
| 2232 __ b(&wont_fit_smi, mi); | |
| 2233 // Tag the result. | |
| 2234 STATIC_ASSERT(kSmiTag == 0); | |
| 2235 __ mov(r0, Operand(r0, LSL, kSmiTagSize)); | |
| 2236 | |
| 2237 // Check for -0. | |
| 2238 __ cmp(r0, Operand(0, RelocInfo::NONE)); | |
| 2239 __ b(&restore_fpscr_and_return, ne); | |
| 2240 // r5 already holds the HeapNumber exponent. | |
| 2241 __ tst(r5, Operand(HeapNumber::kSignMask)); | |
| 2242 // If our HeapNumber is negative it was -0, so load its address and return. | |
| 2243 // Else r0 is loaded with 0, so we can also just return. | |
| 2244 __ ldr(r0, MemOperand(sp, 0 * kPointerSize), ne); | |
| 2245 | |
| 2246 __ bind(&restore_fpscr_and_return); | |
| 2247 // Restore FPSCR and return. | |
| 2248 __ vmsr(r3); | |
| 2249 __ Drop(argc + 1); | |
| 2250 __ Ret(); | |
| 2251 | |
| 2252 __ bind(&wont_fit_smi); | |
| 2253 // Restore FPCSR and fall to slow case. | |
| 2254 __ vmsr(r3); | |
| 2255 | |
| 2256 __ bind(&slow); | |
| 2257 // Tail call the full function. We do not have to patch the receiver | |
| 2258 // because the function makes no use of it. | |
| 2259 __ InvokeFunction( | |
| 2260 function, arguments(), JUMP_FUNCTION, NullCallWrapper(), CALL_AS_METHOD); | |
| 2261 | |
| 2262 __ bind(&miss); | |
| 2263 // r2: function name. | |
| 2264 GenerateMissBranch(); | |
| 2265 | |
| 2266 // Return the generated code. | |
| 2267 return cell.is_null() ? GetCode(function) : GetCode(Code::NORMAL, name); | |
| 2268 } | 2156 } |
| 2269 | 2157 |
| 2270 | 2158 |
| 2271 Handle<Code> CallStubCompiler::CompileMathAbsCall( | 2159 Handle<Code> CallStubCompiler::CompileMathAbsCall( |
| 2272 Handle<Object> object, | 2160 Handle<Object> object, |
| 2273 Handle<JSObject> holder, | 2161 Handle<JSObject> holder, |
| 2274 Handle<JSGlobalPropertyCell> cell, | 2162 Handle<JSGlobalPropertyCell> cell, |
| 2275 Handle<JSFunction> function, | 2163 Handle<JSFunction> function, |
| 2276 Handle<String> name) { | 2164 Handle<String> name) { |
| 2277 // ----------- S t a t e ------------- | 2165 // ----------- S t a t e ------------- |
| (...skipping 27 matching lines...) |
| 2305 // Load the (only) argument into r0. | 2193 // Load the (only) argument into r0. |
| 2306 __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); | 2194 __ ldr(r0, MemOperand(sp, 0 * kPointerSize)); |
| 2307 | 2195 |
| 2308 // Check if the argument is a smi. | 2196 // Check if the argument is a smi. |
| 2309 Label not_smi; | 2197 Label not_smi; |
| 2310 STATIC_ASSERT(kSmiTag == 0); | 2198 STATIC_ASSERT(kSmiTag == 0); |
| 2311 __ JumpIfNotSmi(r0, &not_smi); | 2199 __ JumpIfNotSmi(r0, &not_smi); |
| 2312 | 2200 |
| 2313 // Do bitwise not or do nothing depending on the sign of the | 2201 // Do bitwise not or do nothing depending on the sign of the |
| 2314 // argument. | 2202 // argument. |
| 2315 __ eor(r1, r0, Operand(r0, ASR, kBitsPerInt - 1)); | 2203 __ asr(r1, r0, Operand(kBitsPerInt - 1)); |
| 2204 __ eor(r1, r0, r1); |
| 2316 | 2205 |
| 2317 // Add 1 or do nothing depending on the sign of the argument. | 2206 // Add 1 or do nothing depending on the sign of the argument. |
| 2318 __ sub(r0, r1, Operand(r0, ASR, kBitsPerInt - 1), SetCC); | 2207 __ asr(r0, r0, Operand(kBitsPerInt - 1)); |
| 2208 __ sub(r0, r1, r0); |
| 2209 __ cmpge(r0, Operand(0)); |
| 2319 | 2210 |
| 2320 // If the result is still negative, go to the slow case. | 2211 // If the result is still negative, go to the slow case. |
| 2321 // This only happens for the most negative smi. | 2212 // This only happens for the most negative smi. |
| 2322 Label slow; | 2213 Label slow; |
| 2323 __ b(mi, &slow); | 2214 __ b(f, &slow); |
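Both the removed ARM pair and the SH4 asr/eor/sub replacement compute a branchless absolute value of the still-tagged smi; a C++ sketch of the identity, including the single input the slow-case branch exists for:

    #include <cstdint>

    // abs(x) == (x ^ (x >> 31)) - (x >> 31) in 32-bit two's complement:
    // the arithmetic shift smears the sign bit into a 0 or -1 mask. The
    // tagged smi can be used directly because its tag bit is 0. The
    // identity fails only for the most negative value, hence the slow case.
    int32_t BranchlessAbs(int32_t x) {
      int32_t mask = x >> 31;  // 0 if x >= 0, -1 if x < 0
      return (x ^ mask) - mask;
    }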
| 2324 | 2215 |
| 2325 // Smi case done. | 2216 // Smi case done. |
| 2326 __ Drop(argc + 1); | 2217 __ Drop(argc + 1); |
| 2327 __ Ret(); | 2218 __ Ret(); |
| 2328 | 2219 |
| 2329 // Check if the argument is a heap number and load its exponent and | 2220 // Check if the argument is a heap number and load its exponent and |
| 2330 // sign. | 2221 // sign. |
| 2331 __ bind(&not_smi); | 2222 __ bind(&not_smi); |
| 2332 __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); | 2223 __ CheckMap(r0, r1, Heap::kHeapNumberMapRootIndex, &slow, DONT_DO_SMI_CHECK); |
| 2333 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 2224 __ ldr(r1, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
| 2334 | 2225 |
| 2335 // Check the sign of the argument. If the argument is positive, | 2226 // Check the sign of the argument. If the argument is positive, |
| 2336 // just return it. | 2227 // just return it. |
| 2337 Label negative_sign; | 2228 Label negative_sign; |
| 2338 __ tst(r1, Operand(HeapNumber::kSignMask)); | 2229 __ tst(r1, Operand(HeapNumber::kSignMask)); |
| 2339 __ b(ne, &negative_sign); | 2230 __ b(ne, &negative_sign, Label::kNear); |
| 2340 __ Drop(argc + 1); | 2231 __ Drop(argc + 1); |
| 2341 __ Ret(); | 2232 __ Ret(); |
| 2342 | 2233 |
| 2343 // If the argument is negative, clear the sign, and return a new | 2234 // If the argument is negative, clear the sign, and return a new |
| 2344 // number. | 2235 // number. |
| 2345 __ bind(&negative_sign); | 2236 __ bind(&negative_sign); |
| 2346 __ eor(r1, r1, Operand(HeapNumber::kSignMask)); | 2237 __ eor(r1, r1, Operand(HeapNumber::kSignMask)); |
| 2347 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 2238 __ ldr(r3, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
| 2348 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 2239 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 2349 __ AllocateHeapNumber(r0, r4, r5, r6, &slow); | 2240 __ AllocateHeapNumber(r0, r4, r5, r6, &slow); |
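The eor with HeapNumber::kSignMask above clears the IEEE-754 sign bit in the exponent word, which for a known-negative number is all Math.abs has to do; a hedged C++ equivalent operating on a whole double:

    #include <cstdint>
    #include <cstring>

    // Negate a negative double by clearing bit 63 of its raw encoding; the
    // stub does the same on the 32-bit high (exponent) word of the number.
    double AbsViaSignBit(double v) {
      uint64_t bits;
      std::memcpy(&bits, &v, sizeof bits);
      bits &= ~(uint64_t{1} << 63);
      std::memcpy(&v, &bits, sizeof v);
      return v;
    }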
| (...skipping 113 matching lines...) |
| 2463 // necessary. | 2354 // necessary. |
| 2464 if (object->IsGlobalObject()) { | 2355 if (object->IsGlobalObject()) { |
| 2465 __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); | 2356 __ ldr(r3, FieldMemOperand(r1, GlobalObject::kGlobalReceiverOffset)); |
| 2466 __ str(r3, MemOperand(sp, argc * kPointerSize)); | 2357 __ str(r3, MemOperand(sp, argc * kPointerSize)); |
| 2467 } | 2358 } |
| 2468 break; | 2359 break; |
| 2469 | 2360 |
| 2470 case STRING_CHECK: | 2361 case STRING_CHECK: |
| 2471 if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { | 2362 if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { |
| 2472 // Check that the object is a string or a symbol. | 2363 // Check that the object is a string or a symbol. |
| 2473 __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE); | 2364 __ CompareObjectType(r1, r3, r3, FIRST_NONSTRING_TYPE, ge); |
| 2474 __ b(ge, &miss); | 2365 __ b(eq, &miss); |
| 2475 // Check that the maps starting from the prototype haven't changed. | 2366 // Check that the maps starting from the prototype haven't changed. |
| 2476 GenerateDirectLoadGlobalFunctionPrototype( | 2367 GenerateDirectLoadGlobalFunctionPrototype( |
| 2477 masm(), Context::STRING_FUNCTION_INDEX, r0, &miss); | 2368 masm(), Context::STRING_FUNCTION_INDEX, r0, &miss); |
| 2478 CheckPrototypes( | 2369 CheckPrototypes( |
| 2479 Handle<JSObject>(JSObject::cast(object->GetPrototype())), | 2370 Handle<JSObject>(JSObject::cast(object->GetPrototype())), |
| 2480 r0, holder, r3, r1, r4, name, &miss); | 2371 r0, holder, r3, r1, r4, name, &miss); |
| 2481 } else { | 2372 } else { |
| 2482 // Calling non-strict non-builtins with a value as the receiver | 2373 // Calling non-strict non-builtins with a value as the receiver |
| 2483 // requires boxing. | 2374 // requires boxing. |
| 2484 __ jmp(&miss); | 2375 __ jmp(&miss); |
| 2485 } | 2376 } |
| 2486 break; | 2377 break; |
| 2487 | 2378 |
| 2488 case NUMBER_CHECK: | 2379 case NUMBER_CHECK: |
| 2489 if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { | 2380 if (function->IsBuiltin() || !function->shared()->is_classic_mode()) { |
| 2490 Label fast; | 2381 Label fast; |
| 2491 // Check that the object is a smi or a heap number. | 2382 // Check that the object is a smi or a heap number. |
| 2492 __ JumpIfSmi(r1, &fast); | 2383 __ JumpIfSmi(r1, &fast); |
| 2493 __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE); | 2384 __ CompareObjectType(r1, r0, r0, HEAP_NUMBER_TYPE, eq); |
| 2494 __ b(ne, &miss); | 2385 __ b(ne, &miss); |
| 2495 __ bind(&fast); | 2386 __ bind(&fast); |
| 2496 // Check that the maps starting from the prototype haven't changed. | 2387 // Check that the maps starting from the prototype haven't changed. |
| 2497 GenerateDirectLoadGlobalFunctionPrototype( | 2388 GenerateDirectLoadGlobalFunctionPrototype( |
| 2498 masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss); | 2389 masm(), Context::NUMBER_FUNCTION_INDEX, r0, &miss); |
| 2499 CheckPrototypes( | 2390 CheckPrototypes( |
| 2500 Handle<JSObject>(JSObject::cast(object->GetPrototype())), | 2391 Handle<JSObject>(JSObject::cast(object->GetPrototype())), |
| 2501 r0, holder, r3, r1, r4, name, &miss); | 2392 r0, holder, r3, r1, r4, name, &miss); |
| 2502 } else { | 2393 } else { |
| 2503 // Calling non-strict non-builtins with a value as the receiver | 2394 // Calling non-strict non-builtins with a value as the receiver |
| (...skipping 110 matching lines...) |
| 2614 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); | 2505 __ ldr(r3, FieldMemOperand(r0, GlobalObject::kGlobalReceiverOffset)); |
| 2615 __ str(r3, MemOperand(sp, argc * kPointerSize)); | 2506 __ str(r3, MemOperand(sp, argc * kPointerSize)); |
| 2616 } | 2507 } |
| 2617 | 2508 |
| 2618 // Set up the context (function already in r1). | 2509 // Set up the context (function already in r1). |
| 2619 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); | 2510 __ ldr(cp, FieldMemOperand(r1, JSFunction::kContextOffset)); |
| 2620 | 2511 |
| 2621 // Jump to the cached code (tail call). | 2512 // Jump to the cached code (tail call). |
| 2622 Counters* counters = masm()->isolate()->counters(); | 2513 Counters* counters = masm()->isolate()->counters(); |
| 2623 __ IncrementCounter(counters->call_global_inline(), 1, r3, r4); | 2514 __ IncrementCounter(counters->call_global_inline(), 1, r3, r4); |
| 2515 ASSERT(function->is_compiled()); |
| 2516 Handle<Code> code(function->code()); |
| 2624 ParameterCount expected(function->shared()->formal_parameter_count()); | 2517 ParameterCount expected(function->shared()->formal_parameter_count()); |
| 2625 CallKind call_kind = CallICBase::Contextual::decode(extra_state_) | 2518 CallKind call_kind = CallICBase::Contextual::decode(extra_state_) |
| 2626 ? CALL_AS_FUNCTION | 2519 ? CALL_AS_FUNCTION |
| 2627 : CALL_AS_METHOD; | 2520 : CALL_AS_METHOD; |
| 2521 // TODO(STM): does it work without UseCrankshaft? |
| 2628 // We call indirectly through the code field in the function to | 2522 // We call indirectly through the code field in the function to |
| 2629 // allow recompilation to take effect without changing any of the | 2523 // allow recompilation to take effect without changing any of the |
| 2630 // call sites. | 2524 // call sites. |
| 2631 __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); | 2525 __ ldr(r3, FieldMemOperand(r1, JSFunction::kCodeEntryOffset)); |
| 2632 __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION, | 2526 __ InvokeCode(r3, expected, arguments(), JUMP_FUNCTION, |
| 2633 NullCallWrapper(), call_kind); | 2527 NullCallWrapper(), call_kind); |
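A toy model (invented names, not the V8 API) of the indirection described in the comment above: because every call site loads the entry point from the function object, recompilation only has to overwrite that one field.

    // Models the JSFunction::kCodeEntryOffset load: swapping code_entry
    // retargets all future calls without patching any call site.
    struct FunctionModel {
      int (*code_entry)(int);
    };

    int CallThrough(FunctionModel* f, int arg) {
      return f->code_entry(arg);  // load the field, then jump
    }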
| 2634 | 2528 |
| 2635 // Handle call cache miss. | 2529 // Handle call cache miss. |
| 2636 __ bind(&miss); | 2530 __ bind(&miss); |
| 2637 __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3); | 2531 __ IncrementCounter(counters->call_global_inline_miss(), 1, r1, r3); |
| (...skipping 538 matching lines...) |
| 3176 Handle<JSObject> holder, | 3070 Handle<JSObject> holder, |
| 3177 Handle<String> name) { | 3071 Handle<String> name) { |
| 3178 // ----------- S t a t e ------------- | 3072 // ----------- S t a t e ------------- |
| 3179 // -- lr : return address | 3073 // -- lr : return address |
| 3180 // -- r0 : key | 3074 // -- r0 : key |
| 3181 // -- r1 : receiver | 3075 // -- r1 : receiver |
| 3182 // ----------------------------------- | 3076 // ----------------------------------- |
| 3183 Label miss; | 3077 Label miss; |
| 3184 | 3078 |
| 3185 // Check the key is the cached one. | 3079 // Check the key is the cached one. |
| 3186 __ cmp(r0, Operand(name)); | 3080 __ cmp(r0, Operand(name), ip); |
| 3187 __ b(ne, &miss); | 3081 __ b(ne, &miss); |
| 3188 | 3082 |
| 3189 LookupResult lookup(isolate()); | 3083 LookupResult lookup(isolate()); |
| 3190 LookupPostInterceptor(holder, name, &lookup); | 3084 LookupPostInterceptor(holder, name, &lookup); |
| 3191 GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name, | 3085 GenerateLoadInterceptor(receiver, holder, &lookup, r1, r0, r2, r3, r4, name, |
| 3192 &miss); | 3086 &miss); |
| 3193 __ bind(&miss); | 3087 __ bind(&miss); |
| 3194 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); | 3088 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); |
| 3195 | 3089 |
| 3196 return GetCode(Code::INTERCEPTOR, name); | 3090 return GetCode(Code::INTERCEPTOR, name); |
| 3197 } | 3091 } |
| 3198 | 3092 |
| 3199 | 3093 |
| 3200 Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength( | 3094 Handle<Code> KeyedLoadStubCompiler::CompileLoadArrayLength( |
| 3201 Handle<String> name) { | 3095 Handle<String> name) { |
| 3202 // ----------- S t a t e ------------- | 3096 // ----------- S t a t e ------------- |
| 3203 // -- lr : return address | 3097 // -- lr : return address |
| 3204 // -- r0 : key | 3098 // -- r0 : key |
| 3205 // -- r1 : receiver | 3099 // -- r1 : receiver |
| 3206 // ----------------------------------- | 3100 // ----------------------------------- |
| 3207 Label miss; | 3101 Label miss; |
| 3208 | 3102 |
| 3209 // Check the key is the cached one. | 3103 // Check the key is the cached one. |
| 3210 __ cmp(r0, Operand(name)); | 3104 __ cmp(r0, Operand(name), ip); |
| 3211 __ b(ne, &miss); | 3105 __ b(ne, &miss); |
| 3212 | 3106 |
| 3213 GenerateLoadArrayLength(masm(), r1, r2, &miss); | 3107 GenerateLoadArrayLength(masm(), r1, r2, &miss); |
| 3214 __ bind(&miss); | 3108 __ bind(&miss); |
| 3215 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); | 3109 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); |
| 3216 | 3110 |
| 3217 return GetCode(Code::CALLBACKS, name); | 3111 return GetCode(Code::CALLBACKS, name); |
| 3218 } | 3112 } |
| 3219 | 3113 |
| 3220 | 3114 |
| 3221 Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength( | 3115 Handle<Code> KeyedLoadStubCompiler::CompileLoadStringLength( |
| 3222 Handle<String> name) { | 3116 Handle<String> name) { |
| 3223 // ----------- S t a t e ------------- | 3117 // ----------- S t a t e ------------- |
| 3224 // -- lr : return address | 3118 // -- lr : return address |
| 3225 // -- r0 : key | 3119 // -- r0 : key |
| 3226 // -- r1 : receiver | 3120 // -- r1 : receiver |
| 3227 // ----------------------------------- | 3121 // ----------------------------------- |
| 3228 Label miss; | 3122 Label miss; |
| 3229 | 3123 |
| 3230 Counters* counters = masm()->isolate()->counters(); | 3124 Counters* counters = masm()->isolate()->counters(); |
| 3231 __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3); | 3125 __ IncrementCounter(counters->keyed_load_string_length(), 1, r2, r3); |
| 3232 | 3126 |
| 3233 // Check the key is the cached one. | 3127 // Check the key is the cached one. |
| 3234 __ cmp(r0, Operand(name)); | 3128 __ cmp(r0, Operand(name), ip); |
| 3235 __ b(ne, &miss); | 3129 __ b(ne, &miss); |
| 3236 | 3130 |
| 3237 GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true); | 3131 GenerateLoadStringLength(masm(), r1, r2, r3, &miss, true); |
| 3238 __ bind(&miss); | 3132 __ bind(&miss); |
| 3239 __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3); | 3133 __ DecrementCounter(counters->keyed_load_string_length(), 1, r2, r3); |
| 3240 | 3134 |
| 3241 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); | 3135 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); |
| 3242 | 3136 |
| 3243 return GetCode(Code::CALLBACKS, name); | 3137 return GetCode(Code::CALLBACKS, name); |
| 3244 } | 3138 } |
| 3245 | 3139 |
| 3246 | 3140 |
| 3247 Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype( | 3141 Handle<Code> KeyedLoadStubCompiler::CompileLoadFunctionPrototype( |
| 3248 Handle<String> name) { | 3142 Handle<String> name) { |
| 3249 // ----------- S t a t e ------------- | 3143 // ----------- S t a t e ------------- |
| 3250 // -- lr : return address | 3144 // -- lr : return address |
| 3251 // -- r0 : key | 3145 // -- r0 : key |
| 3252 // -- r1 : receiver | 3146 // -- r1 : receiver |
| 3253 // ----------------------------------- | 3147 // ----------------------------------- |
| 3254 Label miss; | 3148 Label miss; |
| 3255 | 3149 |
| 3256 Counters* counters = masm()->isolate()->counters(); | 3150 Counters* counters = masm()->isolate()->counters(); |
| 3257 __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); | 3151 __ IncrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); |
| 3258 | 3152 |
| 3259 // Check the name hasn't changed. | 3153 // Check the name hasn't changed. |
| 3260 __ cmp(r0, Operand(name)); | 3154 __ cmp(r0, Operand(name), ip); |
| 3261 __ b(ne, &miss); | 3155 __ b(ne, &miss); |
| 3262 | 3156 |
| 3263 GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss); | 3157 GenerateLoadFunctionPrototype(masm(), r1, r2, r3, &miss); |
| 3264 __ bind(&miss); | 3158 __ bind(&miss); |
| 3265 __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); | 3159 __ DecrementCounter(counters->keyed_load_function_prototype(), 1, r2, r3); |
| 3266 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); | 3160 GenerateLoadMiss(masm(), Code::KEYED_LOAD_IC); |
| 3267 | 3161 |
| 3268 return GetCode(Code::CALLBACKS, name); | 3162 return GetCode(Code::CALLBACKS, name); |
| 3269 } | 3163 } |
| 3270 | 3164 |
| (...skipping 25 matching lines...) |
| 3296 // -- lr : return address | 3190 // -- lr : return address |
| 3297 // -- r0 : key | 3191 // -- r0 : key |
| 3298 // -- r1 : receiver | 3192 // -- r1 : receiver |
| 3299 // ----------------------------------- | 3193 // ----------------------------------- |
| 3300 Label miss; | 3194 Label miss; |
| 3301 __ JumpIfSmi(r1, &miss); | 3195 __ JumpIfSmi(r1, &miss); |
| 3302 | 3196 |
| 3303 int receiver_count = receiver_maps->length(); | 3197 int receiver_count = receiver_maps->length(); |
| 3304 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); | 3198 __ ldr(r2, FieldMemOperand(r1, HeapObject::kMapOffset)); |
| 3305 for (int current = 0; current < receiver_count; ++current) { | 3199 for (int current = 0; current < receiver_count; ++current) { |
| 3200 Label skip; |
| 3306 __ mov(ip, Operand(receiver_maps->at(current))); | 3201 __ mov(ip, Operand(receiver_maps->at(current))); |
| 3307 __ cmp(r2, ip); | 3202 __ cmp(r2, ip); |
| 3308 __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET, eq); | 3203 __ bf_near(&skip); |
| 3204 __ Jump(handler_ics->at(current), RelocInfo::CODE_TARGET); |
| 3205 __ bind(&skip); |
| 3309 } | 3206 } |
| 3310 | 3207 |
| 3311 __ bind(&miss); | 3208 __ bind(&miss); |
| 3312 Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss(); | 3209 Handle<Code> miss_ic = isolate()->builtins()->KeyedLoadIC_Miss(); |
| 3313 __ Jump(miss_ic, RelocInfo::CODE_TARGET, al); | 3210 __ Jump(miss_ic, RelocInfo::CODE_TARGET); |
| 3314 | 3211 |
| 3315 // Return the generated code. | 3212 // Return the generated code. |
| 3316 return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); | 3213 return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); |
| 3317 } | 3214 } |
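The loop above emits a linear map dispatch; a hedged C++ sketch of its shape with toy types. Note how the SH4 rewrite inverts the test: lacking a conditional Jump, it branches around an unconditional one with bf_near.

    #include <cstddef>

    using Handler = int (*)(int key);

    // Compare the receiver's map pointer against each cached map and
    // tail-call the matching handler; fall through to the miss stub.
    int Dispatch(const void* map, const void* const maps[],
                 const Handler handlers[], std::size_t count,
                 Handler miss, int key) {
      for (std::size_t i = 0; i < count; ++i) {
        if (map == maps[i]) return handlers[i](key);
      }
      return miss(key);
    }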
| 3318 | 3215 |
| 3319 | 3216 |
| 3320 Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object, | 3217 Handle<Code> KeyedStoreStubCompiler::CompileStoreField(Handle<JSObject> object, |
| 3321 int index, | 3218 int index, |
| 3322 Handle<Map> transition, | 3219 Handle<Map> transition, |
| 3323 Handle<String> name) { | 3220 Handle<String> name) { |
| (...skipping 71 matching lines...) |
| 3395 // ----------------------------------- | 3292 // ----------------------------------- |
| 3396 Label miss; | 3293 Label miss; |
| 3397 __ JumpIfSmi(r2, &miss); | 3294 __ JumpIfSmi(r2, &miss); |
| 3398 | 3295 |
| 3399 int receiver_count = receiver_maps->length(); | 3296 int receiver_count = receiver_maps->length(); |
| 3400 __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); | 3297 __ ldr(r3, FieldMemOperand(r2, HeapObject::kMapOffset)); |
| 3401 for (int i = 0; i < receiver_count; ++i) { | 3298 for (int i = 0; i < receiver_count; ++i) { |
| 3402 __ mov(ip, Operand(receiver_maps->at(i))); | 3299 __ mov(ip, Operand(receiver_maps->at(i))); |
| 3403 __ cmp(r3, ip); | 3300 __ cmp(r3, ip); |
| 3404 if (transitioned_maps->at(i).is_null()) { | 3301 if (transitioned_maps->at(i).is_null()) { |
| 3405 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, eq); | 3302 Label skip; |
| 3303 __ bf(&skip); |
| 3304 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
| 3305 __ bind(&skip); |
| 3406 } else { | 3306 } else { |
| 3407 Label next_map; | 3307 Label next_map; |
| 3408 __ b(ne, &next_map); | 3308 __ b(ne, &next_map); |
| 3409 __ mov(r3, Operand(transitioned_maps->at(i))); | 3309 __ mov(r3, Operand(transitioned_maps->at(i))); |
| 3410 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET, al); | 3310 __ Jump(handler_stubs->at(i), RelocInfo::CODE_TARGET); |
| 3411 __ bind(&next_map); | 3311 __ bind(&next_map); |
| 3412 } | 3312 } |
| 3413 } | 3313 } |
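The store variant extends that dispatch with elements-kind transitions; sketched with invented types: when the cached map has a transition target, the stub installs it (the mov into r3) before jumping to the handler.

    #include <cstddef>

    struct Map;
    using StoreHandler = void (*)(Map* transition_target);

    void DispatchStore(const Map* map, Map* const maps[],
                       Map* const transitions[],
                       const StoreHandler handlers[], std::size_t count,
                       StoreHandler miss) {
      for (std::size_t i = 0; i < count; ++i) {
        if (map != maps[i]) continue;
        handlers[i](transitions[i]);  // null target means a plain store
        return;
      }
      miss(nullptr);
    }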
| 3414 | 3314 |
| 3415 __ bind(&miss); | 3315 __ bind(&miss); |
| 3416 Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); | 3316 Handle<Code> miss_ic = isolate()->builtins()->KeyedStoreIC_Miss(); |
| 3417 __ Jump(miss_ic, RelocInfo::CODE_TARGET, al); | 3317 __ Jump(miss_ic, RelocInfo::CODE_TARGET); |
| 3418 | 3318 |
| 3419 // Return the generated code. | 3319 // Return the generated code. |
| 3420 return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); | 3320 return GetCode(Code::NORMAL, factory()->empty_string(), MEGAMORPHIC); |
| 3421 } | 3321 } |
| 3422 | 3322 |
| 3423 | 3323 |
| 3424 Handle<Code> ConstructStubCompiler::CompileConstructStub( | 3324 Handle<Code> ConstructStubCompiler::CompileConstructStub( |
| 3425 Handle<JSFunction> function) { | 3325 Handle<JSFunction> function) { |
| 3426 // ----------- S t a t e ------------- | 3326 // ----------- S t a t e ------------- |
| 3427 // -- r0 : argc | 3327 // -- r0 : argc |
| (...skipping 14 matching lines...) |
| 3442 __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset)); | 3342 __ ldr(r2, FieldMemOperand(r2, SharedFunctionInfo::kDebugInfoOffset)); |
| 3443 __ cmp(r2, r7); | 3343 __ cmp(r2, r7); |
| 3444 __ b(ne, &generic_stub_call); | 3344 __ b(ne, &generic_stub_call); |
| 3445 #endif | 3345 #endif |
| 3446 | 3346 |
| 3447 // Load the initial map and verify that it is in fact a map. | 3347 // Load the initial map and verify that it is in fact a map. |
| 3448 // r1: constructor function | 3348 // r1: constructor function |
| 3449 // r7: undefined | 3349 // r7: undefined |
| 3450 __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); | 3350 __ ldr(r2, FieldMemOperand(r1, JSFunction::kPrototypeOrInitialMapOffset)); |
| 3451 __ JumpIfSmi(r2, &generic_stub_call); | 3351 __ JumpIfSmi(r2, &generic_stub_call); |
| 3452 __ CompareObjectType(r2, r3, r4, MAP_TYPE); | 3352 __ CompareObjectType(r2, r3, r4, MAP_TYPE, eq); |
| 3453 __ b(ne, &generic_stub_call); | 3353 __ b(f, &generic_stub_call); |
| 3454 | 3354 |
| 3455 #ifdef DEBUG | 3355 #ifdef DEBUG |
| 3456 // Cannot construct functions this way. | 3356 // Cannot construct functions this way. |
| 3457 // r0: argc | 3357 // r0: argc |
| 3458 // r1: constructor function | 3358 // r1: constructor function |
| 3459 // r2: initial map | 3359 // r2: initial map |
| 3460 // r7: undefined | 3360 // r7: undefined |
| 3461 __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE); | 3361 __ CompareInstanceType(r2, r3, JS_FUNCTION_TYPE, eq); |
| 3462 __ Check(ne, "Function constructed by construct stub."); | 3362 __ Check(ne, "Function constructed by construct stub."); |
| 3463 #endif | 3363 #endif |
| 3464 | 3364 |
| 3465 // Now allocate the JSObject in new space. | 3365 // Now allocate the JSObject in new space. |
| 3466 // r0: argc | 3366 // r0: argc |
| 3467 // r1: constructor function | 3367 // r1: constructor function |
| 3468 // r2: initial map | 3368 // r2: initial map |
| 3469 // r7: undefined | 3369 // r7: undefined |
| 3470 __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); | 3370 __ ldrb(r3, FieldMemOperand(r2, Map::kInstanceSizeOffset)); |
| 3471 __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS); | 3371 __ AllocateInNewSpace(r3, r4, r5, r6, &generic_stub_call, SIZE_IN_WORDS); |
| (...skipping 10 matching lines...) |
| 3482 __ mov(r5, r4); | 3382 __ mov(r5, r4); |
| 3483 ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); | 3383 ASSERT_EQ(0 * kPointerSize, JSObject::kMapOffset); |
| 3484 __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); | 3384 __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); |
| 3485 ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); | 3385 ASSERT_EQ(1 * kPointerSize, JSObject::kPropertiesOffset); |
| 3486 __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); | 3386 __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); |
| 3487 ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); | 3387 ASSERT_EQ(2 * kPointerSize, JSObject::kElementsOffset); |
| 3488 __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); | 3388 __ str(r6, MemOperand(r5, kPointerSize, PostIndex)); |
| 3489 | 3389 |
| 3490 // Calculate the location of the first argument. The stack contains only the | 3390 // Calculate the location of the first argument. The stack contains only the |
| 3491 // argc arguments. | 3391 // argc arguments. |
| 3492 __ add(r1, sp, Operand(r0, LSL, kPointerSizeLog2)); | 3392 __ lsl(r1, r0, Operand(kPointerSizeLog2)); |
| 3393 __ add(r1, sp, r1); |
| 3493 | 3394 |
| 3494 // Fill all the in-object properties with undefined. | 3395 // Fill all the in-object properties with undefined. |
| 3495 // r0: argc | 3396 // r0: argc |
| 3496 // r1: first argument | 3397 // r1: first argument |
| 3497 // r3: object size (in words) | 3398 // r3: object size (in words) |
| 3498 // r4: JSObject (not tagged) | 3399 // r4: JSObject (not tagged) |
| 3499 // r5: First in-object property of JSObject (not tagged) | 3400 // r5: First in-object property of JSObject (not tagged) |
| 3500 // r7: undefined | 3401 // r7: undefined |
| 3501 // Fill the initialized properties with a constant value or a passed argument | 3402 // Fill the initialized properties with a constant value or a passed argument |
| 3502 // depending on the this.x = ...; assignment in the function. | 3403 // depending on the this.x = ...; assignment in the function. |
| 3503 Handle<SharedFunctionInfo> shared(function->shared()); | 3404 Handle<SharedFunctionInfo> shared(function->shared()); |
| 3504 for (int i = 0; i < shared->this_property_assignments_count(); i++) { | 3405 for (int i = 0; i < shared->this_property_assignments_count(); i++) { |
| 3505 if (shared->IsThisPropertyAssignmentArgument(i)) { | 3406 if (shared->IsThisPropertyAssignmentArgument(i)) { |
| 3506 Label not_passed, next; | 3407 Label not_passed, next; |
| 3507 // Check if the argument assigned to the property is actually passed. | 3408 // Check if the argument assigned to the property is actually passed. |
| 3508 int arg_number = shared->GetThisPropertyAssignmentArgument(i); | 3409 int arg_number = shared->GetThisPropertyAssignmentArgument(i); |
| 3509 __ cmp(r0, Operand(arg_number)); | 3410 __ cmpgt(r0, Operand(arg_number)); |
| 3510 __ b(le, &not_passed); | 3411 __ b(f, &not_passed, Label::kNear); |
| 3511 // Argument passed - find it on the stack. | 3412 // Argument passed - find it on the stack. |
| 3512 __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize)); | 3413 __ ldr(r2, MemOperand(r1, (arg_number + 1) * -kPointerSize)); |
| 3513 __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); | 3414 __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); |
| 3514 __ b(&next); | 3415 __ b_near(&next); |
| 3515 __ bind(&not_passed); | 3416 __ bind(&not_passed); |
| 3516 // Set the property to undefined. | 3417 // Set the property to undefined. |
| 3517 __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); | 3418 __ str(r7, MemOperand(r5, kPointerSize, PostIndex)); |
| 3518 __ bind(&next); | 3419 __ bind(&next); |
| 3519 } else { | 3420 } else { |
| 3520 // Set the property to the constant value. | 3421 // Set the property to the constant value. |
| 3521 Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i)); | 3422 Handle<Object> constant(shared->GetThisPropertyAssignmentConstant(i)); |
| 3522 __ mov(r2, Operand(constant)); | 3423 __ mov(r2, Operand(constant)); |
| 3523 __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); | 3424 __ str(r2, MemOperand(r5, kPointerSize, PostIndex)); |
| 3524 } | 3425 } |
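What the fill loop above computes, as a hedged C++ sketch with toy types (the real code walks the caller's stack at (arg_number + 1) * -kPointerSize rather than indexing an array):

    using Value = int;  // stand-in for a tagged pointer

    struct Assignment {
      bool is_argument;  // this.x = <argument> vs. this.x = <constant>
      int arg_number;
      Value constant;
    };

    // Each declared this.x property receives the passed argument when the
    // caller supplied enough arguments, else undefined (or the constant).
    void FillThisProperties(const Assignment* a, int count, const Value* args,
                            int argc, Value undefined, Value* props) {
      for (int i = 0; i < count; ++i) {
        if (a[i].is_argument) {
          props[i] =
              (a[i].arg_number < argc) ? args[a[i].arg_number] : undefined;
        } else {
          props[i] = a[i].constant;
        }
      }
    }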
| (...skipping 10 matching lines...) |
| 3535 // r0: argc | 3436 // r0: argc |
| 3536 // r4: JSObject (not tagged) | 3437 // r4: JSObject (not tagged) |
| 3537 // Move argc to r1 and the JSObject to return to r0 and tag it. | 3438 // Move argc to r1 and the JSObject to return to r0 and tag it. |
| 3538 __ mov(r1, r0); | 3439 __ mov(r1, r0); |
| 3539 __ mov(r0, r4); | 3440 __ mov(r0, r4); |
| 3540 __ orr(r0, r0, Operand(kHeapObjectTag)); | 3441 __ orr(r0, r0, Operand(kHeapObjectTag)); |
| 3541 | 3442 |
| 3542 // r0: JSObject | 3443 // r0: JSObject |
| 3543 // r1: argc | 3444 // r1: argc |
| 3544 // Remove caller arguments and receiver from the stack and return. | 3445 // Remove caller arguments and receiver from the stack and return. |
| 3545 __ add(sp, sp, Operand(r1, LSL, kPointerSizeLog2)); | 3446 __ lsl(ip, r1, Operand(kPointerSizeLog2)); |
| 3447 __ add(sp, sp, ip); |
| 3546 __ add(sp, sp, Operand(kPointerSize)); | 3448 __ add(sp, sp, Operand(kPointerSize)); |
| 3547 Counters* counters = masm()->isolate()->counters(); | 3449 Counters* counters = masm()->isolate()->counters(); |
| 3548 __ IncrementCounter(counters->constructed_objects(), 1, r1, r2); | 3450 __ IncrementCounter(counters->constructed_objects(), 1, r1, r2); |
| 3549 __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2); | 3451 __ IncrementCounter(counters->constructed_objects_stub(), 1, r1, r2); |
| 3550 __ Jump(lr); | 3452 __ Ret(); |
| 3551 | 3453 |
| 3552 // Jump to the generic stub in case the specialized code cannot handle the | 3454 // Jump to the generic stub in case the specialized code cannot handle the |
| 3553 // construction. | 3455 // construction. |
| 3554 __ bind(&generic_stub_call); | 3456 __ bind(&generic_stub_call); |
| 3555 Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric(); | 3457 Handle<Code> code = masm()->isolate()->builtins()->JSConstructStubGeneric(); |
| 3556 __ Jump(code, RelocInfo::CODE_TARGET); | 3458 __ Jump(code, RelocInfo::CODE_TARGET); |
| 3557 | 3459 |
| 3558 // Return the generated code. | 3460 // Return the generated code. |
| 3559 return GetCode(); | 3461 return GetCode(); |
| 3560 } | 3462 } |
| 3561 | 3463 |
| 3562 | 3464 |
| 3563 #undef __ | 3465 #undef __ |
| 3564 #define __ ACCESS_MASM(masm) | 3466 #define __ ACCESS_MASM(masm) |
| 3565 | 3467 |
| 3566 | 3468 |
| 3567 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( | 3469 void KeyedLoadStubCompiler::GenerateLoadDictionaryElement( |
| 3568 MacroAssembler* masm) { | 3470 MacroAssembler* masm) { |
| 3569 // ---------- S t a t e -------------- | 3471 // ---------- S t a t e -------------- |
| 3570 // -- lr : return address | 3472 // -- lr : return address |
| 3571 // -- r0 : key | 3473 // -- r0 : key |
| 3572 // -- r1 : receiver | 3474 // -- r1 : receiver |
| 3573 // ----------------------------------- | 3475 // ----------------------------------- |
| 3574 Label slow, miss_force_generic; | 3476 Label slow, miss_force_generic; |
| 3575 | 3477 |
| 3576 Register key = r0; | 3478 Register key = r0; |
| 3577 Register receiver = r1; | 3479 Register receiver = r1; |
| 3578 | 3480 |
| 3579 __ JumpIfNotSmi(key, &miss_force_generic); | 3481 __ JumpIfNotSmi(key, &miss_force_generic); |
| 3580 __ mov(r2, Operand(key, ASR, kSmiTagSize)); | 3482 __ asr(r2, key, Operand(kSmiTagSize)); |
| 3581 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 3483 __ ldr(r4, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 3582 __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); | 3484 __ LoadFromNumberDictionary(&slow, r4, key, r0, r2, r3, r5); |
| 3583 __ Ret(); | 3485 __ Ret(); |
| 3584 | 3486 |
| 3585 __ bind(&slow); | 3487 __ bind(&slow); |
| 3586 __ IncrementCounter( | 3488 __ IncrementCounter( |
| 3587 masm->isolate()->counters()->keyed_load_external_array_slow(), | 3489 masm->isolate()->counters()->keyed_load_external_array_slow(), |
| 3588 1, r2, r3); | 3490 1, r2, r3); |
| 3589 | 3491 |
| 3590 // ---------- S t a t e -------------- | 3492 // ---------- S t a t e -------------- |
| (...skipping 48 matching lines...) |
| 3639 } | 3541 } |
| 3640 return false; | 3542 return false; |
| 3641 } | 3543 } |
| 3642 | 3544 |
| 3643 | 3545 |
| 3644 static void GenerateSmiKeyCheck(MacroAssembler* masm, | 3546 static void GenerateSmiKeyCheck(MacroAssembler* masm, |
| 3645 Register key, | 3547 Register key, |
| 3646 Register scratch0, | 3548 Register scratch0, |
| 3647 Register scratch1, | 3549 Register scratch1, |
| 3648 DwVfpRegister double_scratch0, | 3550 DwVfpRegister double_scratch0, |
| 3649 DwVfpRegister double_scratch1, | |
| 3650 Label* fail) { | 3551 Label* fail) { |
| 3651 if (CpuFeatures::IsSupported(VFP2)) { | 3552 // TODO(STM): FPU support |
| 3652 CpuFeatures::Scope scope(VFP2); | 3553 // if (CpuFeatures::IsSupported(FPU)) { |
| 3653 Label key_ok; | 3554 // |
| 3654 // Check for smi or a smi inside a heap number. We convert the heap | 3555 // } else { |
| 3655 // number and check if the conversion is exact and fits into the smi | 3556 { |
| 3656 // range. | |
| 3657 __ JumpIfSmi(key, &key_ok); | |
| 3658 __ CheckMap(key, | |
| 3659 scratch0, | |
| 3660 Heap::kHeapNumberMapRootIndex, | |
| 3661 fail, | |
| 3662 DONT_DO_SMI_CHECK); | |
| 3663 __ sub(ip, key, Operand(kHeapObjectTag)); | |
| 3664 __ vldr(double_scratch0, ip, HeapNumber::kValueOffset); | |
| 3665 __ EmitVFPTruncate(kRoundToZero, | |
| 3666 scratch0, | |
| 3667 double_scratch0, | |
| 3668 scratch1, | |
| 3669 double_scratch1, | |
| 3670 kCheckForInexactConversion); | |
| 3671 __ b(ne, fail); | |
| 3672 __ TrySmiTag(scratch0, fail, scratch1); | |
| 3673 __ mov(key, scratch0); | |
| 3674 __ bind(&key_ok); | |
| 3675 } else { | |
| 3676 // Check that the key is a smi. | 3557 // Check that the key is a smi. |
| 3677 __ JumpIfNotSmi(key, fail); | 3558 __ JumpIfNotSmi(key, fail); |
| 3678 } | 3559 } |
| 3679 } | 3560 } |
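What JumpIfNotSmi actually tests, sketched in C++; per the TODO above, the SH4 port currently keeps only this plain-smi path and sends heap-number keys to the generic stub.

    #include <cstdint>

    // With kSmiTag == 0 and kSmiTagSize == 1, a value is a smi iff its low
    // bit is clear; untagging is an arithmetic shift right by one.
    bool IsSmi(intptr_t value) { return (value & 1) == 0; }
    intptr_t SmiUntag(intptr_t value) { return value >> 1; }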
| 3680 | 3561 |
| 3681 | 3562 |
| 3682 void KeyedLoadStubCompiler::GenerateLoadExternalArray( | 3563 void KeyedLoadStubCompiler::GenerateLoadExternalArray( |
| 3683 MacroAssembler* masm, | 3564 MacroAssembler* masm, |
| 3684 ElementsKind elements_kind) { | 3565 ElementsKind elements_kind) { |
| 3685 // ---------- S t a t e -------------- | 3566 // ---------- S t a t e -------------- |
| 3686 // -- lr : return address | 3567 // -- lr : return address |
| 3687 // -- r0 : key | 3568 // -- r0 : key |
| 3688 // -- r1 : receiver | 3569 // -- r1 : receiver |
| 3689 // ----------------------------------- | 3570 // ----------------------------------- |
| 3690 Label miss_force_generic, slow, failed_allocation; | 3571 Label miss_force_generic, slow, failed_allocation; |
| 3691 | 3572 |
| 3692 Register key = r0; | 3573 Register key = r0; |
| 3693 Register receiver = r1; | 3574 Register receiver = r1; |
| 3694 | 3575 |
| 3695 // This stub is meant to be tail-jumped to; the receiver must already | 3576 // This stub is meant to be tail-jumped to; the receiver must already |
| 3696 // have been verified by the caller to not be a smi. | 3577 // have been verified by the caller to not be a smi. |
| 3697 | 3578 |
| 3698 // Check that the key is a smi or a heap number convertible to a smi. | 3579 // Check that the key is a smi or a heap number convertible to a smi. |
| 3699 GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); | 3580 GenerateSmiKeyCheck(masm, key, r4, r5, dr0, &miss_force_generic); |
| 3700 | 3581 |
| 3701 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 3582 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 3702 // r3: elements array | 3583 // r3: elements array |
| 3703 | 3584 |
| 3704 // Check that the index is in range. | 3585 // Check that the index is in range. |
| 3705 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); | 3586 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); |
| 3706 __ cmp(key, ip); | 3587 __ cmphs(key, ip); |
| 3707 // Unsigned comparison catches both negative and too-large values. | 3588 // Unsigned comparison catches both negative and too-large values. |
| 3708 __ b(hs, &miss_force_generic); | 3589 __ bt(&miss_force_generic); |
| 3709 | 3590 |
| 3710 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | 3591 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 3711 // r3: base pointer of external storage | 3592 // r3: base pointer of external storage |
| 3712 | 3593 |
| 3713 // We are not untagging the smi key; instead we work with it | 3594 // We are not untagging the smi key; instead we work with it |
| 3714 // as if it were premultiplied by 2. | 3595 // as if it were premultiplied by 2. |
| 3715 STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); | 3596 STATIC_ASSERT((kSmiTag == 0) && (kSmiTagSize == 1)); |
| 3716 | 3597 |
| 3717 Register value = r2; | 3598 Register value = r2; |
| 3718 switch (elements_kind) { | 3599 switch (elements_kind) { |
| 3719 case EXTERNAL_BYTE_ELEMENTS: | 3600 case EXTERNAL_BYTE_ELEMENTS: |
| 3720 __ ldrsb(value, MemOperand(r3, key, LSR, 1)); | 3601 __ lsr(value, key, Operand(1)); |
| 3602 __ ldrsb(value, MemOperand(r3, value)); |
| 3721 break; | 3603 break; |
| 3722 case EXTERNAL_PIXEL_ELEMENTS: | 3604 case EXTERNAL_PIXEL_ELEMENTS: |
| 3723 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 3605 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 3724 __ ldrb(value, MemOperand(r3, key, LSR, 1)); | 3606 __ lsr(value, key, Operand(1)); |
| 3607 __ ldrb(value, MemOperand(r3, value)); |
| 3725 break; | 3608 break; |
| 3726 case EXTERNAL_SHORT_ELEMENTS: | 3609 case EXTERNAL_SHORT_ELEMENTS: |
| 3727 __ ldrsh(value, MemOperand(r3, key, LSL, 0)); | 3610 __ ldrsh(value, MemOperand(r3, key)); |
| 3728 break; | 3611 break; |
| 3729 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 3612 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 3730 __ ldrh(value, MemOperand(r3, key, LSL, 0)); | 3613 __ lsl(value, key, Operand(0)); |
| 3614 __ ldrh(value, MemOperand(r3, value)); |
| 3731 break; | 3615 break; |
| 3732 case EXTERNAL_INT_ELEMENTS: | 3616 case EXTERNAL_INT_ELEMENTS: |
| 3733 case EXTERNAL_UNSIGNED_INT_ELEMENTS: | 3617 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 3734 __ ldr(value, MemOperand(r3, key, LSL, 1)); | 3618 __ lsl(value, key, Operand(1)); |
| 3619 __ ldr(value, MemOperand(r3, value)); |
| 3735 break; | 3620 break; |
| 3736 case EXTERNAL_FLOAT_ELEMENTS: | 3621 case EXTERNAL_FLOAT_ELEMENTS: |
| 3737 if (CpuFeatures::IsSupported(VFP2)) { | 3622 if (CpuFeatures::IsSupported(FPU)) { |
| 3738 CpuFeatures::Scope scope(VFP2); | 3623 __ lsl(r2, key, Operand(1)); |
| 3739 __ add(r2, r3, Operand(key, LSL, 1)); | 3624 __ add(r2, r3, r2); |
| 3740 __ vldr(s0, r2, 0); | 3625 __ fldr(fr0, MemOperand(r2, 0)); |
| 3741 } else { | 3626 } else { |
| 3742 __ ldr(value, MemOperand(r3, key, LSL, 1)); | 3627 __ lsl(value, key, Operand(1)); |
| 3628 __ ldr(value, MemOperand(r3, value)); |
| 3743 } | 3629 } |
| 3744 break; | 3630 break; |
| 3745 case EXTERNAL_DOUBLE_ELEMENTS: | 3631 case EXTERNAL_DOUBLE_ELEMENTS: |
| 3746 if (CpuFeatures::IsSupported(VFP2)) { | 3632 if (CpuFeatures::IsSupported(FPU)) { |
| 3747 CpuFeatures::Scope scope(VFP2); | 3633 __ lsl(r2, key, Operand(2)); |
| 3748 __ add(r2, r3, Operand(key, LSL, 2)); | 3634 __ add(r2, r3, r2); |
| 3749 __ vldr(d0, r2, 0); | 3635 __ dldr(dr0, MemOperand(r2, 0), r2); |
| 3750 } else { | 3636 } else { |
| 3751 __ add(r4, r3, Operand(key, LSL, 2)); | 3637 __ lsl(r4, key, Operand(2)); |
| 3638 __ add(r4, r3, r4); |
| 3752 // r4: pointer to the beginning of the double we want to load. | 3639 // r4: pointer to the beginning of the double we want to load. |
| 3753 __ ldr(r2, MemOperand(r4, 0)); | 3640 __ ldr(r2, MemOperand(r4, 0)); |
| 3754 __ ldr(r3, MemOperand(r4, Register::kSizeInBytes)); | 3641 __ ldr(r3, MemOperand(r4, Register::kSizeInBytes)); |
| 3755 } | 3642 } |
| 3756 break; | 3643 break; |
| 3757 case FAST_ELEMENTS: | 3644 case FAST_ELEMENTS: |
| 3758 case FAST_SMI_ELEMENTS: | 3645 case FAST_SMI_ELEMENTS: |
| 3759 case FAST_DOUBLE_ELEMENTS: | 3646 case FAST_DOUBLE_ELEMENTS: |
| 3760 case FAST_HOLEY_ELEMENTS: | 3647 case FAST_HOLEY_ELEMENTS: |
| 3761 case FAST_HOLEY_SMI_ELEMENTS: | 3648 case FAST_HOLEY_SMI_ELEMENTS: |
| 3762 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3649 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 3763 case DICTIONARY_ELEMENTS: | 3650 case DICTIONARY_ELEMENTS: |
| 3764 case NON_STRICT_ARGUMENTS_ELEMENTS: | 3651 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 3765 UNREACHABLE(); | 3652 UNREACHABLE(); |
| 3766 break; | 3653 break; |
| 3767 } | 3654 } |
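The addressing in the switch above never untags the key: it stays a smi (index * 2) and the untag folds into the element-size scaling, which is why byte loads shift right by one, 16-bit loads use the key as-is, and 32- and 64-bit loads shift left by one and two. A sketch:

    #include <cstdint>

    // Byte offset of element smi_key (tagged, i.e. index * 2) for elements
    // of 2^log2_size bytes: untag, then scale. For log2_size == 1 the two
    // shifts cancel, matching the 16-bit cases above.
    intptr_t ElementOffset(intptr_t smi_key, int log2_size) {
      return (smi_key >> 1) << log2_size;
    }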
| 3768 | 3655 |
| 3769 // For integer array types: | 3656 // For integer array types: |
| 3770 // r2: value | 3657 // r2: value |
| 3771 // For float array type: | 3658 // For float array type: |
| 3772 // s0: value (if VFP3 is supported) | 3659 // fr0: value (if FPU is supported) |
| 3773 // r2: value (if VFP3 is not supported) | 3660 // r2: value (if FPU is not supported) |
| 3774 // For double array type: | 3661 // For double array type: |
| 3775 // d0: value (if VFP3 is supported) | 3662 // dr0: value (if FPU is supported) |
| 3776 // r2/r3: value (if VFP3 is not supported) | 3663 // r2/r3: value (if FPU is not supported) |
| 3777 | 3664 |
| 3778 if (elements_kind == EXTERNAL_INT_ELEMENTS) { | 3665 if (elements_kind == EXTERNAL_INT_ELEMENTS) { |
| 3779 // For the Int and UnsignedInt array types, we need to see whether | 3666 // For the Int and UnsignedInt array types, we need to see whether |
| 3780 // the value can be represented in a Smi. If not, we need to convert | 3667 // the value can be represented in a Smi. If not, we need to convert |
| 3781 // it to a HeapNumber. | 3668 // it to a HeapNumber. |
| 3782 Label box_int; | 3669 Label box_int; |
| 3783 __ cmp(value, Operand(0xC0000000)); | 3670 // TODO(STM): why is this different from the ARM code? |
| 3784 __ b(mi, &box_int); | 3671 __ add(r3, value, Operand(0x40000000)); // Non-smi value gives neg result |
| 3672 __ cmpge(r3, Operand(0)); |
| 3673 __ bf_near(&box_int); |
| 3785 // Tag integer as smi and return it. | 3674 // Tag integer as smi and return it. |
| 3786 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | 3675 __ lsl(r0, value, Operand(kSmiTagSize)); |
| 3787 __ Ret(); | 3676 __ Ret(); |
| 3788 | 3677 |
| 3789 __ bind(&box_int); | 3678 __ bind(&box_int); |
| 3790 if (CpuFeatures::IsSupported(VFP2)) { | 3679 // Allocate a HeapNumber for the result and perform int-to-double |
| 3791 CpuFeatures::Scope scope(VFP2); | 3680 // conversion. Don't touch r0 or r1 as they are needed if allocation |
| 3792 // Allocate a HeapNumber for the result and perform int-to-double | 3681 // fails. |
| 3793 // conversion. Don't touch r0 or r1 as they are needed if allocation | 3682 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3794 // fails. | 3683 __ AllocateHeapNumber(r5, r3, r4, r6, &slow); |
| 3795 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 3684 // Now we can use r0 for the result as key is not needed any more. |
| 3685 __ mov(r0, r5); |
| 3796 | 3686 |
| 3797 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, DONT_TAG_RESULT); | 3687 if (CpuFeatures::IsSupported(FPU)) { |
| 3798 // Now we can use r0 for the result as key is not needed any more. | 3688 __ dfloat(dr0, value); |
| 3799 __ add(r0, r5, Operand(kHeapObjectTag)); | 3689 ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
| 3800 __ vmov(s0, value); | 3690 __ sub(r3, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
| 3801 __ vcvt_f64_s32(d0, s0); | 3691 __ dstr(dr0, MemOperand(r3, 0), r3); |
| 3802 __ vstr(d0, r5, HeapNumber::kValueOffset); | |
| 3803 __ Ret(); | 3692 __ Ret(); |
| 3804 } else { | 3693 } else { |
| 3805 // Allocate a HeapNumber for the result and perform int-to-double | |
| 3806 // conversion. Don't touch r0 or r1 as they are needed if allocation | |
| 3807 // fails. | |
| 3808 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | |
| 3809 __ AllocateHeapNumber(r5, r3, r4, r6, &slow, TAG_RESULT); | |
| 3810 // Now we can use r0 for the result as key is not needed any more. | |
| 3811 __ mov(r0, r5); | |
| 3812 Register dst1 = r1; | 3694 Register dst1 = r1; |
| 3813 Register dst2 = r3; | 3695 Register dst2 = r3; |
| 3814 FloatingPointHelper::Destination dest = | 3696 FloatingPointHelper::Destination dest = |
| 3815 FloatingPointHelper::kCoreRegisters; | 3697 FloatingPointHelper::kCoreRegisters; |
| 3816 FloatingPointHelper::ConvertIntToDouble(masm, | 3698 FloatingPointHelper::ConvertIntToDouble(masm, |
| 3817 value, | 3699 value, |
| 3818 dest, | 3700 dest, |
| 3819 d0, | 3701 dr0, |
| 3820 dst1, | 3702 dst1, |
| 3821 dst2, | 3703 dst2, |
| 3822 r9, | 3704 r9, |
| 3823 s0); | 3705 no_freg); |
| 3824 __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); | 3706 __ str(dst1, FieldMemOperand(r0, HeapNumber::kMantissaOffset)); |
| 3825 __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); | 3707 __ str(dst2, FieldMemOperand(r0, HeapNumber::kExponentOffset)); |
| 3826 __ Ret(); | 3708 __ Ret(); |
| 3827 } | 3709 } |
| 3828 } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { | 3710 } else if (elements_kind == EXTERNAL_UNSIGNED_INT_ELEMENTS) { |
| 3829 // The test is different for unsigned int values. Since we need | 3711 // The test is different for unsigned int values. Since we need |
| 3830 // the value to be in the range of a positive smi, we can't | 3712 // the value to be in the range of a positive smi, we can't |
| 3831 // handle either of the top two bits being set in the value. | 3713 // handle either of the top two bits being set in the value. |
| 3832 if (CpuFeatures::IsSupported(VFP2)) { | 3714 if (CpuFeatures::IsSupported(FPU)) { |
| 3833 CpuFeatures::Scope scope(VFP2); | |
| 3834 Label box_int, done; | 3715 Label box_int, done; |
| 3835 __ tst(value, Operand(0xC0000000)); | 3716 __ tst(value, Operand(0xC0000000)); |
| 3836 __ b(ne, &box_int); | 3717 __ b(ne, &box_int); |
| 3837 // Tag integer as smi and return it. | 3718 // Tag integer as smi and return it. |
| 3838 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | 3719 __ lsl(r0, value, Operand(kSmiTagSize)); |
| 3839 __ Ret(); | 3720 __ Ret(); |
| 3840 | 3721 |
| 3841 __ bind(&box_int); | 3722 __ bind(&box_int); |
| 3842 __ vmov(s0, value); | 3723 __ dufloat(dr0, value, dr2, sh4_rtmp); |
| 3843 // Allocate a HeapNumber for the result and perform int-to-double | 3724 // Allocate a HeapNumber for the result and perform int-to-double |
| 3844 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all | 3725 // conversion. Don't use r0 and r1 as AllocateHeapNumber clobbers all |
| 3845 // registers - also when jumping due to exhausted young space. | 3726 // registers - also when jumping due to exhausted young space. |
| 3846 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 3727 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3847 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); | 3728 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
| 3848 | 3729 |
| 3849 __ vcvt_f64_u32(d0, s0); | 3730 __ sub(r1, r2, Operand(kHeapObjectTag)); |
| 3850 __ vstr(d0, r2, HeapNumber::kValueOffset); | 3731 __ dstr(dr0, MemOperand(r1, HeapNumber::kValueOffset)); |
| 3851 | 3732 |
| 3852 __ add(r0, r2, Operand(kHeapObjectTag)); | 3733 __ mov(r0, r2); |
| 3853 __ Ret(); | 3734 __ Ret(); |
| 3735 |
| 3854 } else { | 3736 } else { |
| 3855 // Check whether unsigned integer fits into smi. | 3737 // Check whether unsigned integer fits into smi. |
| 3856 Label box_int_0, box_int_1, done; | 3738 Label box_int_0, box_int_1, done; |
| 3857 __ tst(value, Operand(0x80000000)); | 3739 __ tst(value, Operand(0x80000000)); |
| 3858 __ b(ne, &box_int_0); | 3740 __ b(ne, &box_int_0, Label::kNear); |
| 3859 __ tst(value, Operand(0x40000000)); | 3741 __ tst(value, Operand(0x40000000)); |
| 3860 __ b(ne, &box_int_1); | 3742 __ b(ne, &box_int_1); |
| 3861 // Tag integer as smi and return it. | 3743 // Tag integer as smi and return it. |
| 3862 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | 3744 __ lsl(r0, value, Operand(kSmiTagSize)); |
| 3863 __ Ret(); | 3745 __ Ret(); |
| 3864 | 3746 |
| 3865 Register hiword = value; // r2. | 3747 Register hiword = value; // r2. |
| 3866 Register loword = r3; | 3748 Register loword = r3; |
| 3867 | 3749 |
| 3868 __ bind(&box_int_0); | 3750 __ bind(&box_int_0); |
| 3869 // Integer does not have leading zeros. | 3751 // Integer does not have leading zeros. |
| 3870 GenerateUInt2Double(masm, hiword, loword, r4, 0); | 3752 GenerateUInt2Double(masm, hiword, loword, r4, 0); |
| 3871 __ b(&done); | 3753 __ b(&done); |
| 3872 | 3754 |
| 3873 __ bind(&box_int_1); | 3755 __ bind(&box_int_1); |
| 3874 // Integer has one leading zero. | 3756 // Integer has one leading zero. |
| 3875 GenerateUInt2Double(masm, hiword, loword, r4, 1); | 3757 GenerateUInt2Double(masm, hiword, loword, r4, 1); |
| 3876 | 3758 |
| 3877 | 3759 |
| 3878 __ bind(&done); | 3760 __ bind(&done); |
| 3879 // Integer was converted to double in registers hiword:loword. | 3761 // Integer was converted to double in registers hiword:loword. |
| 3880 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber | 3762 // Wrap it into a HeapNumber. Don't use r0 and r1 as AllocateHeapNumber |
| 3881 // clobbers all registers - also when jumping due to exhausted young | 3763 // clobbers all registers - also when jumping due to exhausted young |
| 3882 // space. | 3764 // space. |
| 3883 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 3765 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3884 __ AllocateHeapNumber(r4, r5, r7, r6, &slow, TAG_RESULT); | 3766 __ AllocateHeapNumber(r4, r5, r7, r6, &slow); |
| 3885 | 3767 |
| 3886 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); | 3768 __ str(hiword, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
| 3887 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); | 3769 __ str(loword, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
| 3888 | 3770 |
| 3889 __ mov(r0, r4); | 3771 __ mov(r0, r4); |
| 3890 __ Ret(); | 3772 __ Ret(); |
| 3891 } | 3773 } |
| 3892 } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 3774 } else if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 3893 // For the floating-point array type, we need to always allocate a | 3775 // For the floating-point array type, we need to always allocate a |
| 3894 // HeapNumber. | 3776 // HeapNumber. |
| 3895 if (CpuFeatures::IsSupported(VFP2)) { | 3777 if (CpuFeatures::IsSupported(FPU)) { |
| 3896 CpuFeatures::Scope scope(VFP2); | |
| 3897 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | 3778 // Allocate a HeapNumber for the result. Don't use r0 and r1 as |
| 3898 // AllocateHeapNumber clobbers all registers - also when jumping due to | 3779 // AllocateHeapNumber clobbers all registers - also when jumping due to |
| 3899 // exhausted young space. | 3780 // exhausted young space. |
| 3900 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 3781 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3901 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); | 3782 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
| 3902 __ vcvt_f64_f32(d0, s0); | 3783 __ fcnvsd(dr0, fr0); |
| 3903 __ vstr(d0, r2, HeapNumber::kValueOffset); | 3784 ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
| 3785 __ sub(r1, r2, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
| 3786 __ dstr(dr0, MemOperand(r1, 0), r1); |
| 3904 | 3787 |
| 3905 __ add(r0, r2, Operand(kHeapObjectTag)); | 3788 __ mov(r0, r2); |
| 3906 __ Ret(); | 3789 __ Ret(); |
| 3907 } else { | 3790 } else { |
| 3908 // Allocate a HeapNumber for the result. Don't use r0 and r1 as | 3791 // Allocate a HeapNumber for the result. Don't use r0 and r1 as |
| 3909 // AllocateHeapNumber clobbers all registers - also when jumping due to | 3792 // AllocateHeapNumber clobbers all registers - also when jumping due to |
| 3910 // exhausted young space. | 3793 // exhausted young space. |
| 3911 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 3794 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3912 __ AllocateHeapNumber(r3, r4, r5, r6, &slow, TAG_RESULT); | 3795 __ AllocateHeapNumber(r3, r4, r5, r6, &slow); |
| 3913 // VFP is not available, do manual single to double conversion. | 3796 // FPU is not available, do manual single to double conversion. |
| 3914 | 3797 |
| 3915 // r2: floating point value (binary32) | 3798 // r2: floating point value (binary32) |
| 3916 // r3: heap number for result | 3799 // r3: heap number for result |
| 3917 | 3800 |
| 3918 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to | 3801 // Extract mantissa to r0. OK to clobber r0 now as there are no jumps to |
| 3919 // the slow case from here. | 3802 // the slow case from here. |
| 3920 __ and_(r0, value, Operand(kBinary32MantissaMask)); | 3803 __ land(r0, value, Operand(kBinary32MantissaMask)); |
| 3921 | 3804 |
| 3922 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to | 3805 // Extract exponent to r1. OK to clobber r1 now as there are no jumps to |
| 3923 // the slow case from here. | 3806 // the slow case from here. |
| 3924 __ mov(r1, Operand(value, LSR, kBinary32MantissaBits)); | 3807 __ lsr(r1, value, Operand(kBinary32MantissaBits)); |
| 3925 __ and_(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); | 3808 __ land(r1, r1, Operand(kBinary32ExponentMask >> kBinary32MantissaBits)); |
| 3926 | 3809 |
| 3927 Label exponent_rebiased; | 3810 Label exponent_rebiased; |
| 3928 __ teq(r1, Operand(0x00)); | 3811 __ teq(r1, Operand(0x00)); |
| 3929 __ b(eq, &exponent_rebiased); | 3812 __ b(eq, &exponent_rebiased); |
| 3930 | 3813 |
| 3931 __ teq(r1, Operand(0xff)); | 3814 __ teq(r1, Operand(0xff)); |
| 3932 __ mov(r1, Operand(0x7ff), LeaveCC, eq); | 3815 __ mov(r1, Operand(0x7ff), eq); |
| 3933 __ b(eq, &exponent_rebiased); | 3816 __ b(eq, &exponent_rebiased); |
| 3934 | 3817 |
| 3935 // Rebias exponent. | 3818 // Rebias exponent. |
| 3936 __ add(r1, | 3819 __ add(r1, |
| 3937 r1, | 3820 r1, |
| 3938 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); | 3821 Operand(-kBinary32ExponentBias + HeapNumber::kExponentBias)); |
| 3939 | 3822 |
| 3940 __ bind(&exponent_rebiased); | 3823 __ bind(&exponent_rebiased); |
| 3941 __ and_(r2, value, Operand(kBinary32SignMask)); | 3824 __ land(r2, value, Operand(kBinary32SignMask)); |
| 3942 value = no_reg; | 3825 value = no_reg; |
| 3943 __ orr(r2, r2, Operand(r1, LSL, HeapNumber::kMantissaBitsInTopWord)); | 3826 __ lsl(ip, r1, Operand(HeapNumber::kMantissaBitsInTopWord)); |
| 3827 __ orr(r2, r2, ip); |
| 3944 | 3828 |
| 3945 // Shift mantissa. | 3829 // Shift mantissa. |
| 3946 static const int kMantissaShiftForHiWord = | 3830 static const int kMantissaShiftForHiWord = |
| 3947 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | 3831 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; |
| 3948 | 3832 |
| 3949 static const int kMantissaShiftForLoWord = | 3833 static const int kMantissaShiftForLoWord = |
| 3950 kBitsPerInt - kMantissaShiftForHiWord; | 3834 kBitsPerInt - kMantissaShiftForHiWord; |
| 3951 | 3835 |
| 3952 __ orr(r2, r2, Operand(r0, LSR, kMantissaShiftForHiWord)); | 3836 __ lsr(ip, r0, Operand(kMantissaShiftForHiWord)); |
| 3953 __ mov(r0, Operand(r0, LSL, kMantissaShiftForLoWord)); | 3837 __ orr(r2, r2, ip); |
| 3838 __ lsl(r0, r0, Operand(kMantissaShiftForLoWord)); |
| 3954 | 3839 |
| 3955 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); | 3840 __ str(r2, FieldMemOperand(r3, HeapNumber::kExponentOffset)); |
| 3956 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); | 3841 __ str(r0, FieldMemOperand(r3, HeapNumber::kMantissaOffset)); |
| 3957 | 3842 |
| 3958 __ mov(r0, r3); | 3843 __ mov(r0, r3); |
| 3959 __ Ret(); | 3844 __ Ret(); |
| 3960 } | 3845 } |
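A hedged C++ sketch of the manual binary32-to-binary64 widening performed by the non-FPU branch above: keep the sign, re-bias the exponent (127 to 1023, passing exponent 0 and 0xff through unchanged for zeros/denormals and Inf/NaN, mirroring the two teq checks), and shift the 23-bit mantissa to the top of the 52-bit field.

    #include <cstdint>

    // kBinary32MantissaBits == 23 and the double keeps 20 mantissa bits in
    // its top word, so the stub builds hi |= mantissa >> 3 and
    // lo = mantissa << 29; here the same split is shown on one 64-bit word.
    uint64_t WidenFloatBits(uint32_t f) {
      uint64_t sign = static_cast<uint64_t>(f & 0x80000000u) << 32;
      uint32_t exp = (f >> 23) & 0xffu;
      uint64_t mantissa = f & 0x007fffffu;
      uint64_t wide_exp =
          (exp == 0) ? 0 : (exp == 0xff) ? 0x7ff : exp - 127u + 1023u;
      return sign | (wide_exp << 52) | (mantissa << (52 - 23));
    }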
| 3961 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 3846 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 3962 if (CpuFeatures::IsSupported(VFP2)) { | 3847 if (CpuFeatures::IsSupported(FPU)) { |
| 3963 CpuFeatures::Scope scope(VFP2); | |
| 3964 // Allocate a HeapNumber for the result. Don't use r0 and r1, as | 3848 // Allocate a HeapNumber for the result. Don't use r0 and r1, as |
| 3965 // AllocateHeapNumber clobbers all registers, including when jumping | 3849 // AllocateHeapNumber clobbers all registers, including when jumping |
| 3966 // due to exhausted young space. | 3850 // due to exhausted young space. |
| 3967 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); | 3851 __ LoadRoot(r6, Heap::kHeapNumberMapRootIndex); |
| 3968 __ AllocateHeapNumber(r2, r3, r4, r6, &slow, DONT_TAG_RESULT); | 3852 __ AllocateHeapNumber(r2, r3, r4, r6, &slow); |
| 3969 __ vstr(d0, r2, HeapNumber::kValueOffset); | 3853 ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
| 3854 __ sub(r1, r2, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
| 3855 __ dstr(dr0, MemOperand(r1, 0), r1); |
| 3970 | 3856 |
| 3971 __ add(r0, r2, Operand(kHeapObjectTag)); | 3857 __ mov(r0, r2); |
| 3972 __ Ret(); | 3858 __ Ret(); |
| 3973 } else { | 3859 } else { |
| 3974 // Allocate a HeapNumber for the result. Don't use r0 and r1, as | 3860 // Allocate a HeapNumber for the result. Don't use r0 and r1, as |
| 3975 // AllocateHeapNumber clobbers all registers, including when jumping | 3861 // AllocateHeapNumber clobbers all registers, including when jumping |
| 3976 // due to exhausted young space. | 3862 // due to exhausted young space. |
| 3977 __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); | 3863 __ LoadRoot(r7, Heap::kHeapNumberMapRootIndex); |
| 3978 __ AllocateHeapNumber(r4, r5, r6, r7, &slow, TAG_RESULT); | 3864 __ AllocateHeapNumber(r4, r5, r6, r7, &slow); |
| 3979 | 3865 |
| 3980 __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); | 3866 __ str(r2, FieldMemOperand(r4, HeapNumber::kMantissaOffset)); |
| 3981 __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); | 3867 __ str(r3, FieldMemOperand(r4, HeapNumber::kExponentOffset)); |
| 3982 __ mov(r0, r4); | 3868 __ mov(r0, r4); |
| 3983 __ Ret(); | 3869 __ Ret(); |
| 3984 } | 3870 } |
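The core-register path just above relies on the little-endian word order of the HeapNumber payload: the low 32 bits of the IEEE double sit at kMantissaOffset and the high 32 bits at kExponentOffset, so r2 and r3 can be stored directly. A minimal sketch of that layout assumption (little-endian target assumed, as on SH4):

    #include <stdint.h>
    #include <string.h>

    // Reassemble a double from the two words stored above: lo word from
    // kMantissaOffset, hi word from kExponentOffset.
    double WordsToDouble(uint32_t lo, uint32_t hi) {
      uint64_t bits = ((uint64_t)hi << 32) | lo;
      double d;
      memcpy(&d, &bits, sizeof(d));
      return d;
    }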
| 3985 | 3871 |
| 3986 } else { | 3872 } else { |
| 3987 // Tag integer as smi and return it. | 3873 // Tag integer as smi and return it. |
| 3988 __ mov(r0, Operand(value, LSL, kSmiTagSize)); | 3874 __ lsl(r0, value, Operand(kSmiTagSize)); |
| 3989 __ Ret(); | 3875 __ Ret(); |
| 3990 } | 3876 } |
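The smi tagging here is the usual 32-bit V8 encoding: kSmiTag == 0 and kSmiTagSize == 1, so a 31-bit integer is tagged with a single left shift and untagged with an arithmetic right shift. A trivial sketch:

    #include <stdint.h>

    // 32-bit smi encoding assumed above: low bit 0 marks a smi.
    static inline int32_t SmiTag(int32_t value) {
      return (int32_t)((uint32_t)value << 1);  // value must fit in 31 bits
    }
    static inline int32_t SmiUntag(int32_t smi) {
      return smi >> 1;  // arithmetic shift restores the sign
    }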
| 3991 | 3877 |
| 3992 // Slow case, key and receiver still in r0 and r1. | 3878 // Slow case, key and receiver still in r0 and r1. |
| 3993 __ bind(&slow); | 3879 __ bind(&slow); |
| 3994 __ IncrementCounter( | 3880 __ IncrementCounter( |
| 3995 masm->isolate()->counters()->keyed_load_external_array_slow(), | 3881 masm->isolate()->counters()->keyed_load_external_array_slow(), |
| 3996 1, r2, r3); | 3882 1, r2, r3); |
| 3997 | 3883 |
| 3998 // ---------- S t a t e -------------- | 3884 // ---------- S t a t e -------------- |
| (...skipping 27 matching lines...) |
| 4026 // Register usage. | 3912 // Register usage. |
| 4027 Register value = r0; | 3913 Register value = r0; |
| 4028 Register key = r1; | 3914 Register key = r1; |
| 4029 Register receiver = r2; | 3915 Register receiver = r2; |
| 4030 // r3 mostly holds the elements array or the destination external array. | 3916 // r3 mostly holds the elements array or the destination external array. |
| 4031 | 3917 |
| 4032 // This stub is meant to be tail-jumped to; the receiver must already | 3918 // This stub is meant to be tail-jumped to; the receiver must already |
| 4033 // have been verified by the caller not to be a smi. | 3919 // have been verified by the caller not to be a smi. |
| 4034 | 3920 |
| 4035 // Check that the key is a smi or a heap number convertible to a smi. | 3921 // Check that the key is a smi or a heap number convertible to a smi. |
| 4036 GenerateSmiKeyCheck(masm, key, r4, r5, d1, d2, &miss_force_generic); | 3922 GenerateSmiKeyCheck(masm, key, r4, r5, dr0, &miss_force_generic); |
| 4037 | 3923 |
| 4038 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); | 3924 __ ldr(r3, FieldMemOperand(receiver, JSObject::kElementsOffset)); |
| 4039 | 3925 |
| 4040 // Check that the index is in range. | 3926 // Check that the index is in range. |
| 4041 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); | 3927 __ ldr(ip, FieldMemOperand(r3, ExternalArray::kLengthOffset)); |
| 4042 __ cmp(key, ip); | 3928 __ cmphs(key, ip); |
| 4043 // Unsigned comparison catches both negative and too-large values. | 3929 // Unsigned comparison catches both negative and too-large values. |
| 4044 __ b(hs, &miss_force_generic); | 3930 __ bt(&miss_force_generic); |
| 4045 | 3931 |
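Two things happen in the bounds check above. First, the single unsigned comparison also rejects negative keys, because a negative two's-complement value reinterprets as a huge unsigned one. Second, this is the recurring shape of the whole port: ARM's flag-setting cmp plus a condition-coded branch (b(hs, ...)) becomes an explicit SH4 comparison (cmphs) that only sets the T bit, followed by bt/bf. A host-side sketch of the first point:

    #include <stdint.h>

    // One unsigned compare covers both failure modes: key < 0 wraps far
    // above any valid length, and key >= length is out of range directly.
    bool KeyInBounds(int32_t key, uint32_t length) {
      return (uint32_t)key < length;
    }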
| 4046 // Handle both smis and HeapNumbers in the fast path. Go to the | 3932 // Handle both smis and HeapNumbers in the fast path. Go to the |
| 4047 // runtime for all other kinds of values. | 3933 // runtime for all other kinds of values. |
| 4048 // r3: external array. | 3934 // r3: external array. |
| 4049 if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) { | 3935 if (elements_kind == EXTERNAL_PIXEL_ELEMENTS) { |
| 4050 // Double to pixel conversion is only implemented in the runtime for now. | 3936 // Double to pixel conversion is only implemented in the runtime for now. |
| 4051 __ JumpIfNotSmi(value, &slow); | 3937 __ JumpIfNotSmi(value, &slow); |
| 4052 } else { | 3938 } else { |
| 4053 __ JumpIfNotSmi(value, &check_heap_number); | 3939 __ JumpIfNotSmi(value, &check_heap_number); |
| 4054 } | 3940 } |
| 4055 __ SmiUntag(r5, value); | 3941 __ SmiUntag(r5, value); |
| 4056 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | 3942 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 4057 | 3943 |
| 4058 // r3: base pointer of external storage. | 3944 // r3: base pointer of external storage. |
| 4059 // r5: value (integer). | 3945 // r5: value (integer). |
| 4060 switch (elements_kind) { | 3946 switch (elements_kind) { |
| 4061 case EXTERNAL_PIXEL_ELEMENTS: | 3947 case EXTERNAL_PIXEL_ELEMENTS: |
| 4062 // Clamp the value to [0..255]. | 3948 // Clamp the value to [0..255]. |
| 4063 __ Usat(r5, 8, Operand(r5)); | 3949 __ Usat(r5, 8, r5); |
| 4064 __ strb(r5, MemOperand(r3, key, LSR, 1)); | 3950 __ lsr(r4, key, Operand(1)); |
| 3951 __ strb(r5, MemOperand(r3, r4)); |
| 4065 break; | 3952 break; |
| 4066 case EXTERNAL_BYTE_ELEMENTS: | 3953 case EXTERNAL_BYTE_ELEMENTS: |
| 4067 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 3954 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 4068 __ strb(r5, MemOperand(r3, key, LSR, 1)); | 3955 __ lsr(r4, key, Operand(1)); |
| 3956 __ strb(r5, MemOperand(r3, r4)); |
| 4069 break; | 3957 break; |
| 4070 case EXTERNAL_SHORT_ELEMENTS: | 3958 case EXTERNAL_SHORT_ELEMENTS: |
| 4071 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 3959 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 4072 __ strh(r5, MemOperand(r3, key, LSL, 0)); | 3960 __ strh(r5, MemOperand(r3, key)); |
| 4073 break; | 3961 break; |
| 4074 case EXTERNAL_INT_ELEMENTS: | 3962 case EXTERNAL_INT_ELEMENTS: |
| 4075 case EXTERNAL_UNSIGNED_INT_ELEMENTS: | 3963 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 4076 __ str(r5, MemOperand(r3, key, LSL, 1)); | 3964 __ lsl(r4, key, Operand(1)); |
| 3965 __ str(r5, MemOperand(r3, r4)); |
| 4077 break; | 3966 break; |
| 4078 case EXTERNAL_FLOAT_ELEMENTS: | 3967 case EXTERNAL_FLOAT_ELEMENTS: |
| 4079 // Perform int-to-float conversion and store to memory. | 3968 // Perform int-to-float conversion and store to memory. |
| 4080 __ SmiUntag(r4, key); | 3969 __ SmiUntag(r4, key); |
| 4081 StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); | 3970 StoreIntAsFloat(masm, r3, r4, r5, r6, r7, r9); |
| 4082 break; | 3971 break; |
| 4083 case EXTERNAL_DOUBLE_ELEMENTS: | 3972 case EXTERNAL_DOUBLE_ELEMENTS: |
| 4084 __ add(r3, r3, Operand(key, LSL, 2)); | 3973 __ lsl(r4, key, Operand(2)); |
| 3974 __ add(r3, r3, r4); |
| 4085 // r3: effective address of the double element | 3975 // r3: effective address of the double element |
| 4086 FloatingPointHelper::Destination destination; | 3976 FloatingPointHelper::Destination destination; |
| 4087 if (CpuFeatures::IsSupported(VFP2)) { | 3977 if (CpuFeatures::IsSupported(FPU)) { |
| 4088 destination = FloatingPointHelper::kVFPRegisters; | 3978 destination = FloatingPointHelper::kVFPRegisters; |
| 4089 } else { | 3979 } else { |
| 4090 destination = FloatingPointHelper::kCoreRegisters; | 3980 destination = FloatingPointHelper::kCoreRegisters; |
| 4091 } | 3981 } |
| 4092 FloatingPointHelper::ConvertIntToDouble( | 3982 FloatingPointHelper::ConvertIntToDouble( |
| 4093 masm, r5, destination, | 3983 masm, r5, destination, |
| 4094 d0, r6, r7, // These are: double_dst, dst1, dst2. | 3984 dr0, r6, r7, // These are: double_dst, dst1, dst2. |
| 4095 r4, s2); // These are: scratch2, single_scratch. | 3985 r4, /*s2*/no_freg); // These are: scratch2, single_scratch. |
| 4096 if (destination == FloatingPointHelper::kVFPRegisters) { | 3986 if (destination == FloatingPointHelper::kVFPRegisters) { |
| 4097 CpuFeatures::Scope scope(VFP2); | 3987 __ dstr(dr0, MemOperand(r3, 0)); |
| 4098 __ vstr(d0, r3, 0); | |
| 4099 } else { | 3988 } else { |
| 4100 __ str(r6, MemOperand(r3, 0)); | 3989 __ str(r6, MemOperand(r3, 0)); |
| 4101 __ str(r7, MemOperand(r3, Register::kSizeInBytes)); | 3990 __ str(r7, MemOperand(r3, Register::kSizeInBytes)); |
| 4102 } | 3991 } |
| 4103 break; | 3992 break; |
| 4104 case FAST_ELEMENTS: | 3993 case FAST_ELEMENTS: |
| 4105 case FAST_SMI_ELEMENTS: | 3994 case FAST_SMI_ELEMENTS: |
| 4106 case FAST_DOUBLE_ELEMENTS: | 3995 case FAST_DOUBLE_ELEMENTS: |
| 4107 case FAST_HOLEY_ELEMENTS: | 3996 case FAST_HOLEY_ELEMENTS: |
| 4108 case FAST_HOLEY_SMI_ELEMENTS: | 3997 case FAST_HOLEY_SMI_ELEMENTS: |
| 4109 case FAST_HOLEY_DOUBLE_ELEMENTS: | 3998 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| 4110 case DICTIONARY_ELEMENTS: | 3999 case DICTIONARY_ELEMENTS: |
| 4111 case NON_STRICT_ARGUMENTS_ELEMENTS: | 4000 case NON_STRICT_ARGUMENTS_ELEMENTS: |
| 4112 UNREACHABLE(); | 4001 UNREACHABLE(); |
| 4113 break; | 4002 break; |
| 4114 } | 4003 } |
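Note how the shift amounts in the switch above fall out of the smi tag: key still holds index << 1, so byte stores untag with one right shift, 16-bit stores use the key unchanged, 32-bit stores shift left once more, and doubles shift left by two. A sketch of the arithmetic:

    #include <stdint.h>

    // Byte offset of element i in an external array, starting from a smi key
    // (key == i << 1): >>1 for bytes, unchanged for shorts, <<1 for ints,
    // <<2 for doubles -- exactly the lsr/lsl amounts used above.
    uint32_t ElementOffset(uint32_t smi_key, uint32_t element_size) {
      return (smi_key >> 1) * element_size;
    }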
| 4115 | 4004 |
| 4116 // Entry registers are intact, r0 holds the value which is the return value. | 4005 // Entry registers are intact, r0 holds the value which is the return value. |
| 4117 __ Ret(); | 4006 __ Ret(); |
| 4118 | 4007 |
| 4119 if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) { | 4008 if (elements_kind != EXTERNAL_PIXEL_ELEMENTS) { |
| 4120 // r3: external array. | 4009 // r3: external array. |
| 4121 __ bind(&check_heap_number); | 4010 __ bind(&check_heap_number); |
| 4122 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE); | 4011 __ CompareObjectType(value, r5, r6, HEAP_NUMBER_TYPE, eq); |
| 4123 __ b(ne, &slow); | 4012 __ b(ne, &slow); |
| 4124 | 4013 |
| 4125 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); | 4014 __ ldr(r3, FieldMemOperand(r3, ExternalArray::kExternalPointerOffset)); |
| 4126 | 4015 |
| 4127 // r3: base pointer of external storage. | 4016 // r3: base pointer of external storage. |
| 4128 | 4017 |
| 4129 // The WebGL specification leaves the behavior of storing NaN and | 4018 // The WebGL specification leaves the behavior of storing NaN and |
| 4130 // +/-Infinity into integer arrays basically undefined. For more | 4019 // +/-Infinity into integer arrays basically undefined. For more |
| 4131 // reproducible behavior, convert these to zero. | 4020 // reproducible behavior, convert these to zero. |
| 4132 if (CpuFeatures::IsSupported(VFP2)) { | 4021 if (CpuFeatures::IsSupported(FPU)) { |
| 4133 CpuFeatures::Scope scope(VFP2); | |
| 4134 | |
| 4135 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { | 4022 if (elements_kind == EXTERNAL_FLOAT_ELEMENTS) { |
| 4136 // vldr requires offset to be a multiple of 4 so we can not | 4023 // dldr requires offset to be a multiple of 4 so we can not |
| 4137 // include -kHeapObjectTag into it. | 4024 // include -kHeapObjectTag into it. |
| 4138 __ sub(r5, r0, Operand(kHeapObjectTag)); | 4025 ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
| 4139 __ vldr(d0, r5, HeapNumber::kValueOffset); | 4026 __ sub(r5, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
| 4140 __ add(r5, r3, Operand(key, LSL, 1)); | 4027 __ dldr(dr0, MemOperand(r5, 0), r5); |
| 4141 __ vcvt_f32_f64(s0, d0); | 4028 __ lsl(r5, key, Operand(1)); |
| 4142 __ vstr(s0, r5, 0); | 4029 __ add(r5, r3, r5); |
| 4030 __ fcnvds(fr0, dr0); |
| 4031 __ fstr(fr0, MemOperand(r5, 0)); |
| 4143 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 4032 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 4144 __ sub(r5, r0, Operand(kHeapObjectTag)); | 4033 ASSERT(Operand(kHeapObjectTag - HeapNumber::kValueOffset).is_int8()); |
| 4145 __ vldr(d0, r5, HeapNumber::kValueOffset); | 4034 __ sub(r5, r0, Operand(kHeapObjectTag - HeapNumber::kValueOffset)); |
| 4146 __ add(r5, r3, Operand(key, LSL, 2)); | 4035 __ dldr(dr0, MemOperand(r5, 0), r5); |
| 4147 __ vstr(d0, r5, 0); | 4036 __ lsl(r5, key, Operand(2)); |
| 4037 __ add(r5, r3, r5); |
| 4038 __ dstr(dr0, MemOperand(r5, 0), r5); |
| 4148 } else { | 4039 } else { |
| 4149 // Hoisted load. vldr requires offset to be a multiple of 4 so we can | 4040 // Hoisted load. dldr requires offset to be a multiple of 4 so we can |
| 4150 // not include -kHeapObjectTag into it. | 4041 // not include -kHeapObjectTag into it. |
| 4151 __ sub(r5, value, Operand(kHeapObjectTag)); | 4042 __ sub(r5, value, Operand(kHeapObjectTag)); |
| 4152 __ vldr(d0, r5, HeapNumber::kValueOffset); | 4043 __ dldr(dr0, MemOperand(r5, HeapNumber::kValueOffset)); |
| 4153 __ EmitECMATruncate(r5, d0, s2, r6, r7, r9); | 4044 __ EmitECMATruncate(r5, dr0, fr2, r6, r7, r9); |
| 4154 | 4045 |
| 4155 switch (elements_kind) { | 4046 switch (elements_kind) { |
| 4156 case EXTERNAL_BYTE_ELEMENTS: | 4047 case EXTERNAL_BYTE_ELEMENTS: |
| 4157 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 4048 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 4158 __ strb(r5, MemOperand(r3, key, LSR, 1)); | 4049 __ lsr(ip, key, Operand(1)); |
| 4050 __ strb(r5, MemOperand(r3, ip)); |
| 4159 break; | 4051 break; |
| 4160 case EXTERNAL_SHORT_ELEMENTS: | 4052 case EXTERNAL_SHORT_ELEMENTS: |
| 4161 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 4053 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 4162 __ strh(r5, MemOperand(r3, key, LSL, 0)); | 4054 __ strh(r5, MemOperand(r3, key)); |
| 4163 break; | 4055 break; |
| 4164 case EXTERNAL_INT_ELEMENTS: | 4056 case EXTERNAL_INT_ELEMENTS: |
| 4165 case EXTERNAL_UNSIGNED_INT_ELEMENTS: | 4057 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 4166 __ str(r5, MemOperand(r3, key, LSL, 1)); | 4058 __ lsl(ip, key, Operand(1)); |
| 4059 __ str(r5, MemOperand(r3, ip)); |
| 4167 break; | 4060 break; |
| 4168 case EXTERNAL_PIXEL_ELEMENTS: | 4061 case EXTERNAL_PIXEL_ELEMENTS: |
| 4169 case EXTERNAL_FLOAT_ELEMENTS: | 4062 case EXTERNAL_FLOAT_ELEMENTS: |
| 4170 case EXTERNAL_DOUBLE_ELEMENTS: | 4063 case EXTERNAL_DOUBLE_ELEMENTS: |
| 4171 case FAST_ELEMENTS: | 4064 case FAST_ELEMENTS: |
| 4172 case FAST_SMI_ELEMENTS: | 4065 case FAST_SMI_ELEMENTS: |
| 4173 case FAST_DOUBLE_ELEMENTS: | 4066 case FAST_DOUBLE_ELEMENTS: |
| 4174 case FAST_HOLEY_ELEMENTS: | 4067 case FAST_HOLEY_ELEMENTS: |
| 4175 case FAST_HOLEY_SMI_ELEMENTS: | 4068 case FAST_HOLEY_SMI_ELEMENTS: |
| 4176 case FAST_HOLEY_DOUBLE_ELEMENTS: | 4069 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| (...skipping 16 matching lines...) |
| 4193 Label done, nan_or_infinity_or_zero; | 4086 Label done, nan_or_infinity_or_zero; |
| 4194 static const int kMantissaInHiWordShift = | 4087 static const int kMantissaInHiWordShift = |
| 4195 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; | 4088 kBinary32MantissaBits - HeapNumber::kMantissaBitsInTopWord; |
| 4196 | 4089 |
| 4197 static const int kMantissaInLoWordShift = | 4090 static const int kMantissaInLoWordShift = |
| 4198 kBitsPerInt - kMantissaInHiWordShift; | 4091 kBitsPerInt - kMantissaInHiWordShift; |
| 4199 | 4092 |
| 4200 // Test for all special exponent values: zeros, subnormal numbers, NaNs | 4093 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
| 4201 // and infinities. All these should be converted to 0. | 4094 // and infinities. All these should be converted to 0. |
| 4202 __ mov(r7, Operand(HeapNumber::kExponentMask)); | 4095 __ mov(r7, Operand(HeapNumber::kExponentMask)); |
| 4203 __ and_(r9, r5, Operand(r7), SetCC); | 4096 __ land(r9, r5, r7); |
| 4097 __ cmpeq(r9, Operand(0)); |
| 4204 __ b(eq, &nan_or_infinity_or_zero); | 4098 __ b(eq, &nan_or_infinity_or_zero); |
| 4205 | 4099 |
| 4206 __ teq(r9, Operand(r7)); | 4100 __ teq(r9, r7); |
| 4207 __ mov(r9, Operand(kBinary32ExponentMask), LeaveCC, eq); | 4101 __ mov(r9, Operand(kBinary32ExponentMask), eq); |
| 4208 __ b(eq, &nan_or_infinity_or_zero); | 4102 __ b(eq, &nan_or_infinity_or_zero); |
| 4209 | 4103 |
| 4210 // Rebias exponent. | 4104 // Rebias exponent. |
| 4211 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); | 4105 __ lsr(r9, r9, Operand(HeapNumber::kExponentShift)); |
| 4212 __ add(r9, | 4106 __ add(r9, |
| 4213 r9, | 4107 r9, |
| 4214 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); | 4108 Operand(kBinary32ExponentBias - HeapNumber::kExponentBias)); |
| 4215 | 4109 |
| 4216 __ cmp(r9, Operand(kBinary32MaxExponent)); | 4110 Label skip1, skip2; |
| 4217 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, gt); | 4111 __ cmpgt(r9, Operand(kBinary32MaxExponent)); |
| 4218 __ orr(r5, r5, Operand(kBinary32ExponentMask), LeaveCC, gt); | 4112 __ bf_near(&skip1); |
| 4219 __ b(gt, &done); | 4113 __ land(r5, r5, Operand(HeapNumber::kSignMask)); |
| 4114 __ orr(r5, r5, Operand(kBinary32ExponentMask)); |
| 4115 __ b(&done); |
| 4116 __ bind(&skip1); |
| 4220 | 4117 |
| 4221 __ cmp(r9, Operand(kBinary32MinExponent)); | 4118 __ cmpge(r9, Operand(kBinary32MinExponent)); |
| 4222 __ and_(r5, r5, Operand(HeapNumber::kSignMask), LeaveCC, lt); | 4119 __ bt_near(&skip2); |
| 4223 __ b(lt, &done); | 4120 __ land(r5, r5, Operand(HeapNumber::kSignMask)); |
| 4121 __ b(&done); |
| 4122 __ bind(&skip2); |
| 4224 | 4123 |
| 4225 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); | 4124 __ land(r7, r5, Operand(HeapNumber::kSignMask)); |
| 4226 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | 4125 __ land(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 4227 __ orr(r7, r7, Operand(r5, LSL, kMantissaInHiWordShift)); | 4126 __ lsl(r5, r5, Operand(kMantissaInHiWordShift)); |
| 4228 __ orr(r7, r7, Operand(r6, LSR, kMantissaInLoWordShift)); | 4127 __ orr(r7, r7, r5); |
| 4229 __ orr(r5, r7, Operand(r9, LSL, kBinary32ExponentShift)); | 4128 __ lsr(r5, r6, Operand(kMantissaInLoWordShift)); |
| 4129 __ orr(r7, r7, r5); |
| 4130 __ lsl(r5, r9, Operand(kBinary32ExponentShift)); |
| 4131 __ orr(r5, r7, r5); |
| 4230 | 4132 |
| 4231 __ bind(&done); | 4133 __ bind(&done); |
| 4232 __ str(r5, MemOperand(r3, key, LSL, 1)); | 4134 __ lsl(ip, key, Operand(1)); |
| 4135 __ str(r5, MemOperand(r3, ip)); |
| 4233 // Entry registers are intact, r0 holds the value which is the return | 4136 // Entry registers are intact, r0 holds the value which is the return |
| 4234 // value. | 4137 // value. |
| 4235 __ Ret(); | 4138 __ Ret(); |
| 4236 | 4139 |
| 4237 __ bind(&nan_or_infinity_or_zero); | 4140 __ bind(&nan_or_infinity_or_zero); |
| 4238 __ and_(r7, r5, Operand(HeapNumber::kSignMask)); | 4141 __ land(r7, r5, Operand(HeapNumber::kSignMask)); |
| 4239 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | 4142 __ land(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 4240 __ orr(r9, r9, r7); | 4143 __ orr(r9, r9, r7); |
| 4241 __ orr(r9, r9, Operand(r5, LSL, kMantissaInHiWordShift)); | 4144 __ lsl(r5, r5, Operand(kMantissaInHiWordShift)); |
| 4242 __ orr(r5, r9, Operand(r6, LSR, kMantissaInLoWordShift)); | 4145 __ orr(r9, r9, r5); |
| 4146 __ lsr(r5, r6, Operand(kMantissaInLoWordShift)); |
| 4147 __ orr(r5, r9, r5); |
| 4243 __ b(&done); | 4148 __ b(&done); |
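The block above is the FPU-less inverse of the earlier binary32-to-binary64 widening: rebias the exponent downward, saturate overflow to infinity and underflow to a signed zero, and repack 23 mantissa bits from the two source words. A host-side sketch, with the kBinary32MaxExponent/kBinary32MinExponent bounds written out numerically (0xfe and 1); rounding is by truncation, and zero/subnormal inputs collapse to a signed zero here, a simplification of the &nan_or_infinity_or_zero path:

    #include <stdint.h>

    // Narrow binary64 bits (hi word as in r5, lo word as in r6 above) to
    // binary32 bits.
    uint32_t DoubleBitsToBinary32(uint32_t hi, uint32_t lo) {
      uint32_t sign = hi & 0x80000000u;
      uint32_t mantissa = ((hi & 0x000fffffu) << 3) | (lo >> 29);
      uint32_t biased = (hi & 0x7ff00000u) >> 20;
      if (biased == 0) return sign;                         // zero/subnormal
      if (biased == 0x7ff) return sign | 0x7f800000u | mantissa;  // NaN/Inf
      int32_t exponent = (int32_t)biased - 1023 + 127;      // rebias
      if (exponent > 0xfe) return sign | 0x7f800000u;       // overflow -> Inf
      if (exponent < 1) return sign;                        // underflow -> 0
      return sign | ((uint32_t)exponent << 23) | mantissa;
    }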
| 4244 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { | 4149 } else if (elements_kind == EXTERNAL_DOUBLE_ELEMENTS) { |
| 4245 __ add(r7, r3, Operand(key, LSL, 2)); | 4150 __ lsl(r7, key, Operand(2)); |
| 4151 __ add(r7, r3, r7); |
| 4246 // r7: effective address of destination element. | 4152 // r7: effective address of destination element. |
| 4247 __ str(r6, MemOperand(r7, 0)); | 4153 __ str(r6, MemOperand(r7, 0)); |
| 4248 __ str(r5, MemOperand(r7, Register::kSizeInBytes)); | 4154 __ str(r5, MemOperand(r7, Register::kSizeInBytes)); |
| 4249 __ Ret(); | 4155 __ Ret(); |
| 4250 } else { | 4156 } else { |
| 4251 bool is_signed_type = IsElementTypeSigned(elements_kind); | 4157 bool is_signed_type = IsElementTypeSigned(elements_kind); |
| 4252 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; | 4158 int meaningfull_bits = is_signed_type ? (kBitsPerInt - 1) : kBitsPerInt; |
| 4253 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; | 4159 int32_t min_value = is_signed_type ? 0x80000000 : 0x00000000; |
| 4254 | 4160 |
| 4255 Label done, sign; | 4161 Label done, sign; |
| 4256 | 4162 |
| 4257 // Test for all special exponent values: zeros, subnormal numbers, NaNs | 4163 // Test for all special exponent values: zeros, subnormal numbers, NaNs |
| 4258 // and infinities. All these should be converted to 0. | 4164 // and infinities. All these should be converted to 0. |
| 4259 __ mov(r7, Operand(HeapNumber::kExponentMask)); | 4165 __ mov(r7, Operand(HeapNumber::kExponentMask)); |
| 4260 __ and_(r9, r5, Operand(r7), SetCC); | 4166 __ land(r9, r5, r7); |
| 4261 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); | 4167 __ tst(r9, r9); |
| 4168 __ mov(r5, Operand(0, RelocInfo::NONE), eq); |
| 4262 __ b(eq, &done); | 4169 __ b(eq, &done); |
| 4263 | 4170 |
| 4264 __ teq(r9, Operand(r7)); | 4171 __ teq(r9, r7); |
| 4265 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, eq); | 4172 __ mov(r5, Operand(0, RelocInfo::NONE), eq); |
| 4266 __ b(eq, &done); | 4173 __ b(eq, &done); |
| 4267 | 4174 |
| 4268 // Unbias exponent. | 4175 // Unbias exponent. |
| 4269 __ mov(r9, Operand(r9, LSR, HeapNumber::kExponentShift)); | 4176 __ lsr(r9, r9, Operand(HeapNumber::kExponentShift)); |
| 4270 __ sub(r9, r9, Operand(HeapNumber::kExponentBias), SetCC); | 4177 __ sub(r9, r9, Operand(HeapNumber::kExponentBias)); |
| 4178 __ cmpge(r9, Operand(0)); |
| 4271 // If exponent is negative then result is 0. | 4179 // If exponent is negative then result is 0. |
| 4272 __ mov(r5, Operand(0, RelocInfo::NONE), LeaveCC, mi); | 4180 __ mov(r5, Operand(0, RelocInfo::NONE), f); |
| 4273 __ b(mi, &done); | 4181 __ bf(&done); |
| 4274 | 4182 |
| 4275 // If exponent is too big then result is minimal value. | 4183 // If exponent is too big then result is minimal value. |
| 4276 __ cmp(r9, Operand(meaningfull_bits - 1)); | 4184 __ cmpge(r9, Operand(meaningfull_bits - 1)); |
| 4277 __ mov(r5, Operand(min_value), LeaveCC, ge); | 4185 __ mov(r5, Operand(min_value), eq); |
| 4278 __ b(ge, &done); | 4186 __ bt(&done); |
| 4279 | 4187 |
| 4280 __ and_(r7, r5, Operand(HeapNumber::kSignMask), SetCC); | 4188 __ land(r7, r5, Operand(HeapNumber::kSignMask)); |
| 4281 __ and_(r5, r5, Operand(HeapNumber::kMantissaMask)); | 4189 __ land(r5, r5, Operand(HeapNumber::kMantissaMask)); |
| 4282 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); | 4190 __ orr(r5, r5, Operand(1u << HeapNumber::kMantissaBitsInTopWord)); |
| 4283 | 4191 |
| 4284 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord), SetCC); | 4192 __ rsb(r9, r9, Operand(HeapNumber::kMantissaBitsInTopWord)); |
| 4285 __ mov(r5, Operand(r5, LSR, r9), LeaveCC, pl); | 4193 __ cmpge(r9, Operand(0)); |
| 4286 __ b(pl, &sign); | 4194 Label skip; |
| 4195 __ bf_near(&skip); |
| 4196 __ lsr(r5, r5, r9); |
| 4197 __ b(&sign); |
| 4198 __ bind(&skip); |
| 4287 | 4199 |
| 4288 __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); | 4200 __ rsb(r9, r9, Operand(0, RelocInfo::NONE)); |
| 4289 __ mov(r5, Operand(r5, LSL, r9)); | 4201 __ lsl(r5, r5, r9); |
| 4290 __ rsb(r9, r9, Operand(meaningfull_bits)); | 4202 __ rsb(r9, r9, Operand(meaningfull_bits)); |
| 4291 __ orr(r5, r5, Operand(r6, LSR, r9)); | 4203 __ lsr(ip, r6, r9); |
| 4204 __ orr(r5, r5, ip); |
| 4292 | 4205 |
| 4293 __ bind(&sign); | 4206 __ bind(&sign); |
| 4294 __ teq(r7, Operand(0, RelocInfo::NONE)); | 4207 __ teq(r7, Operand(0, RelocInfo::NONE)); |
| 4295 __ rsb(r5, r5, Operand(0, RelocInfo::NONE), LeaveCC, ne); | 4208 __ rsb(r5, r5, Operand(0, RelocInfo::NONE), ne); |
| 4296 | 4209 |
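For reference, the whole core-register truncation above can be sketched in C++ for the signed case (meaningfull_bits == 31; the unsigned case uses 32 and a zero min_value). The control flow mirrors the stub: special and negative exponents produce 0, oversized exponents produce min_value, and otherwise the mantissa with its implicit leading 1 is shifted into position and the sign applied. The shift counts below are the mathematically exact ones:

    #include <stdint.h>

    // Truncate binary64 bits (hi word r5, lo word r6 above) to int32.
    int32_t TruncateDoubleBitsToInt32(uint32_t hi, uint32_t lo) {
      const int kMeaningfulBits = 31;                // kBitsPerInt - 1, signed
      uint32_t biased = (hi & 0x7ff00000u) >> 20;    // exponent field
      if (biased == 0 || biased == 0x7ff) return 0;  // zero/subnormal/NaN/Inf
      int32_t exponent = (int32_t)biased - 1023;     // unbias
      if (exponent < 0) return 0;                    // |value| < 1
      if (exponent >= kMeaningfulBits - 1)
        return (int32_t)0x80000000u;                 // saturate to min_value
      uint32_t mantissa = (hi & 0x000fffffu) | (1u << 20);  // implicit 1
      uint32_t magnitude = (exponent <= 20)
          ? mantissa >> (20 - exponent)
          : (mantissa << (exponent - 20)) | (lo >> (32 - (exponent - 20)));
      return (hi & 0x80000000u) ? -(int32_t)magnitude : (int32_t)magnitude;
    }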
| 4297 __ bind(&done); | 4210 __ bind(&done); |
| 4298 switch (elements_kind) { | 4211 switch (elements_kind) { |
| 4299 case EXTERNAL_BYTE_ELEMENTS: | 4212 case EXTERNAL_BYTE_ELEMENTS: |
| 4300 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: | 4213 case EXTERNAL_UNSIGNED_BYTE_ELEMENTS: |
| 4301 __ strb(r5, MemOperand(r3, key, LSR, 1)); | 4214 __ lsr(ip, key, Operand(1)); |
| 4215 __ strb(r5, MemOperand(r3, ip)); |
| 4302 break; | 4216 break; |
| 4303 case EXTERNAL_SHORT_ELEMENTS: | 4217 case EXTERNAL_SHORT_ELEMENTS: |
| 4304 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: | 4218 case EXTERNAL_UNSIGNED_SHORT_ELEMENTS: |
| 4305 __ strh(r5, MemOperand(r3, key, LSL, 0)); | 4219 __ strh(r5, MemOperand(r3, key)); |
| 4306 break; | 4220 break; |
| 4307 case EXTERNAL_INT_ELEMENTS: | 4221 case EXTERNAL_INT_ELEMENTS: |
| 4308 case EXTERNAL_UNSIGNED_INT_ELEMENTS: | 4222 case EXTERNAL_UNSIGNED_INT_ELEMENTS: |
| 4309 __ str(r5, MemOperand(r3, key, LSL, 1)); | 4223 __ lsl(ip, key, Operand(1)); |
| 4224 __ str(r5, MemOperand(r3, ip)); |
| 4310 break; | 4225 break; |
| 4311 case EXTERNAL_PIXEL_ELEMENTS: | 4226 case EXTERNAL_PIXEL_ELEMENTS: |
| 4312 case EXTERNAL_FLOAT_ELEMENTS: | 4227 case EXTERNAL_FLOAT_ELEMENTS: |
| 4313 case EXTERNAL_DOUBLE_ELEMENTS: | 4228 case EXTERNAL_DOUBLE_ELEMENTS: |
| 4314 case FAST_ELEMENTS: | 4229 case FAST_ELEMENTS: |
| 4315 case FAST_SMI_ELEMENTS: | 4230 case FAST_SMI_ELEMENTS: |
| 4316 case FAST_DOUBLE_ELEMENTS: | 4231 case FAST_DOUBLE_ELEMENTS: |
| 4317 case FAST_HOLEY_ELEMENTS: | 4232 case FAST_HOLEY_ELEMENTS: |
| 4318 case FAST_HOLEY_SMI_ELEMENTS: | 4233 case FAST_HOLEY_SMI_ELEMENTS: |
| 4319 case FAST_HOLEY_DOUBLE_ELEMENTS: | 4234 case FAST_HOLEY_DOUBLE_ELEMENTS: |
| (...skipping 41 matching lines...) |
| 4361 // -- lr : return address | 4276 // -- lr : return address |
| 4362 // -- r0 : key | 4277 // -- r0 : key |
| 4363 // -- r1 : receiver | 4278 // -- r1 : receiver |
| 4364 // ----------------------------------- | 4279 // ----------------------------------- |
| 4365 Label miss_force_generic; | 4280 Label miss_force_generic; |
| 4366 | 4281 |
| 4367 // This stub is meant to be tail-jumped to; the receiver must already | 4282 // This stub is meant to be tail-jumped to; the receiver must already |
| 4368 // have been verified by the caller not to be a smi. | 4283 // have been verified by the caller not to be a smi. |
| 4369 | 4284 |
| 4370 // Check that the key is a smi or a heap number convertible to a smi. | 4285 // Check that the key is a smi or a heap number convertible to a smi. |
| 4371 GenerateSmiKeyCheck(masm, r0, r4, r5, d1, d2, &miss_force_generic); | 4286 GenerateSmiKeyCheck(masm, r0, r4, r5, dr0, &miss_force_generic); |
| 4372 | 4287 |
| 4373 // Get the elements array. | 4288 // Get the elements array. |
| 4374 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); | 4289 __ ldr(r2, FieldMemOperand(r1, JSObject::kElementsOffset)); |
| 4375 __ AssertFastElements(r2); | 4290 __ AssertFastElements(r2); |
| 4376 | 4291 |
| 4377 // Check that the key is within bounds. | 4292 // Check that the key is within bounds. |
| 4378 __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); | 4293 __ ldr(r3, FieldMemOperand(r2, FixedArray::kLengthOffset)); |
| 4379 __ cmp(r0, Operand(r3)); | 4294 __ cmphs(r0, r3); |
| 4380 __ b(hs, &miss_force_generic); | 4295 __ bt(&miss_force_generic); |
| 4381 | 4296 |
| 4382 // Load the result and make sure it's not the hole. | 4297 // Load the result and make sure it's not the hole. |
| 4383 __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4298 __ add(r3, r2, Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4384 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); | 4299 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); |
| 4385 __ ldr(r4, | 4300 __ lsl(r4, r0, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 4386 MemOperand(r3, r0, LSL, kPointerSizeLog2 - kSmiTagSize)); | 4301 __ ldr(r4, MemOperand(r3, r4)); |
| 4387 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); | 4302 __ LoadRoot(ip, Heap::kTheHoleValueRootIndex); |
| 4388 __ cmp(r4, ip); | 4303 __ cmp(r4, ip); |
| 4389 __ b(eq, &miss_force_generic); | 4304 __ b(eq, &miss_force_generic); |
| 4390 __ mov(r0, r4); | 4305 __ mov(r0, r4); |
| 4391 __ Ret(); | 4306 __ Ret(); |
| 4392 | 4307 |
| 4393 __ bind(&miss_force_generic); | 4308 __ bind(&miss_force_generic); |
| 4394 Handle<Code> stub = | 4309 Handle<Code> stub = |
| 4395 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); | 4310 masm->isolate()->builtins()->KeyedLoadIC_MissForceGeneric(); |
| 4396 __ Jump(stub, RelocInfo::CODE_TARGET); | 4311 __ Jump(stub, RelocInfo::CODE_TARGET); |
| (...skipping 16 matching lines...) |
| 4413 Register indexed_double_offset = r3; | 4328 Register indexed_double_offset = r3; |
| 4414 Register scratch = r4; | 4329 Register scratch = r4; |
| 4415 Register scratch2 = r5; | 4330 Register scratch2 = r5; |
| 4416 Register scratch3 = r6; | 4331 Register scratch3 = r6; |
| 4417 Register heap_number_map = r7; | 4332 Register heap_number_map = r7; |
| 4418 | 4333 |
| 4419 // This stub is meant to be tail-jumped to; the receiver must already | 4334 // This stub is meant to be tail-jumped to; the receiver must already |
| 4420 // have been verified by the caller not to be a smi. | 4335 // have been verified by the caller not to be a smi. |
| 4421 | 4336 |
| 4422 // Check that the key is a smi or a heap number convertible to a smi. | 4337 // Check that the key is a smi or a heap number convertible to a smi. |
| 4423 GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); | 4338 GenerateSmiKeyCheck(masm, key_reg, r4, r5, dr0, &miss_force_generic); |
| 4424 | 4339 |
| 4425 // Get the elements array. | 4340 // Get the elements array. |
| 4426 __ ldr(elements_reg, | 4341 __ ldr(elements_reg, |
| 4427 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | 4342 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); |
| 4428 | 4343 |
| 4429 // Check that the key is within bounds. | 4344 // Check that the key is within bounds. |
| 4430 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | 4345 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); |
| 4431 __ cmp(key_reg, Operand(scratch)); | 4346 __ cmphs(key_reg, scratch); |
| 4432 __ b(hs, &miss_force_generic); | 4347 __ bt(&miss_force_generic); |
| 4433 | 4348 |
| 4434 // Load the upper word of the double in the fixed array and test for NaN. | 4349 // Load the upper word of the double in the fixed array and test for NaN. |
| 4435 __ add(indexed_double_offset, elements_reg, | 4350 __ lsl(scratch, key_reg, Operand(kDoubleSizeLog2 - kSmiTagSize)); |
| 4436 Operand(key_reg, LSL, kDoubleSizeLog2 - kSmiTagSize)); | 4351 __ add(indexed_double_offset, elements_reg, scratch); |
| 4437 uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); | 4352 uint32_t upper_32_offset = FixedArray::kHeaderSize + sizeof(kHoleNanLower32); |
| 4438 __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); | 4353 __ ldr(scratch, FieldMemOperand(indexed_double_offset, upper_32_offset)); |
| 4439 __ cmp(scratch, Operand(kHoleNanUpper32)); | 4354 __ cmpeq(scratch, Operand(kHoleNanUpper32)); |
| 4440 __ b(&miss_force_generic, eq); | 4355 __ bt(&miss_force_generic); |
| 4441 | 4356 |
| 4442 // Non-NaN. Allocate a new heap number and copy the double value into it. | 4357 // Non-NaN. Allocate a new heap number and copy the double value into it. |
| 4443 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); | 4358 __ LoadRoot(heap_number_map, Heap::kHeapNumberMapRootIndex); |
| 4444 __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, | 4359 __ AllocateHeapNumber(heap_number_reg, scratch2, scratch3, |
| 4445 heap_number_map, &slow_allocate_heapnumber, TAG_RESULT); | 4360 heap_number_map, &slow_allocate_heapnumber); |
| 4446 | 4361 |
| 4447 // Don't need to reload the upper 32 bits of the double, it's already in | 4362 // Don't need to reload the upper 32 bits of the double, it's already in |
| 4448 // scratch. | 4363 // scratch. |
| 4449 __ str(scratch, FieldMemOperand(heap_number_reg, | 4364 __ str(scratch, FieldMemOperand(heap_number_reg, |
| 4450 HeapNumber::kExponentOffset)); | 4365 HeapNumber::kExponentOffset)); |
| 4451 __ ldr(scratch, FieldMemOperand(indexed_double_offset, | 4366 __ ldr(scratch, FieldMemOperand(indexed_double_offset, |
| 4452 FixedArray::kHeaderSize)); | 4367 FixedArray::kHeaderSize)); |
| 4453 __ str(scratch, FieldMemOperand(heap_number_reg, | 4368 __ str(scratch, FieldMemOperand(heap_number_reg, |
| 4454 HeapNumber::kMantissaOffset)); | 4369 HeapNumber::kMantissaOffset)); |
| 4455 | 4370 |
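The hole test above works because the hole is a single, fixed NaN bit pattern that no arithmetic result ever produces, so comparing only the upper 32 bits against kHoleNanUpper32 is sufficient. A sketch, with the constant passed in rather than hard-coded since its concrete value is defined by the VM:

    #include <stdint.h>
    #include <string.h>

    // True if d carries the hole's bit pattern; only the upper word is
    // compared, as in the stub. hole_nan_upper32 stands in for the VM's
    // kHoleNanUpper32.
    bool IsHoleNan(double d, uint32_t hole_nan_upper32) {
      uint64_t bits;
      memcpy(&bits, &d, sizeof(bits));
      return (uint32_t)(bits >> 32) == hole_nan_upper32;
    }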
| (...skipping 33 matching lines...) |
| 4489 Register receiver_reg = r2; | 4404 Register receiver_reg = r2; |
| 4490 Register scratch = r4; | 4405 Register scratch = r4; |
| 4491 Register elements_reg = r3; | 4406 Register elements_reg = r3; |
| 4492 Register length_reg = r5; | 4407 Register length_reg = r5; |
| 4493 Register scratch2 = r6; | 4408 Register scratch2 = r6; |
| 4494 | 4409 |
| 4495 // This stub is meant to be tail-jumped to; the receiver must already | 4410 // This stub is meant to be tail-jumped to; the receiver must already |
| 4496 // have been verified by the caller not to be a smi. | 4411 // have been verified by the caller not to be a smi. |
| 4497 | 4412 |
| 4498 // Check that the key is a smi or a heap number convertible to a smi. | 4413 // Check that the key is a smi or a heap number convertible to a smi. |
| 4499 GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); | 4414 GenerateSmiKeyCheck(masm, key_reg, r4, r5, dr0, &miss_force_generic); |
| 4500 | 4415 |
| 4501 if (IsFastSmiElementsKind(elements_kind)) { | 4416 if (IsFastSmiElementsKind(elements_kind)) { |
| 4502 __ JumpIfNotSmi(value_reg, &transition_elements_kind); | 4417 __ JumpIfNotSmi(value_reg, &transition_elements_kind); |
| 4503 } | 4418 } |
| 4504 | 4419 |
| 4505 // Check that the key is within bounds. | 4420 // Check that the key is within bounds. |
| 4506 __ ldr(elements_reg, | 4421 __ ldr(elements_reg, |
| 4507 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | 4422 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); |
| 4508 if (is_js_array) { | 4423 if (is_js_array) { |
| 4509 __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | 4424 __ ldr(scratch, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); |
| 4510 } else { | 4425 } else { |
| 4511 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | 4426 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); |
| 4512 } | 4427 } |
| 4513 // Compare smis. | 4428 // Compare smis. |
| 4514 __ cmp(key_reg, scratch); | 4429 __ cmphs(key_reg, scratch); |
| 4515 if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { | 4430 if (is_js_array && grow_mode == ALLOW_JSARRAY_GROWTH) { |
| 4516 __ b(hs, &grow); | 4431 __ b(eq, &grow); |
| 4517 } else { | 4432 } else { |
| 4518 __ b(hs, &miss_force_generic); | 4433 __ b(eq, &miss_force_generic); |
| 4519 } | 4434 } |
| 4520 | 4435 |
| 4521 // Make sure elements is a fast element array, not 'cow'. | 4436 // Make sure elements is a fast element array, not 'cow'. |
| 4522 __ CheckMap(elements_reg, | 4437 __ CheckMap(elements_reg, |
| 4523 scratch, | 4438 scratch, |
| 4524 Heap::kFixedArrayMapRootIndex, | 4439 Heap::kFixedArrayMapRootIndex, |
| 4525 &miss_force_generic, | 4440 &miss_force_generic, |
| 4526 DONT_DO_SMI_CHECK); | 4441 DONT_DO_SMI_CHECK); |
| 4527 | 4442 |
| 4528 __ bind(&finish_store); | 4443 __ bind(&finish_store); |
| 4529 if (IsFastSmiElementsKind(elements_kind)) { | 4444 if (IsFastSmiElementsKind(elements_kind)) { |
| 4530 __ add(scratch, | 4445 __ add(scratch, |
| 4531 elements_reg, | 4446 elements_reg, |
| 4532 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4447 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4533 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); | 4448 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); |
| 4534 __ add(scratch, | 4449 __ lsl(scratch2, key_reg, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 4535 scratch, | 4450 __ add(scratch, scratch, scratch2); |
| 4536 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 4537 __ str(value_reg, MemOperand(scratch)); | 4451 __ str(value_reg, MemOperand(scratch)); |
| 4538 } else { | 4452 } else { |
| 4539 ASSERT(IsFastObjectElementsKind(elements_kind)); | 4453 ASSERT(IsFastObjectElementsKind(elements_kind)); |
| 4540 __ add(scratch, | 4454 __ add(scratch, |
| 4541 elements_reg, | 4455 elements_reg, |
| 4542 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); | 4456 Operand(FixedArray::kHeaderSize - kHeapObjectTag)); |
| 4543 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); | 4457 STATIC_ASSERT(kSmiTag == 0 && kSmiTagSize < kPointerSizeLog2); |
| 4544 __ add(scratch, | 4458 __ lsl(scratch2, key_reg, Operand(kPointerSizeLog2 - kSmiTagSize)); |
| 4545 scratch, | 4459 __ add(scratch, scratch, scratch2); |
| 4546 Operand(key_reg, LSL, kPointerSizeLog2 - kSmiTagSize)); | |
| 4547 __ str(value_reg, MemOperand(scratch)); | 4460 __ str(value_reg, MemOperand(scratch)); |
| 4548 __ mov(receiver_reg, value_reg); | 4461 __ mov(receiver_reg, value_reg); |
| 4549 __ RecordWrite(elements_reg, // Object. | 4462 __ RecordWrite(elements_reg, // Object. |
| 4550 scratch, // Address. | 4463 scratch, // Address. |
| 4551 receiver_reg, // Value. | 4464 receiver_reg, // Value. |
| 4552 kLRHasNotBeenSaved, | 4465 kLRHasNotBeenSaved, |
| 4553 kDontSaveFPRegs); | 4466 kDontSaveFPRegs); |
| 4554 } | 4467 } |
| 4555 // value_reg (r0) is preserved. | 4468 // value_reg (r0) is preserved. |
| 4556 // Done. | 4469 // Done. |
| (...skipping 55 matching lines...) |
| 4612 | 4525 |
| 4613 __ bind(&check_capacity); | 4526 __ bind(&check_capacity); |
| 4614 // Check for COW elements; in general they are not handled by this stub. | 4527 // Check for COW elements; in general they are not handled by this stub. |
| 4615 __ CheckMap(elements_reg, | 4528 __ CheckMap(elements_reg, |
| 4616 scratch, | 4529 scratch, |
| 4617 Heap::kFixedCOWArrayMapRootIndex, | 4530 Heap::kFixedCOWArrayMapRootIndex, |
| 4618 &miss_force_generic, | 4531 &miss_force_generic, |
| 4619 DONT_DO_SMI_CHECK); | 4532 DONT_DO_SMI_CHECK); |
| 4620 | 4533 |
| 4621 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | 4534 __ ldr(scratch, FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); |
| 4622 __ cmp(length_reg, scratch); | 4535 __ cmphs(length_reg, scratch); |
| 4623 __ b(hs, &slow); | 4536 __ bt(&slow); |
| 4624 | 4537 |
| 4625 // Grow the array and finish the store. | 4538 // Grow the array and finish the store. |
| 4626 __ add(length_reg, length_reg, Operand(Smi::FromInt(1))); | 4539 __ add(length_reg, length_reg, Operand(Smi::FromInt(1))); |
| 4627 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | 4540 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); |
| 4628 __ jmp(&finish_store); | 4541 __ jmp(&finish_store); |
| 4629 | 4542 |
| 4630 __ bind(&slow); | 4543 __ bind(&slow); |
| 4631 Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); | 4544 Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); |
| 4632 __ Jump(ic_slow, RelocInfo::CODE_TARGET); | 4545 __ Jump(ic_slow, RelocInfo::CODE_TARGET); |
| 4633 } | 4546 } |
| (...skipping 23 matching lines...) |
| 4657 Register scratch1 = r4; | 4570 Register scratch1 = r4; |
| 4658 Register scratch2 = r5; | 4571 Register scratch2 = r5; |
| 4659 Register scratch3 = r6; | 4572 Register scratch3 = r6; |
| 4660 Register scratch4 = r7; | 4573 Register scratch4 = r7; |
| 4661 Register length_reg = r7; | 4574 Register length_reg = r7; |
| 4662 | 4575 |
| 4663 // This stub is meant to be tail-jumped to; the receiver must already | 4576 // This stub is meant to be tail-jumped to; the receiver must already |
| 4664 // have been verified by the caller not to be a smi. | 4577 // have been verified by the caller not to be a smi. |
| 4665 | 4578 |
| 4666 // Check that the key is a smi or a heap number convertible to a smi. | 4579 // Check that the key is a smi or a heap number convertible to a smi. |
| 4667 GenerateSmiKeyCheck(masm, key_reg, r4, r5, d1, d2, &miss_force_generic); | 4580 GenerateSmiKeyCheck(masm, key_reg, r4, r5, dr0, &miss_force_generic); |
| 4668 | 4581 |
| 4669 __ ldr(elements_reg, | 4582 __ ldr(elements_reg, |
| 4670 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | 4583 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); |
| 4671 | 4584 |
| 4672 // Check that the key is within bounds. | 4585 // Check that the key is within bounds. |
| 4673 if (is_js_array) { | 4586 if (is_js_array) { |
| 4674 __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | 4587 __ ldr(scratch1, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); |
| 4675 } else { | 4588 } else { |
| 4676 __ ldr(scratch1, | 4589 __ ldr(scratch1, |
| 4677 FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); | 4590 FieldMemOperand(elements_reg, FixedArray::kLengthOffset)); |
| 4678 } | 4591 } |
| 4679 // Compare smis; the unsigned compare catches both negative and out-of-bound | 4592 // Compare smis; the unsigned compare catches both negative and out-of-bound |
| 4680 // indexes. | 4593 // indexes. |
| 4681 __ cmp(key_reg, scratch1); | 4594 __ cmphs(key_reg, scratch1); |
| 4682 if (grow_mode == ALLOW_JSARRAY_GROWTH) { | 4595 if (grow_mode == ALLOW_JSARRAY_GROWTH) { |
| 4683 __ b(hs, &grow); | 4596 __ b(eq, &grow); |
| 4684 } else { | 4597 } else { |
| 4685 __ b(hs, &miss_force_generic); | 4598 __ b(eq, &miss_force_generic); |
| 4686 } | 4599 } |
| 4687 | 4600 |
| 4688 __ bind(&finish_store); | 4601 __ bind(&finish_store); |
| 4689 __ StoreNumberToDoubleElements(value_reg, | 4602 __ StoreNumberToDoubleElements(value_reg, |
| 4690 key_reg, | 4603 key_reg, |
| 4691 receiver_reg, | 4604 receiver_reg, |
| 4692 // All registers after this are overwritten. | 4605 // All registers after this are overwritten. |
| 4693 elements_reg, | 4606 elements_reg, |
| 4694 scratch1, | 4607 scratch1, |
| 4695 scratch2, | 4608 scratch2, |
| (...skipping 61 matching lines...) |
| 4757 __ mov(length_reg, Operand(Smi::FromInt(1))); | 4670 __ mov(length_reg, Operand(Smi::FromInt(1))); |
| 4758 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | 4671 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); |
| 4759 __ ldr(elements_reg, | 4672 __ ldr(elements_reg, |
| 4760 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); | 4673 FieldMemOperand(receiver_reg, JSObject::kElementsOffset)); |
| 4761 __ jmp(&finish_store); | 4674 __ jmp(&finish_store); |
| 4762 | 4675 |
| 4763 __ bind(&check_capacity); | 4676 __ bind(&check_capacity); |
| 4764 // Make sure that the backing store can hold additional elements. | 4677 // Make sure that the backing store can hold additional elements. |
| 4765 __ ldr(scratch1, | 4678 __ ldr(scratch1, |
| 4766 FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); | 4679 FieldMemOperand(elements_reg, FixedDoubleArray::kLengthOffset)); |
| 4767 __ cmp(length_reg, scratch1); | 4680 __ cmphs(length_reg, scratch1); |
| 4768 __ b(hs, &slow); | 4681 __ b(eq, &slow); |
| 4769 | 4682 |
| 4770 // Grow the array and finish the store. | 4683 // Grow the array and finish the store. |
| 4771 __ add(length_reg, length_reg, Operand(Smi::FromInt(1))); | 4684 __ add(length_reg, length_reg, Operand(Smi::FromInt(1))); |
| 4772 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); | 4685 __ str(length_reg, FieldMemOperand(receiver_reg, JSArray::kLengthOffset)); |
| 4773 __ jmp(&finish_store); | 4686 __ jmp(&finish_store); |
| 4774 | 4687 |
| 4775 __ bind(&slow); | 4688 __ bind(&slow); |
| 4776 Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); | 4689 Handle<Code> ic_slow = masm->isolate()->builtins()->KeyedStoreIC_Slow(); |
| 4777 __ Jump(ic_slow, RelocInfo::CODE_TARGET); | 4690 __ Jump(ic_slow, RelocInfo::CODE_TARGET); |
| 4778 } | 4691 } |
| 4779 } | 4692 } |
| 4780 | 4693 |
| 4781 | 4694 |
| 4782 #undef __ | 4695 #undef __ |
| 4783 | 4696 |
| 4784 } } // namespace v8::internal | 4697 } } // namespace v8::internal |
| 4785 | 4698 |
| 4786 #endif // V8_TARGET_ARCH_ARM | 4699 #endif // V8_TARGET_ARCH_SH4 |